diff --git a/.bsp/sbt.json b/.bsp/sbt.json deleted file mode 100644 index 32a255928..000000000 --- a/.bsp/sbt.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"sbt","version":"1.10.1","bspVersion":"2.1.0-M1","languages":["scala"],"argv":["C:\\Program Files\\Java\\jdk-21/bin/java","-Xms100m","-Xmx100m","-classpath","C:/Users/youse/AppData/Roaming/JetBrains/IntelliJIdea2024.2/plugins/Scala/launcher/sbt-launch.jar","-Dsbt.script=C:\\Program%20Files%20(x86)\\sbt\\bin\\sbt.bat","xsbt.boot.Boot","-bsp"]} \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..9b5d9a80d --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +target +.idea +.bsp +project \ No newline at end of file diff --git a/.idea/.name b/.idea/.name deleted file mode 100644 index d2925002d..000000000 --- a/.idea/.name +++ /dev/null @@ -1 +0,0 @@ -scalation \ No newline at end of file diff --git a/.idea/codeStyles/Project.xml b/.idea/codeStyles/Project.xml deleted file mode 100644 index 919ce1f1f..000000000 --- a/.idea/codeStyles/Project.xml +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/.idea/codeStyles/codeStyleConfig.xml b/.idea/codeStyles/codeStyleConfig.xml deleted file mode 100644 index a55e7a179..000000000 --- a/.idea/codeStyles/codeStyleConfig.xml +++ /dev/null @@ -1,5 +0,0 @@ - - - - \ No newline at end of file diff --git a/.idea/libraries/sbt__org_openjfx_javafx_base_22_jar.xml b/.idea/libraries/sbt__org_openjfx_javafx_base_22_jar.xml deleted file mode 100644 index bb9af5614..000000000 --- a/.idea/libraries/sbt__org_openjfx_javafx_base_22_jar.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/sbt__org_openjfx_javafx_base_22_win_jar.xml b/.idea/libraries/sbt__org_openjfx_javafx_base_22_win_jar.xml deleted file mode 100644 index 7e0ea8ed8..000000000 --- a/.idea/libraries/sbt__org_openjfx_javafx_base_22_win_jar.xml +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - \ No 
newline at end of file diff --git a/.idea/libraries/sbt__org_openjfx_javafx_controls_22_jar.xml b/.idea/libraries/sbt__org_openjfx_javafx_controls_22_jar.xml deleted file mode 100644 index 839bdb154..000000000 --- a/.idea/libraries/sbt__org_openjfx_javafx_controls_22_jar.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/sbt__org_openjfx_javafx_controls_22_win_jar.xml b/.idea/libraries/sbt__org_openjfx_javafx_controls_22_win_jar.xml deleted file mode 100644 index 8c882e814..000000000 --- a/.idea/libraries/sbt__org_openjfx_javafx_controls_22_win_jar.xml +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/sbt__org_openjfx_javafx_fxml_22_jar.xml b/.idea/libraries/sbt__org_openjfx_javafx_fxml_22_jar.xml deleted file mode 100644 index eaab146e9..000000000 --- a/.idea/libraries/sbt__org_openjfx_javafx_fxml_22_jar.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/sbt__org_openjfx_javafx_fxml_22_win_jar.xml b/.idea/libraries/sbt__org_openjfx_javafx_fxml_22_win_jar.xml deleted file mode 100644 index fa07ec00c..000000000 --- a/.idea/libraries/sbt__org_openjfx_javafx_fxml_22_win_jar.xml +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/sbt__org_openjfx_javafx_graphics_22_jar.xml b/.idea/libraries/sbt__org_openjfx_javafx_graphics_22_jar.xml deleted file mode 100644 index 4d3e406fd..000000000 --- a/.idea/libraries/sbt__org_openjfx_javafx_graphics_22_jar.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/sbt__org_openjfx_javafx_graphics_22_win_jar.xml b/.idea/libraries/sbt__org_openjfx_javafx_graphics_22_win_jar.xml deleted file mode 100644 index d4b97de06..000000000 --- a/.idea/libraries/sbt__org_openjfx_javafx_graphics_22_win_jar.xml +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - \ 
No newline at end of file diff --git a/.idea/libraries/sbt__org_openjfx_javafx_media_22_jar.xml b/.idea/libraries/sbt__org_openjfx_javafx_media_22_jar.xml deleted file mode 100644 index e5b947da0..000000000 --- a/.idea/libraries/sbt__org_openjfx_javafx_media_22_jar.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/sbt__org_openjfx_javafx_media_22_win_jar.xml b/.idea/libraries/sbt__org_openjfx_javafx_media_22_win_jar.xml deleted file mode 100644 index 19a39e9a5..000000000 --- a/.idea/libraries/sbt__org_openjfx_javafx_media_22_win_jar.xml +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/sbt__org_openjfx_javafx_swing_22_jar.xml b/.idea/libraries/sbt__org_openjfx_javafx_swing_22_jar.xml deleted file mode 100644 index d38e75c8b..000000000 --- a/.idea/libraries/sbt__org_openjfx_javafx_swing_22_jar.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/sbt__org_openjfx_javafx_swing_22_win_jar.xml b/.idea/libraries/sbt__org_openjfx_javafx_swing_22_win_jar.xml deleted file mode 100644 index 98b22019c..000000000 --- a/.idea/libraries/sbt__org_openjfx_javafx_swing_22_win_jar.xml +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/sbt__org_openjfx_javafx_web_22_jar.xml b/.idea/libraries/sbt__org_openjfx_javafx_web_22_jar.xml deleted file mode 100644 index 4bceebccb..000000000 --- a/.idea/libraries/sbt__org_openjfx_javafx_web_22_jar.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/sbt__org_openjfx_javafx_web_22_win_jar.xml b/.idea/libraries/sbt__org_openjfx_javafx_web_22_win_jar.xml deleted file mode 100644 index 3bdfa9956..000000000 --- a/.idea/libraries/sbt__org_openjfx_javafx_web_22_win_jar.xml +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - \ No newline at end of file diff --git 
a/.idea/libraries/sbt__org_scala_lang_scala3_library_3_3_6_4_jar.xml b/.idea/libraries/sbt__org_scala_lang_scala3_library_3_3_6_4_jar.xml deleted file mode 100644 index 13d9ffe1a..000000000 --- a/.idea/libraries/sbt__org_scala_lang_scala3_library_3_3_6_4_jar.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/sbt__org_scala_lang_scala_library_2_13_15_jar.xml b/.idea/libraries/sbt__org_scala_lang_scala_library_2_13_15_jar.xml deleted file mode 100644 index 2e01bb45e..000000000 --- a/.idea/libraries/sbt__org_scala_lang_scala_library_2_13_15_jar.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/sbt__org_scalafx_scalafx_3_22_0_0_R33_jar.xml b/.idea/libraries/sbt__org_scalafx_scalafx_3_22_0_0_R33_jar.xml deleted file mode 100644 index 1c5eea6aa..000000000 --- a/.idea/libraries/sbt__org_scalafx_scalafx_3_22_0_0_R33_jar.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/libraries/sbt__scala_sdk_3_6_4.xml b/.idea/libraries/sbt__scala_sdk_3_6_4.xml deleted file mode 100644 index 276bec89e..000000000 --- a/.idea/libraries/sbt__scala_sdk_3_6_4.xml +++ /dev/null @@ -1,65 +0,0 @@ - - - - Scala_3_6 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - file://$USER_HOME$/AppData/Local/Coursier/cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-sbt-bridge/3.6.4/scala3-sbt-bridge-3.6.4.jar - - - - - - \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml deleted file mode 100644 index 49445490e..000000000 --- a/.idea/misc.xml +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml deleted file mode 100644 index 07ef4d629..000000000 --- a/.idea/modules.xml +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - \ No newline at end of file diff --git 
a/.idea/modules/scalation.iml b/.idea/modules/scalation.iml deleted file mode 100644 index 3df2b167c..000000000 --- a/.idea/modules/scalation.iml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/modules/scalation.scalation-build.iml b/.idea/modules/scalation.scalation-build.iml deleted file mode 100644 index 95974167d..000000000 --- a/.idea/modules/scalation.scalation-build.iml +++ /dev/null @@ -1,116 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/sbt.xml b/.idea/sbt.xml deleted file mode 100644 index 79343e098..000000000 --- a/.idea/sbt.xml +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/.idea/scala_compiler.xml b/.idea/scala_compiler.xml deleted file mode 100644 index d101acde6..000000000 --- a/.idea/scala_compiler.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - - - \ No newline at end of file diff --git a/.idea/scala_settings.xml b/.idea/scala_settings.xml deleted file mode 100644 index 1b970c734..000000000 --- a/.idea/scala_settings.xml +++ /dev/null @@ -1,7 +0,0 @@ - - - - - \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml deleted file mode 100644 index 35eb1ddfb..000000000 --- a/.idea/vcs.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/.idea/workspace.xml b/.idea/workspace.xml deleted file mode 100644 index 70c186d8a..000000000 --- a/.idea/workspace.xml +++ /dev/null @@ -1,177 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - { - "customColor": "", - "associatedIndex": 4 -} - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 1743556426497 - - - - - - \ No newline at end of file diff --git a/README.html b/README.html index 
dd90edda6..1a9f090af 100644 --- a/README.html +++ b/README.html @@ -42,7 +42,7 @@

SCALAble SimulaTION - ScalaTion

Installation Instructions

Version 2.0 Requires: -Scala 3.6.4 and +Scala 3.8.2 and Java 21 (or from Open JDK Archive)
diff --git a/README.md b/README.md index 6bdbe45ed..9640adcce 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ Please read the LICENSE file (an MIT style license)

Installation Instructions

Version 2.0 Requires: -Scala 3.6.4 and +Scala 3.8.2 and Java 21 (or from Open JDK Archive)
diff --git a/build.sbt b/build.sbt index 2a1a235d9..16bd9d3f9 100644 --- a/build.sbt +++ b/build.sbt @@ -3,14 +3,14 @@ lazy val scalation = project.in(file(".")) .settings( - scalaVersion := "3.6.4", + scalaVersion := "3.8.2", scalacOptions ++= Seq( "-deprecation", // emit warning and location for usages of deprecated APIs "-explain", // explain errors in more detail // "-explain-types", // explain type errors in more detail "-new-syntax", // require `then` and `do` in control expressions. "-Wunused:all", // warn of unused imports, ... - "-Xfatal-warnings") // fail the compilation if there are any warnings + "-Werror") // fail the compilation if there are any warnings // javacOptions += "--add-modules jdk.incubator.vector" ) diff --git a/project/build.properties b/project/build.properties deleted file mode 100644 index ee4c672cd..000000000 --- a/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=1.10.1 diff --git a/project/target/config-classes/$2861502907535c74dca3$.class b/project/target/config-classes/$2861502907535c74dca3$.class deleted file mode 100644 index 71cc256a2..000000000 Binary files a/project/target/config-classes/$2861502907535c74dca3$.class and /dev/null differ diff --git a/project/target/config-classes/$2861502907535c74dca3.cache b/project/target/config-classes/$2861502907535c74dca3.cache deleted file mode 100644 index e032ac2f4..000000000 --- a/project/target/config-classes/$2861502907535c74dca3.cache +++ /dev/null @@ -1 +0,0 @@ -scalation diff --git a/project/target/config-classes/$2861502907535c74dca3.class b/project/target/config-classes/$2861502907535c74dca3.class deleted file mode 100644 index 602e4c693..000000000 Binary files a/project/target/config-classes/$2861502907535c74dca3.class and /dev/null differ diff --git a/project/target/config-classes/$6654ed73398b327bbd7b$.class b/project/target/config-classes/$6654ed73398b327bbd7b$.class deleted file mode 100644 index 8afb4b769..000000000 Binary files 
a/project/target/config-classes/$6654ed73398b327bbd7b$.class and /dev/null differ diff --git a/project/target/config-classes/$6654ed73398b327bbd7b.cache b/project/target/config-classes/$6654ed73398b327bbd7b.cache deleted file mode 100644 index 050f36c67..000000000 --- a/project/target/config-classes/$6654ed73398b327bbd7b.cache +++ /dev/null @@ -1 +0,0 @@ -sbt.internal.DslEntry \ No newline at end of file diff --git a/project/target/config-classes/$6654ed73398b327bbd7b.class b/project/target/config-classes/$6654ed73398b327bbd7b.class deleted file mode 100644 index 096241bc4..000000000 Binary files a/project/target/config-classes/$6654ed73398b327bbd7b.class and /dev/null differ diff --git a/project/target/config-classes/$9579a629f282f890015b$.class b/project/target/config-classes/$9579a629f282f890015b$.class deleted file mode 100644 index 499ff4ee8..000000000 Binary files a/project/target/config-classes/$9579a629f282f890015b$.class and /dev/null differ diff --git a/project/target/config-classes/$9579a629f282f890015b.cache b/project/target/config-classes/$9579a629f282f890015b.cache deleted file mode 100644 index 050f36c67..000000000 --- a/project/target/config-classes/$9579a629f282f890015b.cache +++ /dev/null @@ -1 +0,0 @@ -sbt.internal.DslEntry \ No newline at end of file diff --git a/project/target/config-classes/$9579a629f282f890015b.class b/project/target/config-classes/$9579a629f282f890015b.class deleted file mode 100644 index 21c4cc5ac..000000000 Binary files a/project/target/config-classes/$9579a629f282f890015b.class and /dev/null differ diff --git a/project/target/config-classes/$b5876b275fb996f69cd1$.class b/project/target/config-classes/$b5876b275fb996f69cd1$.class deleted file mode 100644 index f9789b1f8..000000000 Binary files a/project/target/config-classes/$b5876b275fb996f69cd1$.class and /dev/null differ diff --git a/project/target/config-classes/$b5876b275fb996f69cd1.cache b/project/target/config-classes/$b5876b275fb996f69cd1.cache deleted file mode 
100644 index 050f36c67..000000000 --- a/project/target/config-classes/$b5876b275fb996f69cd1.cache +++ /dev/null @@ -1 +0,0 @@ -sbt.internal.DslEntry \ No newline at end of file diff --git a/project/target/config-classes/$b5876b275fb996f69cd1.class b/project/target/config-classes/$b5876b275fb996f69cd1.class deleted file mode 100644 index ede64b8e4..000000000 Binary files a/project/target/config-classes/$b5876b275fb996f69cd1.class and /dev/null differ diff --git a/project/target/scala-2.12/sbt-1.0/sync/copy-resource b/project/target/scala-2.12/sbt-1.0/sync/copy-resource deleted file mode 100644 index 9d348e7bd..000000000 --- a/project/target/scala-2.12/sbt-1.0/sync/copy-resource +++ /dev/null @@ -1 +0,0 @@ -[[{},{}],{}] \ No newline at end of file diff --git a/project/target/scala-2.12/sbt-1.0/update/update_cache_2.12/inputs b/project/target/scala-2.12/sbt-1.0/update/update_cache_2.12/inputs deleted file mode 100644 index 5ce68556c..000000000 --- a/project/target/scala-2.12/sbt-1.0/update/update_cache_2.12/inputs +++ /dev/null @@ -1 +0,0 @@ --1508279937 \ No newline at end of file diff --git a/project/target/scala-2.12/sbt-1.0/update/update_cache_2.12/output b/project/target/scala-2.12/sbt-1.0/update/update_cache_2.12/output deleted file mode 100644 index c8ebf7988..000000000 --- a/project/target/scala-2.12/sbt-1.0/update/update_cache_2.12/output +++ /dev/null @@ -1 +0,0 @@ 
-{"cachedDescriptor":".","configurations":[{"configuration":{"name":"compile"},"modules":[],"details":[]},{"configuration":{"name":"compile-internal"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.12.19","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[{"organization":"org.scala-sbt","name":"io_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-ivy_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-position_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"sbt","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-logging_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"scripted-plugin_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-core_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}}],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.12.19/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"jar","extension":"jar","configurations":[],"extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/.sbt/boot/scala-2.12.19/lib/scala-library.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.12.19/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]}],"details":[]},{"configur
ation":{"name":"docs"},"modules":[],"details":[]},{"configuration":{"name":"optional"},"modules":[],"details":[]},{"configuration":{"name":"plugin"},"modules":[],"details":[]},{"configuration":{"name":"pom"},"modules":[],"details":[]},{"configuration":{"name":"provided"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.12.19","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[{"organization":"org.scala-sbt","name":"io_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-ivy_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-position_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"sbt","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-logging_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"scripted-plugin_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-core_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}}],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.12.19/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"jar","extension":"jar","configurations":[],"extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/.sbt/boot/scala-2.12.19/lib/scala-library.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.12.19/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"run
time"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]}],"details":[]},{"configuration":{"name":"runtime"},"modules":[],"details":[]},{"configuration":{"name":"runtime-internal"},"modules":[],"details":[]},{"configuration":{"name":"scala-doc-tool"},"modules":[],"details":[]},{"configuration":{"name":"scala-tool"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala-compiler","revision":"2.12.19","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[{"organization":"org.scala-sbt","name":"io_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-ivy_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-position_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"sbt","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-logging_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"scripted-plugin_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-core_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}}],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-compiler","type":"jar","extension":"jar","configurations":[],"extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/.sbt/boot/scala-2.12.19/lib/scala-compiler.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Ap
ache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-compiler","revision":"2.12.19","configurations":"optional","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[{"organization":"org.scala-sbt","name":"io_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-ivy_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-position_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"sbt","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-logging_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"scripted-plugin_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-core_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}}],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-compiler","type":"jar","extension":"jar","configurations":[],"extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/.sbt/boot/scala-2.12.19/lib/scala-compiler.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.12.19","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":
[{"organization":"org.scala-sbt","name":"io_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-ivy_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-position_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"sbt","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-logging_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"scripted-plugin_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-core_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}}],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.12.19/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"jar","extension":"jar","configurations":[],"extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/.sbt/boot/scala-2.12.19/lib/scala-library.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.12.19/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.12.19","configurations":"optional","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[{"organization":"org.scala-sbt","name":"io_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-ivy_2.1
2","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-position_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"sbt","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-logging_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"scripted-plugin_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-core_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}}],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.12.19/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"jar","extension":"jar","configurations":[],"extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/.sbt/boot/scala-2.12.19/lib/scala-library.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.12.19/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-reflect","revision":"2.12.19","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[{"organization":"org.scala-sbt","name":"io_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-ivy_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-position_2.12","artifact":"*","configurations":[],"crossVersion":{"t
ype":"Disabled"}},{"organization":"org.scala-sbt","name":"sbt","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-logging_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"scripted-plugin_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-core_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}}],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.12.19/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-reflect","type":"jar","extension":"jar","configurations":[],"extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/.sbt/boot/scala-2.12.19/lib/scala-reflect.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.12.19/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang.modules","name":"scala-xml_2.12","revision":"2.2.0","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[{"organization":"org.scala-sbt","name":"io_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-ivy_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-position_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"sbt","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-l
ogging_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-lang","name":"*","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"scripted-plugin_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-core_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}}],"extraAttributes":{"info.apiURL":"https://javadoc.io/doc/org.scala-lang.modules/scala-xml_2.13/","info.versionScheme":"early-semver"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-xml_2.12","type":"bundle","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/modules/scala-xml_2.12/2.2.0/scala-xml_2.12-2.2.0.jar","extraAttributes":{"info.apiURL":"https://javadoc.io/doc/org.scala-lang.modules/scala-xml_2.13/","info.versionScheme":"early-semver"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/modules/scala-xml_2.12/2.2.0/scala-xml_2.12-2.2.0.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"http://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://javadoc.io/doc/org.scala-lang.modules/scala-xml_2.13/","info.versionScheme":"early-semver"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"jline","name":"jline","revision":"2.14.6","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[{"organization":"org.scala-sbt","name":"io_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-ivy_2.12","artifact":"*","configurations
":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-position_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"sbt","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-logging_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"scripted-plugin_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-core_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}}],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jline","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/jline/jline/2.14.6/jline-2.14.6.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/jline/jline/2.14.6/jline-2.14.6.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["The BSD 
License","http://www.opensource.org/licenses/bsd-license.php"]],"callers":[]}],"details":[]},{"configuration":{"name":"sources"},"modules":[],"details":[]},{"configuration":{"name":"test"},"modules":[],"details":[]},{"configuration":{"name":"test-internal"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.12.19","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[{"organization":"org.scala-sbt","name":"io_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-ivy_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-position_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"sbt","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"util-logging_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"scripted-plugin_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}},{"organization":"org.scala-sbt","name":"librarymanagement-core_2.12","artifact":"*","configurations":[],"crossVersion":{"type":"Disabled"}}],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.12.19/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"jar","extension":"jar","configurations":[],"extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/.sbt/boot/scala-2.12.19/lib/scala-library.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.12.19/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licen
ses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]}],"details":[]}],"stats":{"resolveTime":-1,"downloadTime":-1,"downloadSize":-1,"cached":false},"stamps":{}} \ No newline at end of file diff --git a/project/target/streams/_global/_global/_global/streams/out b/project/target/streams/_global/_global/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/target/streams/_global/_global/csrLogger/_global/streams/out b/project/target/streams/_global/_global/csrLogger/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/target/streams/_global/csrConfiguration/_global/streams/out b/project/target/streams/_global/csrConfiguration/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/target/streams/_global/csrProject/_global/streams/out b/project/target/streams/_global/csrProject/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/target/streams/_global/dependencyPositions/_global/streams/update_cache_2.12/input_dsp b/project/target/streams/_global/dependencyPositions/_global/streams/update_cache_2.12/input_dsp deleted file mode 100644 index 77b55f12a..000000000 --- a/project/target/streams/_global/dependencyPositions/_global/streams/update_cache_2.12/input_dsp +++ /dev/null @@ -1 +0,0 @@ --768128706 \ No newline at end of file diff --git a/project/target/streams/_global/dependencyPositions/_global/streams/update_cache_2.12/output_dsp b/project/target/streams/_global/dependencyPositions/_global/streams/update_cache_2.12/output_dsp deleted file mode 100644 index fa8310de2..000000000 --- a/project/target/streams/_global/dependencyPositions/_global/streams/update_cache_2.12/output_dsp +++ /dev/null @@ -1 +0,0 @@ 
-{"{\"organization\":\"org.scala-lang\",\"name\":\"scala-library\",\"revision\":\"2.12.19\",\"configurations\":\"provided\",\"isChanging\":false,\"isTransitive\":true,\"isForce\":false,\"explicitArtifacts\":[],\"inclusions\":[],\"exclusions\":[],\"extraAttributes\":{},\"crossVersion\":{\"type\":\"Disabled\"}}":{"value":{"$fields":["path","startLine"],"path":"(sbt.Classpaths.jvmBaseSettings) Defaults.scala","startLine":3407},"type":"LinePosition"}} \ No newline at end of file diff --git a/project/target/streams/_global/ivyConfiguration/_global/streams/out b/project/target/streams/_global/ivyConfiguration/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/target/streams/_global/ivySbt/_global/streams/out b/project/target/streams/_global/ivySbt/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/target/streams/_global/moduleSettings/_global/streams/out b/project/target/streams/_global/moduleSettings/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/target/streams/_global/projectDescriptors/_global/streams/out b/project/target/streams/_global/projectDescriptors/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/target/streams/_global/scalaCompilerBridgeScope/_global/streams/out b/project/target/streams/_global/scalaCompilerBridgeScope/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/target/streams/_global/update/_global/streams/out b/project/target/streams/_global/update/_global/streams/out deleted file mode 100644 index 7f8496c1c..000000000 --- a/project/target/streams/_global/update/_global/streams/out +++ /dev/null @@ -1,3 +0,0 @@ -[debug] not up to date. inChanged = true, force = false -[debug] Updating ProjectRef(uri("file:/C:/Users/youse/OneDrive/Documents/New%20Scalation/scalation_2.0/project/"), "scalation_2-0-build")... 
-[debug] Done updating ProjectRef(uri("file:/C:/Users/youse/OneDrive/Documents/New%20Scalation/scalation_2.0/project/"), "scalation_2-0-build") diff --git a/project/target/streams/compile/_global/_global/compileOutputs/previous b/project/target/streams/compile/_global/_global/compileOutputs/previous deleted file mode 100644 index fd1739977..000000000 --- a/project/target/streams/compile/_global/_global/compileOutputs/previous +++ /dev/null @@ -1 +0,0 @@ -["sbt.Task[scala.collection.Seq[java.nio.file.Path]]",["C:\\Users\\youse\\OneDrive\\Documents\\GitHub\\scalation_2.0\\project\\target\\scala-2.12\\sbt-1.0\\zinc\\inc_compile_2.12.zip"]] \ No newline at end of file diff --git a/project/target/streams/compile/_global/_global/discoveredMainClasses/data b/project/target/streams/compile/_global/_global/discoveredMainClasses/data deleted file mode 100644 index 0637a088a..000000000 --- a/project/target/streams/compile/_global/_global/discoveredMainClasses/data +++ /dev/null @@ -1 +0,0 @@ -[] \ No newline at end of file diff --git a/project/target/streams/compile/bspReporter/_global/streams/out b/project/target/streams/compile/bspReporter/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/target/streams/compile/compile/_global/streams/out b/project/target/streams/compile/compile/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/target/streams/compile/compileIncremental/_global/streams/export b/project/target/streams/compile/compileIncremental/_global/streams/export deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/target/streams/compile/compileIncremental/_global/streams/out b/project/target/streams/compile/compileIncremental/_global/streams/out deleted file mode 100644 index 5db868707..000000000 --- a/project/target/streams/compile/compileIncremental/_global/streams/out +++ /dev/null @@ -1,6 +0,0 @@ -[debug] [zinc] IncrementalCompile ----------- -[debug] 
IncrementalCompile.incrementalCompile -[debug] previous = Stamps for: 0 products, 0 sources, 0 libraries -[debug] current source = Set() -[debug] > initialChanges = InitialChanges(Changes(added = Set(), removed = Set(), changed = Set(), unmodified = ...),Set(),Set(),API Changes: Set()) -[debug] Full compilation, no sources in previous analysis. diff --git a/project/target/streams/compile/copyResources/_global/streams/out b/project/target/streams/compile/copyResources/_global/streams/out deleted file mode 100644 index 499952762..000000000 --- a/project/target/streams/compile/copyResources/_global/streams/out +++ /dev/null @@ -1,2 +0,0 @@ -[debug] Copy resource mappings: -[debug] diff --git a/project/target/streams/compile/dependencyClasspath/_global/streams/export b/project/target/streams/compile/dependencyClasspath/_global/streams/export deleted file mode 100644 index 6261fede0..000000000 --- a/project/target/streams/compile/dependencyClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ 
-C:\Users\youse\.sbt\1.0\plugins\target\scala-2.12\sbt-1.0\classes;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-library.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-compiler.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-reflect.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-xml_2.12-2.2.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\actions_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\caffeine-2.8.5.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\checker-qual-3.4.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\collections_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\command_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\compiler-bridge_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\compiler-interface-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\completion_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\config-1.4.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\core-macros_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\disruptor-3.4.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\error_prone_annotations-2.4.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\file-tree-views-2.1.12.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\gigahorse-apache-http_2.12-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\gigahorse-core_2.12-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\io_2.12-1.10.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ipcsocket-1.6.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ivy-2.3.0-sbt-396a783bba347016e7fe30dacc60d355be607fe2.jar;C:\Users\youse\.sbt\boot\scala-
2.12.19\org.scala-sbt\sbt\1.10.1\jansi-2.4.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-2.14.7-sbt-9c3b6aca11c57e339441442bbf58e550cdfecb79.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-builtins-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-native-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-reader-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-style-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-jansi-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-jna-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jna-5.13.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jna-platform-5.13.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jsch-0.2.17.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\launcher-interface-1.4.3.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\librarymanagement-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\librarymanagement-ivy_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\lm-coursier-shaded_2.12-2.1.4.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-api-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-core-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-slf4j-impl-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\logic_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\main-settings_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\main_2.12-1.10.1.jar;C:\Users\youse\.sbt\b
oot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\protocol_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\reactive-streams-1.0.3.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\run_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sbinary_2.12-0.5.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sbt-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-collection-compat_2.12-2.11.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-compiler-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-library-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-parser-combinators_2.12-1.1.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-reflect-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-xml_2.12-2.3.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scripted-plugin_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-apache-httpasyncclient-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-jawn-parser_2.12-1.3.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-scalajson_2.12-1.0.0-M4.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-core_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-murmurhash_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-scalajson_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\slf4j-api-1.7.36.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ssl-config-core_2.12-0.6.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\task-system_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1
.10.1\tasks_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\template-resolver-0.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\test-agent-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\test-interface-1.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\testing_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-cache_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-control_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-interface-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-logging_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-position_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-relation_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-tracking_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zero-allocation-hashing-0.16.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-apiinfo_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-classfile_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-classpath_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-compile-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-compile_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-lm-integration_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-persist-core-assembly-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-persist_2.12-1.10.1.jar;C:\Users\youse\.
sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc_2.12-1.10.1.jar diff --git a/project/target/streams/compile/exportedProducts/_global/streams/export b/project/target/streams/compile/exportedProducts/_global/streams/export deleted file mode 100644 index c09bd0fef..000000000 --- a/project/target/streams/compile/exportedProducts/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ -C:\Users\youse\OneDrive\Documents\GitHub\scalation_2.0\project\target\scala-2.12\sbt-1.0\classes diff --git a/project/target/streams/compile/externalDependencyClasspath/_global/streams/export b/project/target/streams/compile/externalDependencyClasspath/_global/streams/export deleted file mode 100644 index 287231bc4..000000000 --- a/project/target/streams/compile/externalDependencyClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ -C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-library.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-compiler.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-reflect.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-xml_2.12-2.2.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\actions_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\caffeine-2.8.5.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\checker-qual-3.4.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\collections_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\command_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\compiler-bridge_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\compiler-interface-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\completion_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\config-1.4.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\core-macros_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\sc
ala-2.12.19\org.scala-sbt\sbt\1.10.1\disruptor-3.4.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\error_prone_annotations-2.4.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\file-tree-views-2.1.12.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\gigahorse-apache-http_2.12-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\gigahorse-core_2.12-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\io_2.12-1.10.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ipcsocket-1.6.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ivy-2.3.0-sbt-396a783bba347016e7fe30dacc60d355be607fe2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jansi-2.4.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-2.14.7-sbt-9c3b6aca11c57e339441442bbf58e550cdfecb79.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-builtins-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-native-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-reader-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-style-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-jansi-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-jna-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jna-5.13.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jna-platform-5.13.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jsch-0.2.17.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\launcher-interface-1.4.3.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\librarymanagement-core_2.1
2-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\librarymanagement-ivy_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\lm-coursier-shaded_2.12-2.1.4.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-api-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-core-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-slf4j-impl-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\logic_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\main-settings_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\main_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\protocol_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\reactive-streams-1.0.3.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\run_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sbinary_2.12-0.5.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sbt-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-collection-compat_2.12-2.11.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-compiler-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-library-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-parser-combinators_2.12-1.1.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-reflect-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-xml_2.12-2.3.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scripted-plugin_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-apache-httpasyncclient-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10
.1\shaded-jawn-parser_2.12-1.3.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-scalajson_2.12-1.0.0-M4.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-core_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-murmurhash_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-scalajson_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\slf4j-api-1.7.36.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ssl-config-core_2.12-0.6.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\task-system_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\tasks_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\template-resolver-0.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\test-agent-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\test-interface-1.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\testing_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-cache_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-control_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-interface-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-logging_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-position_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-relation_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-tracking_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zero-allocation-hashing-0.16.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-apiinfo_2.12-1.10.1.jar;C:\Users\youse\.sbt\
boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-classfile_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-classpath_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-compile-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-compile_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-lm-integration_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-persist-core-assembly-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-persist_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc_2.12-1.10.1.jar diff --git a/project/target/streams/compile/incOptions/_global/streams/out b/project/target/streams/compile/incOptions/_global/streams/out deleted file mode 100644 index 4b1f621b5..000000000 --- a/project/target/streams/compile/incOptions/_global/streams/out +++ /dev/null @@ -1,5 +0,0 @@ -[debug] Created transactional ClassFileManager with tempDir = C:\Users\youse\OneDrive\Documents\GitHub\scalation_2.0\project\target\scala-2.12\sbt-1.0\classes.bak -[debug] About to delete class files: -[debug] We backup class files: -[debug] Created transactional ClassFileManager with tempDir = C:\Users\youse\OneDrive\Documents\GitHub\scalation_2.0\project\target\scala-2.12\sbt-1.0\classes.bak -[debug] Removing the temporary directory used for backing up class files: C:\Users\youse\OneDrive\Documents\GitHub\scalation_2.0\project\target\scala-2.12\sbt-1.0\classes.bak diff --git a/project/target/streams/compile/internalDependencyClasspath/_global/streams/export b/project/target/streams/compile/internalDependencyClasspath/_global/streams/export deleted file mode 100644 index 8b1378917..000000000 --- 
a/project/target/streams/compile/internalDependencyClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ - diff --git a/project/target/streams/compile/internalDependencyClasspath/_global/streams/out b/project/target/streams/compile/internalDependencyClasspath/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/target/streams/compile/managedClasspath/_global/streams/export b/project/target/streams/compile/managedClasspath/_global/streams/export deleted file mode 100644 index 287231bc4..000000000 --- a/project/target/streams/compile/managedClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ -C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-library.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-compiler.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-reflect.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-xml_2.12-2.2.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\actions_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\caffeine-2.8.5.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\checker-qual-3.4.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\collections_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\command_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\compiler-bridge_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\compiler-interface-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\completion_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\config-1.4.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\core-macros_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\disruptor-3.4.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\error_prone_annotations-2.4.0.jar;C:\Users\youse\.sbt\
boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\file-tree-views-2.1.12.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\gigahorse-apache-http_2.12-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\gigahorse-core_2.12-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\io_2.12-1.10.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ipcsocket-1.6.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ivy-2.3.0-sbt-396a783bba347016e7fe30dacc60d355be607fe2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jansi-2.4.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-2.14.7-sbt-9c3b6aca11c57e339441442bbf58e550cdfecb79.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-builtins-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-native-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-reader-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-style-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-jansi-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-jna-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jna-5.13.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jna-platform-5.13.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jsch-0.2.17.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\launcher-interface-1.4.3.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\librarymanagement-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\librarymanagement-ivy_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.1
0.1\lm-coursier-shaded_2.12-2.1.4.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-api-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-core-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-slf4j-impl-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\logic_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\main-settings_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\main_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\protocol_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\reactive-streams-1.0.3.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\run_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sbinary_2.12-0.5.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sbt-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-collection-compat_2.12-2.11.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-compiler-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-library-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-parser-combinators_2.12-1.1.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-reflect-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-xml_2.12-2.3.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scripted-plugin_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-apache-httpasyncclient-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-jawn-parser_2.12-1.3.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-scalajson_2.12-1.0.0-M4.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\
org.scala-sbt\sbt\1.10.1\sjson-new-core_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-murmurhash_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-scalajson_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\slf4j-api-1.7.36.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ssl-config-core_2.12-0.6.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\task-system_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\tasks_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\template-resolver-0.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\test-agent-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\test-interface-1.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\testing_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-cache_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-control_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-interface-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-logging_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-position_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-relation_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-tracking_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zero-allocation-hashing-0.16.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-apiinfo_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-classfile_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-classpath_2.12-1.10.1.jar;C:\Us
ers\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-compile-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-compile_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-lm-integration_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-persist-core-assembly-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-persist_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc_2.12-1.10.1.jar diff --git a/project/target/streams/compile/scalacOptions/_global/streams/out b/project/target/streams/compile/scalacOptions/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/target/streams/compile/unmanagedClasspath/_global/streams/export b/project/target/streams/compile/unmanagedClasspath/_global/streams/export deleted file mode 100644 index 8b1378917..000000000 --- a/project/target/streams/compile/unmanagedClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ - diff --git a/project/target/streams/compile/unmanagedClasspath/_global/streams/out b/project/target/streams/compile/unmanagedClasspath/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/target/streams/compile/unmanagedJars/_global/streams/export b/project/target/streams/compile/unmanagedJars/_global/streams/export deleted file mode 100644 index 8b1378917..000000000 --- a/project/target/streams/compile/unmanagedJars/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ - diff --git a/project/target/streams/runtime/dependencyClasspath/_global/streams/export b/project/target/streams/runtime/dependencyClasspath/_global/streams/export deleted file mode 100644 index 89827d136..000000000 --- a/project/target/streams/runtime/dependencyClasspath/_global/streams/export +++ /dev/null 
@@ -1 +0,0 @@ -C:\Users\youse\OneDrive\Documents\GitHub\scalation_2.0\project\target\scala-2.12\sbt-1.0\classes;C:\Users\youse\.sbt\1.0\plugins\target\scala-2.12\sbt-1.0\classes;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-compiler.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-library.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-reflect.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-xml_2.12-2.2.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\actions_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\caffeine-2.8.5.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\checker-qual-3.4.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\collections_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\command_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\compiler-bridge_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\compiler-interface-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\completion_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\config-1.4.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\core-macros_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\disruptor-3.4.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\error_prone_annotations-2.4.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\file-tree-views-2.1.12.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\gigahorse-apache-http_2.12-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\gigahorse-core_2.12-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\io_2.12-1.10.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ipcsocket-1.6.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.
scala-sbt\sbt\1.10.1\ivy-2.3.0-sbt-396a783bba347016e7fe30dacc60d355be607fe2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jansi-2.4.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-2.14.7-sbt-9c3b6aca11c57e339441442bbf58e550cdfecb79.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-builtins-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-native-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-reader-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-style-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-jansi-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-jna-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jna-5.13.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jna-platform-5.13.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jsch-0.2.17.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\launcher-interface-1.4.3.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\librarymanagement-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\librarymanagement-ivy_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\lm-coursier-shaded_2.12-2.1.4.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-api-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-core-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-slf4j-impl-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\logic_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\main-settings_2.12-1.10.1
.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\main_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\protocol_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\reactive-streams-1.0.3.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\run_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sbinary_2.12-0.5.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sbt-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-collection-compat_2.12-2.11.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-compiler-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-library-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-parser-combinators_2.12-1.1.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-reflect-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-xml_2.12-2.3.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scripted-plugin_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-apache-httpasyncclient-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-jawn-parser_2.12-1.3.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-scalajson_2.12-1.0.0-M4.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-core_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-murmurhash_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-scalajson_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\slf4j-api-1.7.36.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ssl-config-core_2.12-0.6.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\
org.scala-sbt\sbt\1.10.1\task-system_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\tasks_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\template-resolver-0.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\test-agent-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\test-interface-1.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\testing_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-cache_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-control_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-interface-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-logging_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-position_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-relation_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-tracking_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zero-allocation-hashing-0.16.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-apiinfo_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-classfile_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-classpath_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-compile-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-compile_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-lm-integration_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-persist-core-assembly-1.10.1.ja
r;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-persist_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc_2.12-1.10.1.jar diff --git a/project/target/streams/runtime/exportedProducts/_global/streams/export b/project/target/streams/runtime/exportedProducts/_global/streams/export deleted file mode 100644 index c09bd0fef..000000000 --- a/project/target/streams/runtime/exportedProducts/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ -C:\Users\youse\OneDrive\Documents\GitHub\scalation_2.0\project\target\scala-2.12\sbt-1.0\classes diff --git a/project/target/streams/runtime/externalDependencyClasspath/_global/streams/export b/project/target/streams/runtime/externalDependencyClasspath/_global/streams/export deleted file mode 100644 index 7f7bde626..000000000 --- a/project/target/streams/runtime/externalDependencyClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ -C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-compiler.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-library.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-reflect.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-xml_2.12-2.2.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\actions_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\caffeine-2.8.5.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\checker-qual-3.4.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\collections_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\command_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\compiler-bridge_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\compiler-interface-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\completion_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\config-1.4.2.jar;C:\Users
\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\core-macros_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\disruptor-3.4.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\error_prone_annotations-2.4.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\file-tree-views-2.1.12.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\gigahorse-apache-http_2.12-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\gigahorse-core_2.12-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\io_2.12-1.10.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ipcsocket-1.6.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ivy-2.3.0-sbt-396a783bba347016e7fe30dacc60d355be607fe2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jansi-2.4.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-2.14.7-sbt-9c3b6aca11c57e339441442bbf58e550cdfecb79.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-builtins-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-native-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-reader-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-style-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-jansi-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-jna-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jna-5.13.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jna-platform-5.13.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jsch-0.2.17.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\launcher
-interface-1.4.3.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\librarymanagement-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\librarymanagement-ivy_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\lm-coursier-shaded_2.12-2.1.4.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-api-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-core-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-slf4j-impl-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\logic_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\main-settings_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\main_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\protocol_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\reactive-streams-1.0.3.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\run_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sbinary_2.12-0.5.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sbt-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-collection-compat_2.12-2.11.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-compiler-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-library-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-parser-combinators_2.12-1.1.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-reflect-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-xml_2.12-2.3.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scripted-plugin_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\s
bt\1.10.1\shaded-apache-httpasyncclient-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-jawn-parser_2.12-1.3.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-scalajson_2.12-1.0.0-M4.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-core_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-murmurhash_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-scalajson_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\slf4j-api-1.7.36.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ssl-config-core_2.12-0.6.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\task-system_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\tasks_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\template-resolver-0.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\test-agent-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\test-interface-1.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\testing_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-cache_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-control_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-interface-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-logging_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-position_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-relation_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-tracking_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zero-allocation-hashing-0.16.jar;C:
\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-apiinfo_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-classfile_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-classpath_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-compile-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-compile_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-lm-integration_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-persist-core-assembly-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-persist_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc_2.12-1.10.1.jar diff --git a/project/target/streams/runtime/fullClasspath/_global/streams/export b/project/target/streams/runtime/fullClasspath/_global/streams/export deleted file mode 100644 index 89827d136..000000000 --- a/project/target/streams/runtime/fullClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ 
-C:\Users\youse\OneDrive\Documents\GitHub\scalation_2.0\project\target\scala-2.12\sbt-1.0\classes;C:\Users\youse\.sbt\1.0\plugins\target\scala-2.12\sbt-1.0\classes;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-compiler.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-library.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-reflect.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-xml_2.12-2.2.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\actions_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\caffeine-2.8.5.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\checker-qual-3.4.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\collections_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\command_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\compiler-bridge_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\compiler-interface-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\completion_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\config-1.4.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\core-macros_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\disruptor-3.4.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\error_prone_annotations-2.4.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\file-tree-views-2.1.12.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\gigahorse-apache-http_2.12-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\gigahorse-core_2.12-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\io_2.12-1.10.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ipcsocket-1.6.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\
1.10.1\ivy-2.3.0-sbt-396a783bba347016e7fe30dacc60d355be607fe2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jansi-2.4.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-2.14.7-sbt-9c3b6aca11c57e339441442bbf58e550cdfecb79.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-builtins-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-native-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-reader-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-style-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-jansi-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-jna-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jna-5.13.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jna-platform-5.13.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jsch-0.2.17.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\launcher-interface-1.4.3.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\librarymanagement-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\librarymanagement-ivy_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\lm-coursier-shaded_2.12-2.1.4.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-api-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-core-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-slf4j-impl-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\logic_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\main-settings_2.12-1.10.1.jar;C:\Users\
youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\main_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\protocol_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\reactive-streams-1.0.3.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\run_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sbinary_2.12-0.5.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sbt-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-collection-compat_2.12-2.11.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-compiler-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-library-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-parser-combinators_2.12-1.1.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-reflect-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-xml_2.12-2.3.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scripted-plugin_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-apache-httpasyncclient-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-jawn-parser_2.12-1.3.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-scalajson_2.12-1.0.0-M4.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-core_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-murmurhash_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-scalajson_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\slf4j-api-1.7.36.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ssl-config-core_2.12-0.6.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\
sbt\1.10.1\task-system_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\tasks_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\template-resolver-0.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\test-agent-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\test-interface-1.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\testing_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-cache_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-control_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-interface-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-logging_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-position_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-relation_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-tracking_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zero-allocation-hashing-0.16.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-apiinfo_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-classfile_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-classpath_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-compile-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-compile_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-lm-integration_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-persist-core-assembly-1.10.1.jar;C:\Users\you
se\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-persist_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc_2.12-1.10.1.jar diff --git a/project/target/streams/runtime/internalDependencyClasspath/_global/streams/export b/project/target/streams/runtime/internalDependencyClasspath/_global/streams/export deleted file mode 100644 index c09bd0fef..000000000 --- a/project/target/streams/runtime/internalDependencyClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ -C:\Users\youse\OneDrive\Documents\GitHub\scalation_2.0\project\target\scala-2.12\sbt-1.0\classes diff --git a/project/target/streams/runtime/internalDependencyClasspath/_global/streams/out b/project/target/streams/runtime/internalDependencyClasspath/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/target/streams/runtime/managedClasspath/_global/streams/export b/project/target/streams/runtime/managedClasspath/_global/streams/export deleted file mode 100644 index 7f7bde626..000000000 --- a/project/target/streams/runtime/managedClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ 
-C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-compiler.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-library.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-reflect.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\lib\scala-xml_2.12-2.2.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\actions_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\caffeine-2.8.5.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\checker-qual-3.4.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\collections_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\command_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\compiler-bridge_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\compiler-interface-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\completion_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\config-1.4.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\core-macros_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\disruptor-3.4.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\error_prone_annotations-2.4.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\file-tree-views-2.1.12.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\gigahorse-apache-http_2.12-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\gigahorse-core_2.12-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\io_2.12-1.10.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ipcsocket-1.6.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ivy-2.3.0-sbt-396a783bba347016e7fe30dacc60d355be607fe2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jansi-2.4.1.jar;C:\Users\youse\.s
bt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-2.14.7-sbt-9c3b6aca11c57e339441442bbf58e550cdfecb79.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-builtins-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-native-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-reader-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-style-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-jansi-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jline-terminal-jna-3.24.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jna-5.13.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jna-platform-5.13.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\jsch-0.2.17.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\launcher-interface-1.4.3.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\librarymanagement-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\librarymanagement-ivy_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\lm-coursier-shaded_2.12-2.1.4.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-api-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-core-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\log4j-slf4j-impl-2.17.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\logic_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\main-settings_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\main_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\protocol_2.12-1.10.1.ja
r;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\reactive-streams-1.0.3.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\run_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sbinary_2.12-0.5.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sbt-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-collection-compat_2.12-2.11.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-compiler-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-library-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-parser-combinators_2.12-1.1.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-reflect-2.12.19.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scala-xml_2.12-2.3.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\scripted-plugin_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-apache-httpasyncclient-0.7.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-jawn-parser_2.12-1.3.2.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\shaded-scalajson_2.12-1.0.0-M4.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-core_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-murmurhash_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\sjson-new-scalajson_2.12-0.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\slf4j-api-1.7.36.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\ssl-config-core_2.12-0.6.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\task-system_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\tasks_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19
\org.scala-sbt\sbt\1.10.1\template-resolver-0.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\test-agent-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\test-interface-1.0.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\testing_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-cache_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-control_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-interface-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-logging_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-position_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-relation_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\util-tracking_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zero-allocation-hashing-0.16.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-apiinfo_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-classfile_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-classpath_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-compile-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-compile_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-core_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-lm-integration_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-persist-core-assembly-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc-persist_2.12-1.10.1.jar;C:\Users\youse\.sbt\boot\scala-2.12.19\org.scala-sbt\sbt\1.10.1\zinc_2.12-1.10.1.j
ar diff --git a/project/target/streams/runtime/unmanagedClasspath/_global/streams/export b/project/target/streams/runtime/unmanagedClasspath/_global/streams/export deleted file mode 100644 index 8b1378917..000000000 --- a/project/target/streams/runtime/unmanagedClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ - diff --git a/project/target/streams/runtime/unmanagedClasspath/_global/streams/out b/project/target/streams/runtime/unmanagedClasspath/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/project/target/streams/runtime/unmanagedJars/_global/streams/export b/project/target/streams/runtime/unmanagedJars/_global/streams/export deleted file mode 100644 index 8b1378917..000000000 --- a/project/target/streams/runtime/unmanagedJars/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ - diff --git a/src/main/scala/scalation/Bool.scala b/src/main/scala/scalation/Bool.scala index 53bfaeef0..b0bb460ee 100644 --- a/src/main/scala/scalation/Bool.scala +++ b/src/main/scala/scalation/Bool.scala @@ -11,7 +11,8 @@ package scalation //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Extend `Boolean` to include an and, or, not, xor. Note: they all have the same precedence. +/** Extend `Boolean` to include an and, or, not, xor. + * @note: unfortunately they all have the same precedence. 
*/ extension (x: Boolean) inline infix def and (y: Boolean): Boolean = x && y diff --git a/src/main/scala/scalation/Calc.scala b/src/main/scala/scalation/Calc.scala index f4d22db97..ef1498296 100644 --- a/src/main/scala/scalation/Calc.scala +++ b/src/main/scala/scalation/Calc.scala @@ -19,9 +19,11 @@ import scala.math._ */ object Calc: -// def f(x: Double): Double = 6.67408E-11 * 5.97219E24 * x / 6.371E6~^2 + log (1.0) + def f(x: Double): Double = 6.67430E-11 * 5.97219E24 * x / 6.371E6~^2 - def f(x: Double): Double = ceil (log10 (x / 9.0)) + def f2(x: Double): Double = 6.67408E-11 * 5.97219E24 * x / 6.371E6~^2 + log (1.0) + +// def f(x: Double): Double = ceil (log10 (x / 9.0)) end Calc diff --git a/src/main/scala/scalation/CircularQueue.scala b/src/main/scala/scalation/CircularQueue.scala index 30f80b9ac..0c2a7ee9a 100644 --- a/src/main/scala/scalation/CircularQueue.scala +++ b/src/main/scala/scalation/CircularQueue.scala @@ -80,7 +80,6 @@ class CircularQueue [A: ClassTag] (cap: Int): if isFull then front = (front + 1) % maxSize // isFull => drop oldest element nElem -= 1 - end if rear = (rear + 1) % maxSize // advance rear index store(rear) = elem // store new element nElem += 1 diff --git a/src/main/scala/scalation/CommonFunctions.scala b/src/main/scala/scalation/CommonFunctions.scala index 9fef91200..ac31cd10b 100644 --- a/src/main/scala/scalation/CommonFunctions.scala +++ b/src/main/scala/scalation/CommonFunctions.scala @@ -23,9 +23,11 @@ * //def log10 (x: Double): Double * def pow2 (x: Double): Double * def pow10 (x: Double): Double + * def pow (x: Double, a: Int, b: Int): Double * //def log1p (x: Double): Double * //def expm1 (x: Double): Double * def logb (b: Double, x: Double): Double + * def ihs (x: Double): Double * * Many common functions are also supplied by the `scala.math` package. 
*/ @@ -125,6 +127,28 @@ inline def log2 (x: Double): Double = log (x) / log_2 inline def log10 (x: Double): Double = log (x) / log_10 */ +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Find the y-th root of x, i.e., x ~^ 1/y for Scala Longs. + * r = x ~^ 1/y is largest long integer r such that r ~^ y <= x. + * @see http://en.wikipedia.org/wiki/Shifting_nth_root_algorithm + * @see http://stackoverflow.com/questions/8826822/calculate-nth-root-with-integer-arithmetic + * @param x the Long base parameter + * @param y the Long root level (reciprocal exponent) parameter + */ +def lroot (x: Long, y: Long): Long = + var r = 1L // initial guess for root + + def step: Long = ((y-1) * r + x / r~^(y-1)) / y + + var q = step // find better root + while + r = q + q = step + q < r + do () // repeat looking for better root + r +end lroot + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The power base 2 function 2^x of type `FunctionS2S`. * Its inverse function is log2. @@ -139,6 +163,21 @@ inline def pow2 (x: Double): Double = pow (2.0, x) */ inline def pow10 (x: Double): Double = pow (10.0, x) +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Raise x to a rational (fractional) power r = b/c where b and c are longs using logic to handle + * the sign separately if the exponent is a simple fraction with an odd denominator. + * In particular, use `pow` when x is non-negative, else the identity (−a)^(b/c) = (−1)^b a^(b/c) + * when c is odd, otherwise return Not-a-Number (NaN). 
+ * @see math.stackexchange.com/questions/317528/how-do-you-compute-negative-numbers-to-fractional-powers + * @param x the value of the base x^r + * @param r the rational `Rat` exponent: power/root, num/den, b/c + */ +def pow_ (x: Double, r: Rat): Double = + if x >= 0.0 then pow (x, r.toDouble) + else if r.den % 2 == 1 then pow (-1, r.num.toDouble) * pow (-x, r.toDouble) + else Double.NaN +end pow_ + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The power to p function x^p of type `FunctionS2S`. * Its inverse function is powTo (1/p)(x) @@ -233,6 +272,15 @@ inline def logb (b: Double, x: Double): Double = log (x) / log (b) println (s"expm1 (log1p ($x)) = ${expm1 (a)}") println (s"exp (log (1+$x)) - 1 = ${exp (b) - 1}") + banner (s"Test pow_ (xx, r)") + val xx = -8.0 + println (s"pow_ ($xx, Rat (2, 3))) = ${pow_ (xx, Rat (2, 3))}") + println (s"pow_ ($xx, Rat (1, 3))) = ${pow_ (xx, Rat (1, 3))}") + println (s"pow_ ($xx, Rat (1, 2))) = ${pow_ (xx, Rat (1, 2))}") + println (s"pow ($xx, 2/3)) = ${pow (xx, 2.toDouble/3)}") + println (s"pow ($xx, 1/3)) = ${pow (xx, 1.toDouble/3)}") + println (s"pow ($xx, 1/2)) = ${pow (xx, 1.toDouble/ 2)}") + banner (s"Test ihs (x = $x)") val u = ihs (x) println (s"lhs ($x)) = $u") diff --git a/src/main/scala/scalation/mathstat/Complex.scala b/src/main/scala/scalation/Complex.scala similarity index 90% rename from src/main/scala/scalation/mathstat/Complex.scala rename to src/main/scala/scalation/Complex.scala index e6ac80a48..5445af94a 100644 --- a/src/main/scala/scalation/mathstat/Complex.scala +++ b/src/main/scala/scalation/Complex.scala @@ -9,7 +9,6 @@ */ package scalation -package mathstat //import scala.language.implicitConversions import scala.math.{acos, cos, sin} @@ -60,54 +59,64 @@ case class Complex (re: Double, im: Double = 0.0) inline def negate (c: Complex): Complex = -c //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add two complex numbers. 
+ /** Add two complex numbers, this + c. * @param c add complex c to this */ def + (c: Complex): Complex = Complex (re + c.re, im + c.im) - def + (d: Double): Complex = Complex (re + d, im) - def plus (c: Complex, d: Complex): Complex = c + d + def + (d: Double): Complex = Complex (re + d, im) + inline def plus (c: Complex, d: Complex): Complex = c + d //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Subtract two complex numbers. + /** Subtract two complex numbers, this - c. * @param c subtract c from this */ def - (c: Complex): Complex = Complex (re - c.re, im - c.im) - def - (d: Double): Complex = Complex (re - d, im) + def - (d: Double): Complex = Complex (re - d, im) inline def minus (c: Complex, d: Complex): Complex = c - d //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Multiply two complex numbers. - * @param c multiply 'this' times c + /** Multiply two complex numbers, this * c. + * @param c multiply this times c */ def * (c: Complex): Complex = Complex (re * c.re - im * c.im, re * c.im + im * c.re) - def * (d: Double): Complex = Complex (re * d, im * d) + def * (d: Double): Complex = Complex (re * d, im * d) inline def times (c: Complex, d: Complex): Complex = c * d //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Divide two complex numbers. + /** Divide two complex numbers, this / c. * @param c divide this by c */ def / (c: Complex): Complex = Complex ((re * c.re + im * c.im) / (c.re * c.re + c.im * c.im), (im * c.re - re * c.im) / (c.re * c.re + c.im * c.im)) - def / (d: Double): Complex = Complex (re / d, im / d) + def / (d: Double): Complex = Complex (re / d, im / d) inline def div (c: Complex, d: Complex): Complex = c / d //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Raise a complex to the 'r'-th power (a double) using polar coordinates. 
+ /** Raise a complex number to the 'r'-th power (a double) using polar coordinates. * @param r the power/exponent */ def ~^ (r: Double): Complex = - val (rad, ang) = polar - Complex.create (rad ~^ r, ang * r) - def ↑ (r: Double): Complex = this ~^ r + val (radi, ang) = polar + Complex.create (radi ~^ r, ang * r) inline def pow (c: Complex, r: Double): Complex = c ~^ r + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Raise a complex number to the 'r'-th power (a rational number) using polar coordinates. + * Extended to handle a negative base. + * @see `pow_` in CommonFunctions. + * @param r the power/exponent + */ + def ↑ (r: Rat): Complex = + val (radi, ang) = polar + Complex.create (radi ↑ r, ang * r.toDouble) + inline def pow_ (c: Complex, r: Rat): Complex = c ↑ r + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return whether two complex numbers are nearly equal. * @param c compare this with c */ - def =~ (c: Complex): Boolean = (re =~ c.re && im =~ c.im) - def ≈ (c: Complex): Boolean = near_eq (this, c) + def =~ (c: Complex): Boolean = re =~ c.re && im =~ c.im + inline def ≈ (c: Complex): Boolean = this =~ c inline def near_eq (c: Complex, d: Complex): Boolean = c =~ d //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -123,7 +132,7 @@ case class Complex (re: Double, im: Double = 0.0) //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the complex number in polar coordinates (radius, angle). */ - def polar: (Double, Double) = { val rad = radius; (rad, acos (re / rad)) } + def polar: (Double, Double) = { val radi = radius; (radi, acos (re / radi)) } //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the complex conjugate: if z = (a + bi) then z.bar = (a - bi). @@ -332,6 +341,12 @@ object Complex: private val rr2 = 1.0 / math.sqrt (2.0) // reciprocal root of 2. 
+ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Implicit conversion from `Double` to `Complex`. + * @param d the Double parameter to convert + */ +// implicit def double2Complex (d: Double): Rat = fromDouble (d) + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a complex number from a pair of 'Double's. * @param ct the tuple form of a complex number @@ -362,10 +377,10 @@ object Complex: //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a complex number from the given polar coordinates. - * @param rad the radius (the length of the vector in the 're-im' plane) - * @param ang the angle (the angle of the vector above the 're'-axis) + * @param radi the radius (the length of the vector in the 're-im' plane) + * @param ang the angle (the angle of the vector above the 're'-axis) */ - def create (rad: Double, ang: Double): Complex = Complex (rad * cos (ang), rad * sin (ang)) + def create (radi: Double, ang: Double): Complex = Complex (radi * cos (ang), radi * sin (ang)) //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the absolute value of that complex number. 
@@ -421,7 +436,7 @@ object Complex: /** Ordering for complex numbers */ val ord = new Ordering [Complex] - { def compare (c: Complex, d: Complex) = c compare d } + { def compare (c: Complex, d: Complex) = c compare d } end Complex @@ -469,7 +484,7 @@ end Complex println ("c < d = " + (c < d)) println ("d < c = " + (d < c)) - def sort (arr: Array [Complex]): Unit = { quickSort (arr)(Complex.ord) } + def sort (arr: Array [Complex]): Unit = { quickSort (arr)(using Complex.ord) } val arr = Array (e, d, c) println ("arr = " + stringOf (arr)) diff --git a/src/main/scala/scalation/DoublyLinkedList.scala b/src/main/scala/scalation/DoublyLinkedList.scala index 0eac28090..f43871205 100644 --- a/src/main/scala/scalation/DoublyLinkedList.scala +++ b/src/main/scala/scalation/DoublyLinkedList.scala @@ -5,52 +5,56 @@ * @date Sun Feb 25 20:55:28 EST 2024 * @see LICENSE (MIT style license file). * - * @note Data Structure: Doubly Linked List + * @note Data Structure: Doubly Linked List with head and tail References + * suitable for implementing queues supporting removal of any element */ package scalation -import scala.collection.mutable.AbstractIterable -import scala.reflect.ClassTag - +import scala.collection.mutable.{AbstractIterable, ListBuffer} //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `DoublyLinkedList` class provides a data structure implementing mutable - * doubly-linked lists. - * next --> --> - * tail (last car) --> [e1] [e2] [e3] <-- head (lead car) - * prev <-- <-- - * @param A the type of the elements/values in the list +/** The `DoublyLinkedList` class provides a data structure implementing mutable doubly-linked lists. 
+ * Imagine a line of elements/cars moving left to right in a list/lane: + * remove head/lead car when it reaches the end of the lane + * add tail/last car when it reaches the beginning of the lane + * + * ahead --> --> + * tail (last car) --> [c3] [c2] [c1] <-- head (lead car) + * behind <-- <-- + * + * @tparam A the type of the elements/values in the list */ -class DoublyLinkedList [A: ClassTag] - extends AbstractIterable [A] - with Serializable: +class DoublyLinkedList [A] + extends AbstractIterable [A] + with Serializable: - private val debug = debugf ("DoublyLinkedList", true) + private val debug = debugf ("DoublyLinkedList", true) // debug function //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `Node` inner case class wraps elements in nodes for double linkage. - * @param elem the element - * @param prev the predecessor node (car ahead) - * @param next the successor node (car behind) + * @param elem the element in this node (you) + * @param ahead the node ahead of you (e.g., the car ahead) + * @param behind the node behind you (e.g., the car behind) */ - case class Node (elem: A, var prev: Node, var next: Node): + case class Node (elem: A, var ahead: Node, var behind: Node): override def toString: String = s"Node ($elem)" end Node - private var head_ : Node = null // head node (first car) + private var head_ : Node = null // head node (lead car) private var tail_ : Node = null // tail node (last car) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `NodeIterator` inner class supports iterating over all the nodes in this list. + /** The `NodeIterator` inner class supports iterating over all the nodes in this list, + * moving foreward in list/lane (tail to head). 
* @param ns the starting node (defaults to tail) */ class NodeIterator (ns: Node = tail_) extends Iterator [Node]: - var n = ns + var n = ns // current node (positioned in list) def hasNext: Boolean = n != null - def next (): Node = { val n_ = n; n = n.next; n_ } + def next (): Node = { val cur = n; n = n.ahead; cur } // move forward towards the front of list/lane end NodeIterator //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -59,18 +63,19 @@ class DoublyLinkedList [A: ClassTag] */ def nodeIterator: Iterator [Node] = new NodeIterator () - def getPrev (n: Node): Node = n.prev + inline def getAhead (n: Node): Node = n.ahead - def getNext (n: Node): Node = n.next + inline def getBehind (n: Node ): Node = n.behind //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `ListIterator` inner class supports iterating over all the elements in this list. + /** The `ListIterator` inner class supports iterating over all the elements in this list, + * moving foreward in list/lane (tail to head). * @param ns the starting node (defaults to tail) */ class ListIterator (ns: Node = tail_) extends Iterator [A]: - var n = ns + var n = ns // current node (positioned in list) def hasNext: Boolean = n != null - def next (): A = { val n_ = n; n = n.next; n_.elem } + def next (): A = { val cur = n; n = n.ahead; cur.elem } // move forward towards the front of list/lane end ListIterator //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -83,137 +88,127 @@ class DoublyLinkedList [A: ClassTag] /** Retrieve the element in node n (e.g., the current car). * @param n the node containing the sought element */ - def elemAt (n: Node): A = n.elem + inline def elemAt (n: Node): A = n.elem //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the head/first node in the list (e.g, node holding the first car). 
+ /** Return the lead/first node in the list (e.g, node holding the lead car). */ - override def head: A = head_.elem - - def headNode: Node = head_ + inline override def head: A = head_.elem + inline def headNode: Node = head_ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the tail/last node in the list (e.g, node holding the last car). + /** Return the trail/last node in the list (e.g, node holding the trail car). */ - override def last: A = tail_.elem + inline override def last: A = tail_.elem - def lastNode: Node = tail_ + inline def lastNode: Node = tail_ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return whether the list is empty (head and tail are null). */ - override def isEmpty: Boolean = head_ == null + inline override def isEmpty: Boolean = head_ == null - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add the first element to an empty list and return the new node n. + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::fixed + /** Add the first element (a lead car) to a list and return the new node n. 
* @param elm the element to be added - * @return the new node added n + * @return the new node be added */ def addFirst (elm: A): Node = - val n = Node (elm, null, head_) // new node has no predecessor and its next is the current head + val n = Node (elm, null, head_) // new node has nothing ahead, and its behind is the current head if head_ != null then // if list is not empty - head_.prev = n // update the previous head's prev to point to the new node + head_.ahead = n // update the head's ahead to point to the new node head_ = n // update head to point to the new node if tail_ == null then // if the list was empty (tail is null) tail_ = n // set tail to the new node + debug ("addFirst", s"added node $n as the first element in list") n end addFirst //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add the new element into the list AFTER the given predecessor node `pn` and - * return the new node `n`. - * Relink: _pn_ <-> n <-> nn - * @param elm the element to be added - * @param pn the predecessor node (defaults to head if not given) + /** Add a new element into the list BEFORE/behind the given node `nn` and return + * the new node `n`. 
+ * Relink: bn <-> nn TO bn <-> n <-> nn + * @param elm the new element to be added + * @param nn the given node (defaults to tail if not given) * @return the new node `n` */ - def addAfter (elm: A, pn: Node = head_): Node = - if pn == null || isEmpty then - addFirst (elm) + def add (elm: A, nn: Node = tail_): Node = + if isEmpty || nn == null then + addFirst (elm) // case 1: List is empty or no reference node else - val nn = pn.next // successor node nn - val n = Node (elm, pn, nn) // make a new node n - pn.next = n // link forward - if nn != null then nn.prev = n // link backward + val bn = nn.behind // bn references the node behind nn + val n = Node (elm, nn, bn) // new node is inserted with nn ahead and bn behind + + if bn != null then bn.ahead = n // fix ahead linkage of bn (behind) node + nn.behind = n // fix behind linkage of the nn (given) node - if pn == head_ then head_ = n // if pn was head, reset to n - debug ("addAfter", s"pn = $pn, n = $n, nn = $nn") + if nn == tail_ then tail_ = n // update tail if inserting at the end + + debug ("add", s"[bn = $bn] <-> [n = $n] <-> [nn = $nn]") n - end addAfter + end add //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add the new element into the list BEFORE the given successor node `nn` and - * return the new node `n`. - * Relink: pn <-> _nn_ TO pn <-> n <-> _nn_ + /** Add a new element BEFORE the given successor node `nn` and return the new node `n`. + * Relink: pn <-> nn TO pn <-> n <-> nn + * The predecessor (`pn`) of the successor node `nn` is relinked to point to the new node `n`. + * Similarly, the new node `n` links back to `pn` and forward to `nn`. If `nn` is `null`, + * this method adds the element as the first element in the list. 
* @param elm the element to be added - * @param nn the successor(next) node (defaults to tail if not given) - * @return the new node `n` - */ - def add (elm: A, nn: Node = tail_): Node = - if nn == null || isEmpty then - addFirst (elm) + * @param nn the successor node (defaults to `null` if not provided) + * @return the newly created node `n` inserted before node `nn` + * + def addBefore (elm: A, pn: Node): Node = + val nn = pn.behind // Get the behind node (car behind `pn`) + + if nn == null then + // Case 1: `pn` is the head, so insert BEHIND it and assume tail. + val n = Node(elm, pn, null) // New node's ahead = pn, behind = null + pn.behind = n // Fix behind linkage + tail_ = n // Update the tail pointer + debug("addBefore", s"Inserted node $n behind head $pn (new tail)") + n else - val pn = nn.prev // predecessor node pn - val n = Node (elm, pn, nn) // make a new node n - nn.prev = n // link backward - if pn != null then pn.next = n // link forward - - if nn == tail_ then tail_ = n // if nn was tail, reset to n - debug("add", s"pn = $pn, n = $n, nn = $nn") + // Case 2: `pn` has a behind node (normal case, inserting between two nodes) + val n = Node(elm, pn, nn) // Insert between `pn` (ahead) and `nn` (behind) + pn.behind = n // Fix pn's behind pointer + nn.ahead = n // Fix nn's ahead pointer + debug("addBefore", s" pn= $pn n=$n and $nn") n - end add + end addBefore + */ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Remove the node `n` from the linked list. 
- * Relink: pn <-> n <-> nn TO pn <-> nn - * @param n the node to remove (unlink) + * Relink: bn <-> n <-> an TO bn <-> an + * @param n the given node to remove (unlink) */ def remove (n: Node = head_): Unit = - val pn = n.prev // predecessor node pn - val nn = n.next // successor node nn + val an = n.ahead // an = the node/car AHEAD of node n + val bn = n.behind // bn = the node/car BEHIND node n - if pn != null then pn.next = nn // forward bypass of n - if nn != null then nn.prev = pn // backward bypass of n + if an != null then an.behind = bn // set an's ref: bn <- an + if bn != null then bn.ahead = an // set nn's ref: bn -> an - if n == head_ then head_ = nn // if n was head, reset to nn - if n == tail_ then tail_ = pn // if n was tail, reset to pn + if n == head_ then head_ = bn // if n was head, reset to bn + if n == tail_ then tail_ = an // if n was tail, reset to an - n.prev = null // n no longer links - n.next = null - debug ("remove", s"pn = $pn, nn = $nn") + n.ahead = null // n no longer links + n.behind = null + debug ("remove", s"[bn = $bn] <-> [an = $an]") end remove - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add the new element BEFORE the given successor node `nn` and return the new node `n`. - * Relink: pn <-> nn TO pn <-> n <-> nn - * The predecessor (`pn`) of the successor node `nn` is relinked to point to the new node `n`. - * Similarly, the new node `n` links back to `pn` and forward to `nn`. If `nn` is `null`, - * this method adds the element as the first element in the list. 
- * @param elm the element to be added - * @param nn the successor node (defaults to `null` if not provided) - * @return the newly created node `n` inserted before node `nn` - */ - def addBefore (elm: A, nn: Node): Node = - if nn == null then addFirst (elm) // if nn is null, add as the first element - - val n = Node (elm, nn.prev, nn) // create the new node n with links to pn and nn - if nn.prev != null then // if there is a predecessor, link it to n - nn.prev.next = n - nn.prev = n // link nn back to the new node n - debug ("addBefore", s"elm = $elm, inserted before = ${nn.elem}") - n - end addBefore - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Clear the list of all nodes (and their elements). + /** Clear the list of all nodes (and their elements) by setting head_ and tail_ + * to null, so CG can reclaim the unreferenced nodes. */ def clear (): Unit = { tail_ = null; head_ = null } //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this doubly linked list to a string. + /** Convert this doubly linked list to a string (tail to head). */ - override def toString (): String = + override def toString: String = val sb = StringBuilder ("DoublyLinkedList (tail -") for n <- nodeIterator do sb.append (s"> [ $n ] <-") sb.append (" head)").mkString @@ -222,12 +217,11 @@ class DoublyLinkedList [A: ClassTag] //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Convert the elements of this doubly linked list to a Scala List. * This method is efficient in terms of maintaining the correct order without - * nneeding a separate reverse at the end. + * needing a separate reverse at the end. 
*/ - override def toList: List[A] = - val buf = new scala.collection.mutable.ListBuffer [A]() // use ListBuffer for efficient appends - for n <- nodeIterator do // traverse using the predefined nodeIterator - buf += n.elem + override def toList: List [A] = + val buf = ListBuffer [A] () // use ListBuffer for efficient appends + for n <- nodeIterator do buf += n.elem // traverse using the predefined nodeIterator buf.toList // convert ListBuffer to List end toList @@ -240,30 +234,90 @@ end DoublyLinkedList */ @main def doublyLinkedListTest (): Unit = - banner ("Test the add method") + banner ("Test the addFirst and add methods") val dll = DoublyLinkedList [Int] () - for i <- 0 until 10 do dll.add (i) - val n = dll.headNode - println (s"n = $n") - println (dll.getNext (n)) + for i <- 0 until 10 do + if dll.isEmpty then dll.addFirst (i) + else dll.add (i) + + banner ("Test the toString method") + println (s"dll = $dll") + + banner ("Test the remove method") + while ! dll.isEmpty do + dll.remove () + println (s"dll = $dll") + +end doublyLinkedListTest - banner ("Test the addAfter method") - dll.clear () - for i <- 0 until 10 do dll.addAfter (i) - // bannern ("Test the addBefore method") - // dll.clear () - // val initialNode = dll.addFirst (10) // start by adding an initial node to reference - // for i <- 1 until 10 do - // dll.addBefore (i, initialNode) // add before the initial node +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `doublyLinkedListTest` main function tests the `DoublyLinkedList` class. 
+ * > runMain scalation.doublyLinkedListTest2 + */ +@main def doublyLinkedListTest2 (): Unit = + + banner ("Test the add method") + val dll = DoublyLinkedList [Int] () + for i <- 0 until 10 do dll.add(i) + val n = dll.headNode - // banner (dll.getprev (i)) - // banner (dll.getnext (i)) + println (s"n the head node is: $n") + println (s"the node behind n is: ${dll.getBehind (n)}") + println (s"the node ahead of n is: ${dll.getAhead (n)}") + println (s"dll = $dll") banner ("Test the remove method") while ! dll.isEmpty do dll.remove () println (dll) -end doublyLinkedListTest +end doublyLinkedListTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `doublyLinkedListTest` main function tests the `DoublyLinkedList` class. + * > runMain scalation.doublyLinkedListTest3 + * +@main def doublyLinkedListTest3 (): Unit = + + banner("Test the add and addBefore methods") + + // Create DoublyLinkedList + val dll = DoublyLinkedList[Int]() + + dll.add(98) // DLL now contains [ 98 ] (single node) + val head = dll.headNode + dll.addBefore(99, head) // Insert 99 before Node(98) + dll.addBefore(95, head) // Insert 99 before Node(98) + println(dll) + +@main def doublyLinkedListTest4(): Unit = + banner("Test the add and addBefore methods") + + // Case 1: Insert normally at the tail (Default behavior) + val dll = DoublyLinkedList[Int]() + for i <- 0 until 5 do dll.add(i) + println("After normal insertion:") + println(dll) + + // Case 2: Insert before a given node (Middle of list) + val refNode = dll.headNode.behind // Second node in the list + println(s"Inserting before $refNode") + dll.addBefore(99, refNode) + println("After inserting 99 before the second node:") + println(dll) + + // Case 3: Insert before head (Becomes new head) + dll.addBefore(77, dll.headNode) + println("After inserting 77 before head:") + println(dll) + + // Case 4: Insert when list is empty (Should work with addFirst) + val emptyDll = DoublyLinkedList[Int]() + 
emptyDll.addBefore(55, null) + println("After inserting 55 into empty list:") + println(emptyDll) + + */ diff --git a/src/main/scala/scalation/DoublyLinkedList.scala.bak2 b/src/main/scala/scalation/DoublyLinkedList.scala.bak2 new file mode 100644 index 000000000..ef5a183c2 --- /dev/null +++ b/src/main/scala/scalation/DoublyLinkedList.scala.bak2 @@ -0,0 +1,270 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller, Korede Bishi + * @version 2.0 + * @date Sun Feb 25 20:55:28 EST 2024 + * @see LICENSE (MIT style license file). + * + * @note Data Structure: Doubly Linked List with front and tail references + * + * @see + * @see en.wikipedia.org/wiki/Queue_(abstract_data_type) + * + * May be used to implement FIFO Queues with removal of any element. + * Enqueue: add new element at the tail + * Denqueue: remove oldest element at front + */ + +package scalation + +import scala.collection.mutable.AbstractIterable +import scala.reflect.ClassTag + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `DoublyLinkedList` class provides a data structure implementing mutable + * doubly-linked lists. + * next --> --> + * tail (last car) --> [e3] [e2] [e1] <-- front (lead car) + * prev <-- <-- + * @param A the type of the elements/values in the list + */ +class DoublyLinkedList [A: ClassTag] + extends AbstractIterable [A] + with Serializable: + + private val debug = debugf ("DoublyLinkedList", true) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `Node` inner case class wraps elements in nodes for double linkage. 
+ * @param elem the element + * @param prev the predecessor node (car ahead) + * @param next the successor node (car behind) + */ + case class Node (elem: A, var prev: Node, var next: Node): + + override def toString: String = s"Node ($elem)" + + end Node + + private var front_ : Node = null // front node (first car) + private var tail_ : Node = null // tail node (last car) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `NodeIterator` inner class supports iterating over all the nodes in this list. + * @param ns the starting node (defaults to tail) + */ + class NodeIterator (ns: Node = tail_) extends Iterator [Node]: + var n = ns + def hasNext: Boolean = n != null + def next (): Node = { val n_ = n; n = n.next; n_ } + end NodeIterator + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return an iterator for retrieving all the nodes in this list. + * @see scala.collection.IterableOnce + */ + def nodeIterator: Iterator [Node] = new NodeIterator () + + def getPrev (n: Node): Node = n.prev + + def getNext (n: Node): Node = n.next + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `ListIterator` inner class supports iterating over all the elements in this list. + * @param ns the starting node (defaults to tail) + */ + class ListIterator (ns: Node = tail_) extends Iterator [A]: + var n = ns + def hasNext: Boolean = n != null + def next (): A = { val n_ = n; n = n.next; n_.elem } + end ListIterator + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return an iterator for retrieving all the elements in this list. + * @see scala.collection.IterableOnce + */ + def iterator: Iterator [A] = new ListIterator () + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Retrieve the element in node n (e.g., the current car). 
+ * @param n the node containing the sought element + */ + def elemAt (n: Node): A = n.elem + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the front/first node in the list (e.g, node holding the first car). + */ + override def front: A = front_.elem + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the tail/last node in the list (e.g, node holding the last car). + */ + override def last: A = tail_.elem + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether the list is empty (front and tail are null). + */ + override def isEmpty: Boolean = front_ == null + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Add the first element to an empty list and return the new node n. + * @param elm the element to be added + * @return the new node added n + */ + def addFirst (elm: A): Node = + val n = Node (elm, null, front_) // new node has no predecessor and its next is the current front + if front_ != null then // if list is not empty + front_.prev = n // update the previous front's prev to point to the new node + front_ = n // update front to point to the new node + if tail_ == null then // if the list was empty (tail is null) + tail_ = n // set tail to the new node + n + end addFirst + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Add the new element into the list AFTER the given predecessor node `pn` and + * return the new node `n`. 
+ * Relink: _pn_ <-> n <-> nn + * @param elm the element to be added + * @param pn the predecessor node (defaults to front if not given) + * @return the new node `n` + */ + def addAfter (elm: A, pn: Node = front_): Node = + if pn == null || isEmpty then + addFirst (elm) + else + val nn = pn.next // successor node nn + val n = Node (elm, pn, nn) // make a new node n + pn.next = n // link forward + if nn != null then nn.prev = n // link backward + + if pn == front_ then front_ = n // if pn was front, reset to n + debug ("addAfter", s"pn = $pn, n = $n, nn = $nn") + n + end addAfter + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Add the new element into the list BEFORE the given successor node `nn` and + * return the new node `n`. + * Relink: pn <-> _nn_ TO pn <-> n <-> _nn_ + * @param elm the element to be added + * @param nn the successor(next) node (defaults to tail if not given) + * @return the new node `n` + */ + def add (elm: A, nn: Node = tail_): Node = + if nn == null || isEmpty then + addFirst (elm) + else + val pn = nn.prev // predecessor node pn + val n = Node (elm, pn, nn) // make a new node n + nn.prev = n // link backward + if pn != null then pn.next = n // link forward + + if nn == tail_ then tail_ = n // if nn was tail, reset to n + debug("add", s"pn = $pn, n = $n, nn = $nn") + n + end add + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Remove the node `n` from the linked list. 
+ * Relink: pn <-> n <-> nn TO pn <-> nn + * @param n the node to remove (unlink) + */ + def remove (n: Node = front_): Unit = + val pn = n.prev // predecessor node pn + val nn = n.next // successor node nn + + if pn != null then pn.next = nn // forward bypass of n + if nn != null then nn.prev = pn // backward bypass of n + + if n == front_ then front_ = nn // if n was front, reset to nn + if n == tail_ then tail_ = pn // if n was tail, reset to pn + + n.prev = null // n no longer links + n.next = null + debug ("remove", s"pn = $pn, nn = $nn") + end remove + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Add the new element BEFORE the given successor node `nn` and return the new node `n`. + * Relink: pn <-> nn TO pn <-> n <-> nn + * The predecessor (`pn`) of the successor node `nn` is relinked to point to the new node `n`. + * Similarly, the new node `n` links back to `pn` and forward to `nn`. If `nn` is `null`, + * this method adds the element as the first element in the list. + * @param elm the element to be added + * @param nn the successor node (defaults to `null` if not provided) + * @return the newly created node `n` inserted before node `nn` + */ + def addBefore (elm: A, nn: Node): Node = + if nn == null then addFirst (elm) // if nn is null, add as the first element + + val n = Node (elm, nn.prev, nn) // create the new node n with links to pn and nn + if nn.prev != null then // if there is a predecessor, link it to n + nn.prev.next = n + nn.prev = n // link nn back to the new node n + debug ("addBefore", s"elm = $elm, inserted before = ${nn.elem}") + n + end addBefore + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Clear the list of all nodes (and their elements). + */ + def clear (): Unit = { tail_ = null; front_ = null } + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert this doubly linked list to a string. 
+ */ + override def toString (): String = + val sb = StringBuilder ("DoublyLinkedList (tail -") + for n <- nodeIterator do sb.append (s"> [ $n ] <-") + sb.append (" front)").mkString + end toString + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert the elements of this doubly linked list to a Scala List. + * This method is efficient in terms of maintaining the correct order without + * nneeding a separate reverse at the end. + */ + override def toList: List[A] = + val buf = new scala.collection.mutable.ListBuffer [A]() // use ListBuffer for efficient appends + for n <- nodeIterator do // traverse using the predefined nodeIterator + buf += n.elem + buf.toList // convert ListBuffer to List + end toList + +end DoublyLinkedList + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `doublyLinkedListTest` main function tests the `DoublyLinkedList` class. + * > runMain scalation.doublyLinkedListTest + */ +@main def doublyLinkedListTest (): Unit = + + banner ("Test the add method") + val dll = DoublyLinkedList [Int] () + for i <- 0 until 10 do dll.add (i) + val n = dll.front + println (s"n = $n") + println (dll.getNext (n)) + + banner ("Test the addAfter method") + dll.clear () + for i <- 0 until 10 do dll.addAfter (i) + + // bannern ("Test the addBefore method") + // dll.clear () + // val initialNode = dll.addFirst (10) // start by adding an initial node to reference + // for i <- 1 until 10 do + // dll.addBefore (i, initialNode) // add before the initial node + + // banner (dll.getprev (i)) + // banner (dll.getnext (i)) + + banner ("Test the remove method") + while ! 
dll.isEmpty do + dll.remove () + println (dll) + +end doublyLinkedListTest + diff --git a/src/main/scala/scalation/Fib.scala b/src/main/scala/scalation/Fib.scala index 7c4b5d751..4eb746b7f 100644 --- a/src/main/scala/scalation/Fib.scala +++ b/src/main/scala/scalation/Fib.scala @@ -55,7 +55,7 @@ end Fib println (s"fib ($n) = $f_n") val rig = random.Randi0 (100000) - val a = (for i <- 0 until 10023 yield rig.igen).toArray // 10023 works, 10024 stack overflow + val a = (for _ <- 0 until 10023 yield rig.igen).toArray // 10023 works, 10024 stack overflow // println (s"a = ${stringOf (a)}") val m = min (a, a.length-1) println (s"min = $m") diff --git a/src/main/scala/scalation/FileReader.scala b/src/main/scala/scalation/FileReader.scala index 2cc1c0875..d4afcce1c 100644 --- a/src/main/scala/scalation/FileReader.scala +++ b/src/main/scala/scalation/FileReader.scala @@ -33,7 +33,7 @@ def readFile (fileName: String, fullPath: Boolean = false): Int = try buffer = Source.fromFile (path) // @see BufferedSource catch - case ex: IOException => _flaw ("readFile", s"IOException: file $path may not exist.") + case _ : IOException => _flaw ("readFile", s"IOException: file $path may not exist.") val lines = buffer.getLines var i = 0 @@ -63,7 +63,7 @@ def readFileIntoArray (fileName: String, fullPath: Boolean = false, limit: Int = try buffer = Source.fromFile (path) // @see BufferedSource catch - case ex: IOException => _flaw ("readFileIntoArray", s"IOException: file $path may not exist.") + case _ : IOException => _flaw ("readFileIntoArray", s"IOException: file $path may not exist.") val lineArr = if limit <= 0 then @@ -104,7 +104,7 @@ def readFileIter (fileName: String, fullPath: Boolean = false): (Iterator [Strin try buffer = Source.fromFile (path) // @see BufferedSource catch - case ex: IOException => _flaw ("readFileIter", s"IOException: file $path may not exist.") + case _ : IOException => _flaw ("readFileIter", s"IOException: file $path may not exist.") val it: Iterator 
[String] = buffer.getLines () // line iterator (it, buffer) // return iterator and buffer diff --git a/src/main/scala/scalation/GenIndexHtml.scala b/src/main/scala/scalation/GenIndexHtml.scala index ef39b56c0..635c4dc1c 100644 --- a/src/main/scala/scalation/GenIndexHtml.scala +++ b/src/main/scala/scalation/GenIndexHtml.scala @@ -53,14 +53,12 @@ import scala.collection.mutable.ArrayBuffer fos.write ("
  • " + fName + "
  • \n") else if fName != SKIP && fName != "index.html" then dirs += fi - end if end for for fi <- dirs do val fName = fi.getName () if fName != SKIP then fos.write ("
  • " + fName + "
  • \n") - end if end for fos.write ("\n\n") @@ -84,7 +82,6 @@ import scala.collection.mutable.ArrayBuffer val files = f.listFiles () if files != null then for fi <- files do try recDeleteIndex (fi) catch { case _ : Throwable => } - end if end if end recDeleteIndex diff --git a/src/main/scala/scalation/GetterSetter.scala b/src/main/scala/scalation/GetterSetter.scala new file mode 100644 index 000000000..f70998cef --- /dev/null +++ b/src/main/scala/scalation/GetterSetter.scala @@ -0,0 +1,88 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Wed Oct 1 13:01:06 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Illustration of Creation and Use of Getter and Setter Methods in Scala 3 + * + * @see docs.scala-lang.org/tour/classes.html + */ + +package scalation + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `GetterSetter` object provides an example of how to create getter and setter methods + * in Scala 3: private field variable '_name', getter method 'name', setter method 'name_='. + * @note: When a field is val (not var), it is reasonable to make it public as its value can + * be accessed, but not changed, i.e., custom getter and setter methods need not be written. + * @see kkyr.io/blog/getters-and-setters-in-scala + */ +object GetterSetter: // for object, trait or class + + private val flaw = flawf ("GetterSetter") // ScalaTion convention for error messages + private var _width = 1.2 // example private variable + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Get the value of the _width` variable. May use inline for efficiency. + */ + inline def width: Double = _width + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Set the variable `_width` to the new value `width_` checking its validity. 
+ * If the error/flaw is severe, may set `_width` to a default value or throw an exception. + * @param width_ the new width to be assigned + */ + def width_= (width_ : Double): Unit = + if width_ < 0.0 then flaw ("width_=", s"width_ = $width_ but should be non-negative") + _width = width_ + end width_= + +end GetterSetter + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SetterOnly` class provides an example of how to create a setter method for a protected + * variable. + */ +class SetterOnly (): // for trait or class + + private val flaw = flawf ("SetterOnly") // ScalaTion convention for error messages + protected var length = 1.2 // variable only accessible in hierarchy + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Set the variable `length` to the new value `length_` checking its validity. + * If the error/flaw is severe, may set `length` to a default value or throw an exception. + * @param length_ the new length to be assigned + */ + def setLength (length_ : Double): Unit = + if length_ < 0.0 then flaw ("setLength", s"length_ = $length_ but should be non-negative") + length = length_ + end setLength + +end SetterOnly + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `getterSetterTest` main function shows how to use getter and setter methods in Scala 3. 
+ * > runMain scalation.getterSetterTest + */ +@main def getterSetterTest (): Unit = + + import GetterSetter._ + + banner ("Test Custom GetterSetter object") + println (s"the current width = $width") // call to getter + width = 2.3 // call to setter + println (s"the updated width = $width") + width = -3.4 + println (s"the final width = $width") + + banner ("Test SetterOnly class") + val prop = SetterOnly () + prop.setLength (2.3) // call to setter + prop.setLength (-3.4) // call to setter + +end getterSetterTest + diff --git a/src/main/scala/scalation/HyperParameter.scala b/src/main/scala/scalation/HyperParameter.scala index 99b508e6d..838a13c94 100644 --- a/src/main/scala/scalation/HyperParameter.scala +++ b/src/main/scala/scalation/HyperParameter.scala @@ -38,12 +38,12 @@ class HyperParameter extends Cloneable: def default (name: String): ValueType = hparam (name)._2 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given the name, update the hyper-parameter value. + /** Given the name, update the hyper-parameter value. 
Note (v, d) for value, default * @param name the name of the hyper-parameter * @param value the value of the hyper-parameter */ def update (name: String, value: ValueType): Unit = - val (v, d) = hparam (name) + val (_, d) = hparam (name) hparam += name -> (value, d) end update @@ -54,7 +54,7 @@ class HyperParameter extends Cloneable: */ def updateReturn (name: String, value: ValueType): HyperParameter = val hp2 = clone ().asInstanceOf [HyperParameter] - val (v, d) = hp2.hparam (name) + val (_, d) = hp2.hparam (name) hp2.hparam += name -> (value, d) hp2 end updateReturn @@ -66,7 +66,7 @@ class HyperParameter extends Cloneable: def updateReturn (nvs: (String, ValueType)*): HyperParameter = val hp2 = clone ().asInstanceOf [HyperParameter] for nv <- nvs do - val (v, d) = hp2.hparam (nv._1) + val (_, d) = hp2.hparam (nv._1) hp2.hparam += nv._1 -> (nv._2, d) end for hp2 @@ -74,11 +74,12 @@ class HyperParameter extends Cloneable: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Concatenate this hyper-parameter map with a second hyper-parameter map. + * When the names are the same, the value for hp2 is taken. 
* @param hp2 the second hyper-parameter map */ def ++ (hp2: HyperParameter): HyperParameter = val hp3 = clone ().asInstanceOf [HyperParameter] - for (n, v) <- hp2.hparam do hp3.hparam += n -> v + for (n, v) <- hp2.hparam do hp3.hparam += n -> v // (n, v) name, value pair hp3 end ++ @@ -139,6 +140,7 @@ end HyperParameter val hp2 = new HyperParameter hp2 += ("cThresh", 0.5, 0.5) + hp2 += ("eta", 0.2, 0.2) println (s"hp ++ hp2 = ${hp ++ hp2}") diff --git a/src/main/scala/scalation/LatLong.scala b/src/main/scala/scalation/LatLong.scala index 54055f300..639937d26 100644 --- a/src/main/scala/scalation/LatLong.scala +++ b/src/main/scala/scalation/LatLong.scala @@ -252,7 +252,6 @@ object LatLong2UTM: else if latIndex == -2 then latIndex = negLetters.length - 1 negLetters (latIndex).toString - end if end getLatZone //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/Make_VectorI.scala b/src/main/scala/scalation/Make_VectorI.scala index 2025112e8..5c566685d 100644 --- a/src/main/scala/scalation/Make_VectorI.scala +++ b/src/main/scala/scalation/Make_VectorI.scala @@ -33,7 +33,6 @@ import scala.io.StdIn.readLine println (line) else cont = false - end if end while end makeVectorI diff --git a/src/main/scala/scalation/MergeSortIndirect.scala b/src/main/scala/scalation/MergeSortIndirect.scala index c5abd23f8..d9e2bbc19 100644 --- a/src/main/scala/scalation/MergeSortIndirect.scala +++ b/src/main/scala/scalation/MergeSortIndirect.scala @@ -57,16 +57,15 @@ class MergeSortIndirect (a: Array [ValueType]) (perm: Array [Int] = Array.range perm(k) = aux(j); j += 1 else perm(k) = aux(i); i += 1 - end if end for end mergeIndirect //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Recursive helper method that sorts 'perm(lo to hi). 
* @param lo the lower bound - * @param hi the upper bound + * @param hi the upper bound (e.g., a.length at beginning) */ - private def indirectSort (lo: Int, hi: Int = a.length): Unit = + private def indirectSort (lo: Int, hi: Int): Unit = if hi <= lo then return val mid = lo + (hi - lo) / 2 diff --git a/src/main/scala/scalation/MultiArrayDeque.scala b/src/main/scala/scalation/MultiArrayDeque.scala index 136d2cde5..445f77a44 100644 --- a/src/main/scala/scalation/MultiArrayDeque.scala +++ b/src/main/scala/scalation/MultiArrayDeque.scala @@ -12,7 +12,6 @@ package scalation import scala.collection.mutable.ArrayDeque import scala.math.round -import scala.reflect.ClassTag //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `MultiArrayDeques` class provides a data structure for storing multiple @@ -22,7 +21,7 @@ import scala.reflect.ClassTag * distance (from the start) to be found in another lane. * @param nLanes the number of lanes */ -class MultiArrayDeques [A: ClassTag] (nLanes: Int): +class MultiArrayDeques [A] (nLanes: Int): private val lane = Array.fill (nLanes)(ArrayDeque [A] ()) // allocated n lanes diff --git a/src/main/scala/scalation/PriorityQueue.scala b/src/main/scala/scalation/PriorityQueue.scala index 3f5e9370f..3c12310cb 100644 --- a/src/main/scala/scalation/PriorityQueue.scala +++ b/src/main/scala/scalation/PriorityQueue.scala @@ -19,7 +19,7 @@ * See the NOTICE file distributed with this work for * additional information regarding copyright ownership. 
* - * @authur John Miller added decreaseKey, increaseKey, printInOrder + * @author John Miller added decreaseKey, increaseKey, printInOrder */ package scalation @@ -29,7 +29,7 @@ import scala.collection.{AbstractIterator, IterableOnce, IterableOps, import scala.collection.generic.DefaultSerializationProxy import scala.collection.immutable import scala.collection.mutable.{AbstractIterable, ArrayBuilder, ArrayBuffer, - Builder, Cloneable, Growable, Iterable, Queue} + Builder, Cloneable, Growable, Iterable, Queue} import scala.math.Ordering //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -59,8 +59,8 @@ import scala.math.Ordering * println(pq.clone.dequeueAll) // prints ArraySeq(7, 5, 3, 2, 1) * }}} * - * @tparam A type of the elements in this priority queue. - * @param ord implicit ordering used to compare the elements of type `A`. + * @tparam A type of the elements in this priority queue. + * @param ord implicit ordering used to compare the elements of type `A`. * * @define Coll PriorityQueue * @define coll priority queue @@ -69,7 +69,8 @@ import scala.math.Ordering * @define mayNotTerminateInf * @define willNotTerminateInf */ -class PriorityQueue [A] (implicit val ord: Ordering [A]) +//class PriorityQueue [A] (implicit val ord: Ordering [A]) +class PriorityQueue [A] (using val ord: Ordering [A]) extends AbstractIterable [A] with Iterable [A] with IterableOps [A, Iterable, PriorityQueue [A]] @@ -101,7 +102,7 @@ class PriorityQueue [A] (implicit val ord: Ordering [A]) end p_swap end ResizableArrayAccess - private val resarr = new ResizableArrayAccess [A] // internal storage for priorty queue + private val resarr = new ResizableArrayAccess [A] // internal storage for priority queue resarr.p_size0 += 1 // we do not use array(0) TODO: explain -- what is the first element even for? 
@@ -161,7 +162,7 @@ class PriorityQueue [A] (implicit val ord: Ordering [A]) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Treat an `AnyRef` reference as having type A. - * @param x the refereence + * @param x the reference */ private def toA (x: AnyRef): A = x.asInstanceOf [A] @@ -311,8 +312,7 @@ class PriorityQueue [A] (implicit val ord: Ordering [A]) fixDown (resarr.p_array, 1, resarr.p_size0 - 1) toA (result) else - throw new NoSuchElementException("no element to remove from heap") - end if + throw new NoSuchElementException ("no element to remove from heap") end dequeue //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -336,7 +336,6 @@ class PriorityQueue [A] (implicit val ord: Ordering [A]) val m = resarr.p_array.indexOf (elem.asInstanceOf [AnyRef]) // find the element in the heap resarr.p_array(m) = upElem.asInstanceOf [AnyRef] // replace it with its updated version fixDown (resarr.p_array, m, resarr.p_size0) // re-position in heap if needed - end if end decreaseKey //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -350,7 +349,6 @@ class PriorityQueue [A] (implicit val ord: Ordering [A]) val m = resarr.p_array.indexOf (elem.asInstanceOf [AnyRef]) // find the element in the heap resarr.p_array(m) = upElem.asInstanceOf [AnyRef] // replace it with its updated version fixUp (resarr.p_array, m) // re-position in heap if needed - end if end increaseKey //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -400,7 +398,8 @@ class PriorityQueue [A] (implicit val ord: Ordering [A]) * @return the reversed priority queue. 
*/ def reverse: PriorityQueue [A] = - val revq = new PriorityQueue [A]()(ord.reverse) +// val revq = new PriorityQueue [A]()(using ord.reverse) + val revq = new PriorityQueue [A](using ord.reverse) val n = resarr.p_size0 revq.resarr.p_ensureSize (n) revq.resarr.p_size0 = n @@ -418,7 +417,7 @@ class PriorityQueue [A] (implicit val ord: Ordering [A]) * @return an iterator over all elements sorted in descending order. */ def reverseIterator: Iterator [A] = new AbstractIterator [A] { - private [PriorityQueue] var i = resarr.p_size0 - 1 + private var i = resarr.p_size0 - 1 def hasNext: Boolean = i >= 1 def next (): A = val n = resarr.p_array(i) @@ -460,10 +459,10 @@ class PriorityQueue [A] (implicit val ord: Ordering [A]) end clone //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Copy the elements in this priority's heap to to an array + /** Copy the elements in this priority's heap to an array. * @param xs the array to copy the elements in this priority's heap to - * @param start the index to start copu - * @param len ? + * @param start the index to start copy + * @param len the number to copy */ override def copyToArray [B >: A] (xs: Array [B], start: Int, len: Int): Int = // val copied = IterableOnce.elemsToCopyToArray (length, xs.length, start, len) @@ -492,7 +491,7 @@ end PriorityQueue //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `PriorityQueue` object ... +/** The `PriorityQueue` object provides methods for creating priority queues. 
*/ @SerialVersionUID(3L) object PriorityQueue extends SortedIterableFactory [PriorityQueue]: diff --git a/src/main/scala/scalation/PriorityQueueFW.scala b/src/main/scala/scalation/PriorityQueueFW.scala new file mode 100644 index 000000000..7f77f1467 --- /dev/null +++ b/src/main/scala/scalation/PriorityQueueFW.scala @@ -0,0 +1,599 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author EPFL and Lightbend, Inc., John Miller + * @version 2.0 + * @date Sun Jun 6 15:07:08 EDT 2021 + * + * @note Fixed Width Extension to Scala's Priority Queue class + * with decreaseKey, increaseKey, printInOrder + */ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/* Scala (https://www.scala-lang.org) + * + * Copyright EPFL and Lightbend, Inc. + * + * Licensed under Apache License 2.0 + * (http://www.apache.org/licenses/LICENSE-2.0). + * + * See the NOTICE file distributed with this work for + * additional information regarding copyright ownership. + * + * @author John Miller added decreaseKey, increaseKey, printInOrder + */ + +package scalation + +import scala.collection.{AbstractIterator, IterableOnce, IterableOps, + SortedIterableFactory, StrictOptimizedIterableOps} +import scala.collection.generic.DefaultSerializationProxy +import scala.collection.immutable +import scala.collection.mutable.{AbstractIterable, ArrayBuilder, ArrayBuffer, + Builder, Cloneable, Growable, Iterable, Queue} +import scala.math.Ordering +import scala.runtime.ScalaRunTime.stringOf + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `PriorityQueueFW` class implements priority queues using a heap. + * To prioritize elements of type A there must be an implicit + * Ordering [A] available at creation. 
+ * + * If multiple elements have the same priority in the ordering of this + * PriorityQueueFW, no guarantees are made regarding the order in which elements + * are returned by `dequeue` or `dequeueAll`. In particular, that means this + * class does not guarantee first in first out behaviour that may be + * incorrectly inferred from the Queue part of the name of this class. + * + * Only the `dequeue` and `dequeueAll` methods will return elements in priority + * order (while removing elements from the heap). Standard collection methods + * including `drop`, `iterator`, and `toString` will remove or traverse the heap + * in whichever order seems most convenient. + * + * Therefore, printing a `PriorityQueueFW` will not reveal the priority order of + * the elements, though the highest-priority element will be printed first. To + * print the elements in order, one must duplicate the `PriorityQueueFW` (by using + * `clone`, for instance) and then dequeue them: + * + * @example {{{ + * val pq = collection.mutable.PriorityQueueFW(1, 2, 5, 3, 7) + * println(pq) // elements probably not in order + * println(pq.clone.dequeueAll) // prints ArraySeq(7, 5, 3, 2, 1) + * }}} + * + * @tparam A type of the elements in this priority queue. + * @param ord implicit ordering used to compare the elements of type `A`. 
+ * @param cap the capacity (maximum number of elements + 1) of this fixed width priority queue + * + * @define Coll PriorityQueueFW + * @define coll priority queue + * @define orderDependent + * @define orderDependentFold + * @define mayNotTerminateInf + * @define willNotTerminateInf + */ +//class PriorityQueueFW [A] (implicit val ord: Ordering [A]) +class PriorityQueueFW [A] (cap: Int)(using val ord: Ordering [A]) + extends AbstractIterable [A] + with Iterable [A] + with IterableOps [A, Iterable, PriorityQueueFW [A]] + with StrictOptimizedIterableOps [A, Iterable, PriorityQueueFW [A]] + with Builder [A, PriorityQueueFW [A]] + with Cloneable [PriorityQueueFW [A]] + with Growable [A] + with Serializable: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `ResizableArrayAccess` class extends `ArrayBuffer` with methods below. + * The size of the ArrayBuffer should be fixed at + * Used for internal storage of heap. + */ + private class ResizableArrayAccess [A0] extends ArrayBuffer [A0] (cap): + + override def mapInPlace (f: A0 => A0): this.type = + var i = 1 // see "we do not use array(0)" comment below (???) + val siz = this.size + while i < siz do { this(i) = f(this(i)); i += 1 } + this + end mapInPlace + + def p_size0 = size0 + def p_size0_=(s: Int) = size0 = s + def p_array = array +// def p_ensureSize (n: Int) = super.ensureSize (n) + def p_swap (a: Int, b: Int): Unit = + val h = array(a); array(a) = array(b); array(b) = h + end p_swap + end ResizableArrayAccess + + private val debug = debugf ("PriorityQueueFW", true) // debug function + + private val resarr = new ResizableArrayAccess [A] // internal storage for priority queue + + resarr.p_size0 += 1 // we do not use array(0) TODO: explain -- what is the first element even for? + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the length of this priority queue. 
+ */ + def length: Int = resarr.length - 1 // adjust length accordingly + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the size of this priority queue. + */ + override def size: Int = length + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the known size of this priority queue. + */ + override def knownSize: Int = length + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether this priority is empty. + */ + override def isEmpty: Boolean = resarr.p_size0 < 2 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Make a priority from a collection type that is not eligible for + * EvidenceIterableFactoryDefaults since C != CC[A] (PriorityQueueFW[A] != Iterable[A]) + * @param coll the collection of elements to be turned into a priority queue + */ + override protected def fromSpecific (coll: scala.collection.IterableOnce [A]): PriorityQueueFW [A] = + PriorityQueueFW.from (coll) + end fromSpecific + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Make a priority queue builder. + */ + override protected def newSpecificBuilder: Builder [A, PriorityQueueFW [A]] = PriorityQueueFW.newBuilder + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Make this priority empty. + */ + override def empty: PriorityQueueFW [A] = PriorityQueueFW.empty + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Apply/map the function f in-place + * @param f the function to be applied + */ + def mapInPlace (f: A => A): this.type = + resarr.mapInPlace (f) + heapify (1) + this + end mapInPlace + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return this priority queue. 
+ */ + def result () = this + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Treat an `AnyRef` reference as having type A. + * @param x the refereence + */ + private def toA (x: AnyRef): A = x.asInstanceOf [A] + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Make swaps going up the tree. + * Use `ord` directly to avoid allocating `OrderingOps` + * @param as array reference to heap + * @param m start index + */ + protected def fixUp (as: Array [AnyRef], m: Int): Unit = + var k: Int = m + while k > 1 && ord.lt (toA (as(k / 2)), toA (as(k))) do + resarr.p_swap (k, k / 2) + k = k / 2 + end while + end fixUp + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Make swaps going down the tree. + * Returns true if any swaps were done (used in heapify). + * Use `ord` directly to avoid allocating `OrderingOps`. + * @param as array reference to heap + * @param m start index + * @param n end index + */ + protected def fixDown (as: Array [AnyRef], m: Int, n: Int): Boolean = + var k: Int = m + while n >= 2 * k do + var j = 2 * k + if j < n && ord.lt (toA (as(j)), toA (as(j + 1))) then j += 1 + if ord.gteq (toA (as(k)), toA (as(j))) then return k != m + else + val h = as(k) + as(k) = as(j) + as(j) = h + k = j + end if + end while + k != m + end fixDown + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Insert a single element into the priority queue. + * @param elem the element to insert. + * @return this $coll. 
+ */ + def addOne (elem: A): this.type = +// resarr.p_ensureSize (resarr.p_size0 + 1) + if resarr.p_size0 < cap then + resarr.p_array(resarr.p_size0) = elem.asInstanceOf [AnyRef] + fixUp (resarr.p_array, resarr.p_size0) + resarr.p_size0 += 1 + else + replaceLow (elem) // no space, replace lower of new and old lowest + this + end addOne + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** When the priority queue is full, only insert the new element if it is better than + * the lowest priority element. In which case, replace the lowest priority element. + * FIX - should work off of `ord` rather than using max (which assumes higher is worse) + * @param elem the new element to possibly insert + */ + private def replaceLow (elem: A): Unit = + debug ("replaceLow", s"${stringOf (resarr.p_array)}") + val (maxI, maxE) = findLow + debug ("replaceLow", s"ord.gt ($elem, $maxE) = ${ord.gt (elem, maxE)}") + if ord.gt (elem, maxE) then resarr.p_array(maxI) = elem.asInstanceOf [AnyRef] // overwrite if new is better + end replaceLow + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Find the lowest priority element in the queue; it will be in one of the leaf nodes. + * FIX - should work off of `ord` rather than using max (which assumes higher is worse) + */ + private def findLow: (Int, A) = + val n = resarr.p_size0-1 + var maxI = n + var maxE = toA (resarr.p_array(maxI)) + for i <- n/2 + 1 until n do + val el = toA (resarr.p_array(i)) + if ord.gt (maxE, el) then { maxI = i; maxE = el } + (maxI, maxE) + end findLow + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Add all elements. 
+ * @param xs the elements to be added + */ + override def addAll (xs: IterableOnce [A]): this.type = + val from = resarr.p_size0 +// for x <- xs.iterator do unsafeAdd (x) + for x <- xs.iterator do addOne (x) + heapify (from) + this + end addAll + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Add an element without calling heapify. + * Like += but skips fixUp, which breaks the ordering invariant. + * A series of unsafeAdds MUST be followed by heapify. + * @param elem the element to add + * + private def unsafeAdd (elem: A): Unit = +// resarr.p_ensureSize (resarr.p_size0 + 1) + resarr.p_array(resarr.p_size0) = elem.asInstanceOf [AnyRef] + resarr.p_size0 += 1 + end unsafeAdd + */ + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Reestablish the heap order. + * Elements at indices 1..from-1 were already in heap order before any adds. + * Elements at indices from..n are newly added, their order must be fixed. + *---------------------------------------------------------------------------- + * Do fixDown on the parents of all the new elements, except the parent of the + * first new element, which is in the queue (that parent is treated specially + * because it might be the root) + * @param from fix the heap from position/index from to the end of the heap + */ + private def heapify (from: Int): Unit = + val n = length + + if from <= 2 then // no pre-existing order to maintain, do the textbook heapify algorithm + for i <- n/2 to 1 by -1 do fixDown (resarr.p_array, i, n) + else if n - from < 4 then // for very small adds, doing the simplest fix is faster + for i <- from to n do fixUp (resarr.p_array, i) + else + var min = from/2 // tracks the minimum element in the queue + val queue = scala.collection.mutable.Queue [Int](min) + + for i <- n/2 until min by -1 do + if fixDown (resarr.p_array, i, n) then + // there was a swap, so also need to fixDown i's parent + val parent = i/2 + if parent < min 
then // make sure same parent isn't added twice + min = parent + queue += parent + end if + end for + + while queue.nonEmpty do + val i = queue.dequeue () + if fixDown (resarr.p_array, i, n) then + val parent = i/2 + if parent < min && parent > 0 then + // the "parent > 0" is to avoid adding the parent of the root + min = parent + queue += parent + end if + end while + end if + end heapify + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Adds all elements provided by a `IterableOnce` object into the priority queue. + * @param xs a iterable object. + * @return a new priority queue containing elements of both `xs` and `this`. + */ + def ++ (xs: IterableOnce [A]): PriorityQueueFW [A] = { this.clone () ++= xs } + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Adds all elements to the queue. + * @param elems the elements to add. + */ + def enqueue (elems: A*): Unit = this ++= elems + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the element with the highest priority in the queue, and removes + * this element from the queue. + * throws NoSuchElementException + * @return the element with the highest priority. + */ + def dequeue (): A = + if resarr.p_size0 > 1 then + resarr.p_size0 = resarr.p_size0 - 1 + val result = resarr.p_array(1) + resarr.p_array(1) = resarr.p_array(resarr.p_size0) + resarr.p_array(resarr.p_size0) = null // erase reference from array + fixDown (resarr.p_array, 1, resarr.p_size0 - 1) + toA (result) + else + throw new NoSuchElementException ("no element to remove from heap") + end dequeue + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns and removes all the elements in the priority queue. 
+ */ + def dequeueAll [A1 >: A]: immutable.Seq [A1] = + val b = ArrayBuilder.make [Any] + b.sizeHint (size) + while nonEmpty do b += dequeue () + immutable.ArraySeq.unsafeWrapArray (b.result ()).asInstanceOf [immutable.ArraySeq [A1]] + end dequeueAll + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Since the element's priority is being decreased, move it to a lower priority + * position (toward the back). + * @param elem the element to reposition + * @param upElem the updated version of the element to reposition + */ + def decreaseKey (elem: A, upElem: A): Unit = + if ord.lt (upElem, elem) then // make sure priority is decreased + val m = resarr.p_array.indexOf (elem.asInstanceOf [AnyRef]) // find the element in the heap + resarr.p_array(m) = upElem.asInstanceOf [AnyRef] // replace it with its updated version + fixDown (resarr.p_array, m, resarr.p_size0) // re-position in heap if needed + end decreaseKey + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Since the element's priority is being increased, move it to a higher priority + * position (toward the front). + * @param elem the element to reposition + * @param upElem the updated version of the element to reposition + */ + def increaseKey (elem: A, upElem: A): Unit = + if ord.gt (upElem, elem) then // make sure priority is increased + val m = resarr.p_array.indexOf (elem.asInstanceOf [AnyRef]) // find the element in the heap + resarr.p_array(m) = upElem.asInstanceOf [AnyRef] // replace it with its updated version + fixUp (resarr.p_array, m) // re-position in heap if needed + end increaseKey + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Print the elements in the priority queue in order. 
+ */ + def printInOrder: Unit = + print ("PriorityQueueFW: ") + println (clone.dequeueAll) + end printInOrder + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the element with the highest priority in the queue, + * or throws an error if there is no element contained in the queue. + * @return the element with the highest priority. + */ + override def head: A = if resarr.p_size0 > 1 then toA (resarr.p_array(1)) + else throw new NoSuchElementException ("queue is empty") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Removes all elements from the queue. After this operation is completed, + * the queue will be empty. + */ + def clear (): Unit = + resarr.clear () + resarr.p_size0 = 1 + end clear + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns an iterator which yields all the elements. Note: The order of elements + * returned is undefined. If you want to traverse the elements in priority queue + * order, use `clone().dequeueAll.iterator`. + * @return an iterator over all the elements. + */ + override def iterator: Iterator [A] = resarr.iterator.drop (1) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the reverse of this priority queue. The new priority queue has + * the same elements as the original, but the opposite ordering. + * For example, the element with the highest priority in `pq` has the lowest + * priority in `pq.reverse`, and vice versa. + * Ties are handled arbitrarily. Elements with equal priority may or + * may not be reversed with respect to each other. + *---------------------------------------------------------------------------- + * Copy the existing data into the new array backwards; this won't put it exactly + * into the correct order, but will require less fixing than copying it in + * the original order + * @return the reversed priority queue. 
+ */ + def reverse: PriorityQueueFW [A] = +// val revq = new PriorityQueueFW [A] ()(using ord.reverse) + val revq = new PriorityQueueFW [A] (cap)(using ord.reverse) + val n = resarr.p_size0 +// revq.resarr.p_ensureSize (n) + revq.resarr.p_size0 = n + val from = resarr.p_array + val to = revq.resarr.p_array + for i <- 1 until n do to(i) = from(n-i) + revq.heapify (1) + revq + end reverse + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns an iterator which yields all the elements in the reverse order + * than that returned by the method `iterator`. + * Note: The order of elements returned is undefined. + * @return an iterator over all elements sorted in descending order. + */ + def reverseIterator: Iterator [A] = new AbstractIterator [A] { + private var i = resarr.p_size0 - 1 + def hasNext: Boolean = i >= 1 + def next (): A = + val n = resarr.p_array(i) + i -= 1 + toA (n) + end next + } // reverseIterator + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns a regular queue containing the same elements. + * Note: the order of elements is undefined. + */ + def toQueue: Queue [A] = new Queue [A] ++= this.iterator + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns a textual representation of a queue as a string. + * @return the string representation of this queue. + */ + override def toString () = toList.mkString ("PriorityQueueFW(", ", ", ")") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Converts this $coll to a list. + * Note: the order of elements is undefined. + * @return a list containing all elements of this $coll. + */ + override def toList: immutable.List [A] = immutable.List.from (this.iterator) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Clone this priority queue. + * @return a priority queue with the same elements. 
+ */ + override def clone (): PriorityQueueFW [A] = + val pq = new PriorityQueueFW [A] (cap) + val n = resarr.p_size0 +// pq.resarr.p_ensureSize (n) + java.lang.System.arraycopy (resarr.p_array, 1, pq.resarr.p_array, 1, n-1) + pq.resarr.p_size0 = n + pq + end clone + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Copy the elements in this priority's heap to an array. + * @param xs the array to copy the elements in this priority's heap to + * @param start the index to start copy + * @param len the number of elements to copy + */ + override def copyToArray [B >: A] (xs: Array [B], start: Int, len: Int): Int = +// val copied = IterableOnce.elemsToCopyToArray (length, xs.length, start, len) + val copied = length // FIXED - protection would allow access + if copied > 0 then Array.copy (resarr.p_array, 1, xs, start, copied) + copied + end copyToArray + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + @deprecated("Use `PriorityQueueFW` instead", "2.13.0") + def orderedCompanion: PriorityQueueFW.type = PriorityQueueFW + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Use Defaultn Serialization Proxy for writing in place. + */ + protected [PriorityQueueFW] def writeReplace (): AnyRef = + new DefaultSerializationProxy (PriorityQueueFW.evidenceIterableFactory [A], this) + end writeReplace + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the name of this class. + */ + override protected [PriorityQueueFW] def className = "PriorityQueueFW" + +end PriorityQueueFW + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `PriorityQueueFW` object provides methods for creating priority queues. 
+ */ +@SerialVersionUID(3L) +object PriorityQueueFW extends SortedIterableFactory [PriorityQueueFW]: + + private var _cap = 5 // the current queue capacity + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Get the capacity cap. + */ + def cap: Int = _cap + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Set the capacity cap. + * @param c the new capacity + */ + def cap_= (c: Int): Unit = _cap = c + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a priority queue builder. + */ + def newBuilder [A : Ordering]: Builder [A, PriorityQueueFW [A]] = + new Builder [A, PriorityQueueFW [A]] { + val pq = new PriorityQueueFW [A] (_cap) +// def addOne (elem: A): this.type = { pq.unsafeAdd (elem); this } + def addOne (elem: A): this.type = { pq.addOne (elem); this } + def result (): PriorityQueueFW [A] = { pq.heapify (1); pq } + def clear (): Unit = pq.clear () } + end newBuilder + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create an empty priority queue. + */ + def empty [A : Ordering]: PriorityQueueFW [A] = new PriorityQueueFW [A] (_cap) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a priority queue from an iterable (once) collection. + * @param it the iterable (once) collection + */ + def from [E : Ordering] (it: IterableOnce [E]): PriorityQueueFW [E] = + val b = newBuilder [E] + b ++= it + b.result () + end from + +end PriorityQueueFW + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `priorityQueueFWTest` main function tests the `PriorityQueueFW` class. 
+ * > runMain scalation.priorityQueueFWTest + */ +@main def priorityQueueFWTest (): Unit = + + given ord: Ordering [Double] = summon [Ordering [Double]].reverse + + banner ("Test PriorityQueueFW") + val cap = 9 // position 0 used by data structure => cap = 8 elements + 1 + val pq = new PriorityQueueFW [Double] (cap) + for i <- 1 to 10 do + pq.enqueue (i * i) + println (pq) + println ("Insert 20") + pq.enqueue (20) + println (pq) + while ! pq.isEmpty do println (pq.dequeue ()) + +end priorityQueueFWTest + diff --git a/src/main/scala/scalation/Rat.scala b/src/main/scala/scalation/Rat.scala new file mode 100644 index 000000000..8d199106b --- /dev/null +++ b/src/main/scala/scalation/Rat.scala @@ -0,0 +1,724 @@ + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller, Casey Bowman + * @version 2.0 + * @date Sat Jul 20 22:24:50 EDT 2013 + * @see LICENSE (MIT style license file). + * + * @note Rational (Rat) Numbers + */ + +package scalation + +//import scala.language.implicitConversions +import scala.math.floor +import scala.util.control.Breaks.{breakable, break} + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Rat` class is used to represent and operate on rational numbers. + * Internally, a rational number is represented as two long integers. + * Externally, two forms are supported: + * + * a/b = 2/3 via: Rat ("2/3"), 'toString' + * (a, b) = (2, 3) via: create ("(2, 3)") 'toString2' + * + * A `Rat` number can be created without loss of precision using the constructor, + * `apply`, `create` or `fromBigDecimal` methods. Other methods may lose precision. 
+ * @param num the numerator (e.g., 2L) + * @param den the denominator (e.g., 3L) + */ +case class Rat (val num: Long, val den: Long = 1L) + extends Fractional [Rat] + with Ordered [Rat]: + + require (den != 0L) // the denominator must not be zero + + /** General alias for the parts of a complex number + */ + val (val1, val2) = (num, den) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Reduce the magnitude of the numerator and denonimator by dividing + * both by their Greatest Common Divisor (GCD). + */ + def reduce (): Rat = + val gc = gcd (num, den) + Rat (num / gc, den / gc) + end reduce + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the unary minus (-). + */ + def unary_- : Rat = Rat (-num, -den) + inline def negate (q: Rat): Rat = -q + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Add two rational numbers, this + q. + * @param q add rational q to this + */ + def + (q: Rat): Rat = Rat (num * q.den + q.num * den, den * q.den) + def + (d: Double): Rat = this + fromDouble (d) + inline def plus (q: Rat, p: Rat): Rat = q + p + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Add a rational number plus a long, this + l. + * @param l add long l to this + */ + def + (l: Long): Rat = Rat (num + l * den, den) + inline def plus (q: Rat, l: Long): Rat = q + l + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Subtract two rational numbers, this - q. + * @param q subtract rational q from this + */ + def - (q: Rat): Rat = Rat (num * q.den - q.num * den, den * q.den) + def - (d: Double): Rat = this - fromDouble (d) + inline def minus (q: Rat, p: Rat): Rat = q - p + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Subtract: this rational number minus a long, this - l. 
+ * @param l subtract long l from this + */ + def - (l: Long): Rat = Rat (num - l * den, den) + inline def minus (q: Rat, l: Long): Rat = q - l + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Multiply two rational numbers, this * q. + * @param q multiply this times rational q + */ + def * (q: Rat): Rat = Rat (num * q.num, den * q.den) + def * (d: Double): Rat = this * fromDouble (d) + inline def times (q: Rat, p: Rat): Rat = q * p + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Multiply a rational number times a long, this * l. + * @param l multiply this times long l + */ + def * (l: Long): Rat = Rat (num * l, den) + inline def times (q: Rat, l: Long): Rat = q * l + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Divide two rational numbers, this / q. + * @param q divide this by rational q + */ + def / (q: Rat): Rat = Rat (num * q.den, den * q.num) + def / (d: Double): Rat = this / fromDouble (d) + inline def div (q: Rat, p: Rat): Rat = q / p + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Divide a rational number div a long, this / l + * @param l divide this by long l + */ + def / (l: Long): Rat = Rat (num, den * l) + inline def div (q: Rat, l: Long): Rat = q / l + + def ÷ (num: Long, den: Long) = Rat (num, den) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Take the reciprocal of this rational number by swapping `num` and `den`. + */ + def recip = Rat (den, num) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Raise a rational number to the q-th power. + * @param q the rational power/exponent + */ + def ~^ (q: Rat): Rat = root (this ~^ q.num, q.den) + inline def pow (q: Rat, p: Rat): Rat = q ~^ p + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Raise a rational number to the l-th power. 
+ * @param l the long power/exponent + */ + def ~^ (l: Long): Rat = Rat (num ~^ l, den ~^ l) + inline def pow (q: Rat, l: Long): Rat = q ~^ l + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Raise a rational number to the q-th power. Extended to handle a negative base. + * @see `pow_` in CommonFunctions. + * @param q the rational power/exponent + */ + def ↑ (q: Rat): Rat = fromDouble (this.toDouble ↑ q) + inline def pow_ (q: Rat, p: Rat): Rat = q ↑ p + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Take the l-th root of the rational number q. + * @param l the long root + */ + def root (q: Rat, l: Long): Rat = Rat (lroot (q.num, l), lroot (q.den, l)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether two rational numbers are nearly equal. + * @param q the compare 'this' with q + */ + def =~ (q: Rat): Boolean = this.toDouble =~ q.toDouble + inline def ≈ (q: Rat): Boolean = this =~ q + inline def near_eq (q: Rat, p: Rat): Boolean = q =~ p + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the absolute value of this rational number. + */ + def abs: Rat = Rat (num.abs, den.abs) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the maximum of this and that rational numbers. + * @param q that rational number to compare with this + */ + def max (q: Rat): Rat = if q > this then q else this + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the minimum of this and that rational numbers. + * @param q that rational number to compare with this + */ + def min (q: Rat): Rat = if q < this then q else this + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the square root of that rational number. 
+ * @param x that rational number + */ + def sqrt: Rat = this ~^ Rat (1L, 2L) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Determine whether this rational number is integral. + */ + def isIntegral: Boolean = den == 1L + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compare two rational numbers (negative for <, zero for ==, positive for >). + * @param q the first rational number to compare + * @param p the second rational number to compare + */ + def compare (q: Rat, p: Rat): Int = q.num * p.den compare q.num * q.den + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compare this rational number with that rational number 'q'. + * @param q that rational number + */ + def compare (q: Rat): Int = num * q.den compare q.num * den + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compare 'this' rational number with that rational number 'q' for inequality. + * @param q that rational number + */ + def ≠ (q: Rat) = this != q + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compare 'this' rational number with that rational number 'q' for less than + * or equal to. + * @param q that rational number + */ + def ≤ (q: Rat) = this <= q + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compare 'this' rational number with that rational number 'q' for greater + * than or equal to. 
+ * @param q that rational number + */ + def ≥ (q: Rat) = this >= q + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Determine whether 'this' is within the given bounds + * @param lim the given (lower, upper) bounds + */ + def in (lim: (Rat, Rat)): Boolean = lim._1 <= this && this <= lim._2 + def ∈ (lim: (Rat, Rat)): Boolean = lim._1 <= this && this <= lim._2 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Determine whether 'this' is in the given set. + * @param lim the given set of values + */ + def in (set: Set [Rat]): Boolean = set contains this + def ∈ (set: Set [Rat]): Boolean = set contains this + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Determine whether 'this' is not within the given bounds + * @param lim the given (lower, upper) bounds + */ + def not_in (lim: (Rat, Rat)): Boolean = ! (lim._1 <= this && this <= lim._2) + def ∉ (lim: (Rat, Rat)): Boolean = ! (lim._1 <= this && this <= lim._2) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Determine whether 'this' is not in the given set. + * @param lim the given set of values + */ + def not_in (set: Set [Rat]): Boolean = ! (set contains this) + def ∉ (set: Set [Rat]): Boolean = ! (set contains this) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert that/this rational number to a Rat. + * @param q that rational number to convert + */ + def toRat (q: Rat) = q + + def toRat = this + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert that/this rational number to a `BigDecimal` number. 
+ * @param q that rational number to convert + */ + def toBigDecimal (q: Rat): BigDecimal = BigDecimal (q.num) / BigDecimal (q.den) + + def toBigDecimal: BigDecimal = BigDecimal (num) / BigDecimal (den) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert that/this rational number to a `Double`. + * @param q that rational number to convert + */ + def toDouble (q: Rat): Double = q.num.toDouble / q.den.toDouble + + def toDouble: Double = num.toDouble / den.toDouble + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert that/this rational number to a `Float`. + * @param q that rational number to convert + */ + def toFloat (q: Rat): Float = (q.num.toDouble / q.den.toDouble).toFloat + + def toFloat: Float = (num.toDouble / den.toDouble).toFloat + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert that/this rational number to an `Int`. + * @param q that rational number to convert + */ + def toInt (q: Rat): Int = (q.num / q.den).toInt + + def toInt: Int = (num / den).toInt + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert this rational number to a `Long`. + * @param q that rational number to convert + */ + def toLong (q: Rat): Long = q.num / q.den + + def toLong: Long = num / den + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a rational number from a `BigDecimal` number. + * @param y the `BigDecimal` used to create the rational number + */ + def fromBigDecimal (y: BigDecimal): Rat = Rat.fromBigDecimal (y) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a rational number from a `Double`. 
+ * @see Rat.double2Rat + * @param y the `Double` used to create the rational number + */ + def fromDouble (y: Double): Rat = Rat.fromDouble (y) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a rational number from a `Float`. `Float` is currently not fully + * supported. + * @param y the `Float` used to create the rational number + */ + def fromFloat (y: Float): Rat = Rat.fromDouble (y) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a rational number from an `Int`. + * @param n the `Int` used to create the rational number + */ + def fromInt (n: Int): Rat = Rat (n.toLong, 1L) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a rational number from a `Long`. + * @param n the `Long` used to create the rational number + */ + def fromLong (n: Long): Rat = Rat (n, 1L) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Override equals to determine whether this rational number equals rational 'c'. + * @param c the rational number to compare with this + */ + override def equals (c: Any): Boolean = + val q = c.asInstanceOf [Rat] + (num * q.den).equals (q.num * den) + end equals + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Must also override hashCode to be be compatible with equals. + */ + override def hashCode: Int = num.hashCode + 41 * den.hashCode + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Parse the string to create a rational number. + */ + def parseString (str: String): Option [Rat] = Some (Rat (str)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert this rational number to a String of the form 'a/b'. 
+ */ + override def toString: String = s"$num/$den" + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert this rational number to a String of the form '(a, b)'. + */ + def toString2: String = "(" + num + ", " + den + ")" + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Find the Great Common Denominator (GCD) of two long integers. + * @param l1 the first long number + * @param l2 the second long number + */ + private def gcd (l1: Long, l2: Long): Long = + BigInt (l1).gcd (l2).toLong +// if l2 == 0 then l1 else gcd (l2, l1 % l2) + end gcd + +end Rat + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Rat` companion object defines the origin (zero), one and minus one + * as well as some utility functions. + */ +object Rat: + + /** Zero (0) as a Rat number + */ + val _0 = Rat ( 0L) + + /** One (1) as a Rat number + */ + val _1 = Rat ( 1L) + + /** Negative one (-1) as a Rat number + */ + val _1n = Rat (-1L) + + /** Denominator (2 ~^ 54) big enough to capture largest Double significand (53 bits) + */ + val maxDen = 0x40000000000000L +// val maxDen = 18014398509481984L + + /** One in `BigDecimal` + */ + val _1_big = BigDecimal (1) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Implicit conversion from `Double` to `Rat`. + * @param d the Double parameter to convert + */ +// implicit def double2Rat (d: Double): Rat = fromDouble (d) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a rational number from a pair of Longs. + * @param qt the tuple form of a rational number + */ + def apply (qt: (Long, Long)): Rat = Rat (qt._1, qt._2) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a rational number from its primary string representation "a/b". + * Examples: "2/3", "2". 
+ * @param qs the string form of a rational number + */ + def apply (qs: String): Rat = + val pair = qs.split ('/') + val p0 = pair(0) + Rat (if pair.length == 1 then (p0.toLong, 1L) + else (p0.toLong, pair(1).toLong)) + end apply + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a rational number from its secondary string representation "(a, b)". + * Examples: "(2, 3)", "(2, 1)". + * @param qs the string form of a rational number + */ + def create (qs: String): Rat = + val pair = qs.split ('/') + Rat (pair(0).drop(1).toLong, pair(1).dropRight(1).toLong) + end create + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Make a rational number from a String of the form "12.3E+7". + * @see http://docs.oracle.com/javase/1.5.0/docs/api/java/math/BigDecimal.html + * #BigDecimal%28java.lang.String%29 + * @param s the given String representation of a number + */ + def make (s: String): Rat = fromBigDecimal (BigDecimal (s)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the absolute value of that rational number. + * @param x that rational number + */ + def abs (x: Rat) = x.abs + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the maximum of two rational numbers, q and p. + * @param q the first rational number to compare + * @param p the second rational number to compare + */ + def max (q: Rat, p: Rat): Rat = if p > q then p else q + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the minimum of two rational numbers, q and p. + * @param q the first rational number to compare + * @param p the second rational number to compare + */ + def min (q: Rat, p: Rat): Rat = if p < q then p else q + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the signum (sgn) of a rational number. The values may be -1, 0, or 1. 
+ * @param r the rational number to obtain the sigum of + */ + def signum (r: Rat): Rat = + if r.num == 0 then _0 + else if r.num > 0 then fromDouble (math.signum (r.den.toDouble)) + else fromDouble (-math.signum (r.den.toDouble)) + end signum + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the square root of that rational number. + * @param x that rational number + */ + def sqrt (x: Rat): Rat = x ~^ Rat (1L, 2L) + + /** Ordering for rational numbers + */ + val ord = new Ordering [Rat] + { def compare (x: Rat, y: Rat) = x.compare (y) } + + /** Implicit numeric value for establishing type + */ +// implicit val num = _0 + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a rational number from a `BigDecimal` number. + * @param y the `BigDecimal` used to create the rational number + * @param md the maximum denominator + */ + def fromBigDecimal (y: BigDecimal): Rat = Rat (from_BigDecimal (y)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Determine the numerator and denonimator of the closest rational number + * to the given `BigDecimal` number. 
+ * @param y the `BigDecimal` used to create the rational number + * @param md the maximum denominator + */ + def from_BigDecimal (y: BigDecimal, md: Long = Long.MaxValue): (Long, Long) = + val epsilon = _1_big / md + var d = y + val n = d.setScale (0, BigDecimal.RoundingMode.FLOOR) // floor (d) + d -= n + if d < epsilon then return (n.toLong, 1L) + else if _1_big - epsilon < d then return (n.toLong + 1L, 1L) + + val dp = d + epsilon + val dm = d - epsilon + var low_n = 0L // lower numerator + var low_d = 1L // lower denominator + var upp_n = 1L // upper numerator + var upp_d = 1L // upper denominator + var mid_n = 1L // middle numerator + var mid_d = 1L // middle denominator + + breakable { + while true do + mid_n = low_n + upp_n + mid_d = low_d + upp_d + if mid_d * dp < mid_n then + upp_n = mid_n + upp_d = mid_d + else if mid_d * dm > mid_n then + low_n = mid_n + low_d = mid_d + else + break () + end while + } // breakable + (n.toLong * mid_d + mid_n, mid_d) + end from_BigDecimal + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a rational number from a `Double`. + * @param y the double used to create the rational number + * @param md the maximum denominator + */ + def fromDouble (y: Double): Rat = Rat (from_Double (y)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Determine the numerator and denonimator of the closest rational number + * to the given `BigDecimal` number. 
+ * @see http://stackoverflow.com/questions/5124743/algorithm-for-simplifying- + * decimal-to-fractions/5128558#5128558 + * @param y the double used to create the rational number + * @param md the maximum denominator + */ + def from_Double (y: Double, md: Long = maxDen): (Long, Long) = + val epsilon = 1.0 / md + var d = y + val n = floor (d) + d -= n + if d < epsilon then return (n.toLong, 1L) + else if 1.0 - epsilon < d then return (n.toLong + 1L, 1L) + var low_n = 0L + var low_d = 1L + var upp_n = 1L + var upp_d = 1L + var mid_n = 1L + var mid_d = 1L + + breakable { + while true do + mid_n = low_n + upp_n + mid_d = low_d + upp_d + if mid_d * (d + epsilon) < mid_n then + upp_n = mid_n + upp_d = mid_d + else if mid_n < (d - epsilon) * mid_d then + low_n = mid_n + low_d = mid_d + else + break () + end while + } // breakable + (n.toLong * mid_d + mid_n, mid_d) + end from_Double + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a rational number from a `Double`. + * FIX: if den not a power of 2, it goes to 'md'. + * @see http://rosettacode.org/wiki/Convert_decimal_number_to_rational + * @param y the double used to create the rational number. 
+ * @param md the maximum denominator + */ + def fromDouble2 (y: Double, md: Long = maxDen): Rat = + if y =~ 0.0 then return _0 + val neg = y < 0.0 + val h = Array (0L, 1L, 0L) + val k = Array (1L, 0L, 0L) + + var f = if neg then -y else y + var a = 0L + var n = 1L + var x = 0L + var end = false + + while f != floor (f) do { n <<= 1; f *= 2 } // double f until no frac + var d = f.toLong + + breakable { + for i <- 0 to 63 do + a = if n != 0L then d / n else 0L + if i > 0 && a == 0L then break () + x = d; d = n; n = x % n + x = a + if k(1) * a + k(0) >= md then + x = (md - k(0)) / k(1) + if x * 2L >= a || k(1) >= md then end = true else break () + + h(2) = x * h(1) + h(0); h(0) = h(1); h(1) = h(2) + k(2) = x * k(1) + k(0); k(0) = k(1); k(1) = k(2) + + if end then break () + end for + } // breakable + + Rat (if neg then -h(1) else h(1), k(1)) + end fromDouble2 + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a rational number with a small odd denominator from a `Double` y + * by running through possible odd denominators d from 1 to `max_d` and creating + * two candidate numerators n for each d, one with n/d below and other above. + * @param y the double used to create the rational number. 
+ */ + def fromDouble3 (y: Double): Rat = + val max_d = 51 // limit the maximum denominator to around 50 + var nb, n = 0 // numerator: best and temp + var db = 0 // denominator: best + var eb, e = Double.MaxValue // error: best and temp + + for d <- 1 to max_d by 2 do // try all odd denominators + val dd = d.toDouble + n = math.floor (y * d).toInt // candidate Rat (n, d) below y + e = math.abs (y - n/dd) + if e < eb then { eb = e; nb = n; db = d } + n = math.ceil (y * d).toInt // candidate Rat (n, d) above y + e = math.abs (y - n/dd) + if e < eb then { eb = e; nb = n; db = d } + end for + Rat (nb, db) + end fromDouble3 + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a rational number from a `Float`. + * @param y the float used to create the rational number. + */ + def fromFloat (y: Float): Rat = fromDouble (y) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a rational number from an `Int`. + * @param n the integer used to create the rational number. + */ + def fromInt (n: Int) = Rat (n) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a rational number from a `Long`. + * @param n the long used to create the rational number. + */ + def fromLong (n: Long) = Rat (n) + +end Rat + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ratTest` main function is used to test the `Rat` class. 
+ * > runMain scalation.ratTest + */ +@main def ratTest (): Unit = + + import Rat._ + + val a = Rat (1L, 4L) + val b = Rat (1L, 2L) + val c = Rat (2L, 3L) + val d = Rat (8L, 10L) + val e = Rat (5L) + val f = Rat (4L, 5L) + + println ("maxDen = " + maxDen) + + println ("a = " + a) + println ("b = " + b) + println ("c = " + c) + println ("d = " + d) + println ("e = " + e) + println ("-c = " + -c) + println ("c + d = " + (c + d)) + println ("c - d = " + (c - d)) + println ("c * d = " + (c * d)) + println ("c / d = " + (c / d)) + println ("c ~^ 2L = " + (c ~^ 2L)) + println ("a ~^ b = " + (a ~^ b)) + println ("c.abs = " + c.abs) + println ("a.sqrt = " + a.sqrt) + println ("c < d = " + (c < d)) + println ("d.reduce = " + d.reduce ()) + + println ("fromDouble (.5)) = " + fromDouble (.5)) + println ("fromDouble (.25)) = " + fromDouble (.25)) + println ("fromDouble (.125)) = " + fromDouble (.125)) + println ("fromDouble (.0625)) = " + fromDouble (.0625)) + println ("fromDouble (-.125)) = " + fromDouble (-.125)) + println ("fromDouble (1./3.)) = " + fromDouble (1.0/3.0)) + println ("fromDouble (.334)) = " + fromDouble (.334)) + println ("fromDouble (.2)) = " + fromDouble (.2)) + println ("fromDouble (0.0)) = " + fromDouble (0.0)) + + println ("fromDouble3 (0.33)) = " + fromDouble3 (0.33)) + println ("fromDouble3 (0.333)) = " + fromDouble3 (0.333)) + println ("fromDouble3 (0.5)) = " + fromDouble3 (0.5)) + println ("fromDouble3 (0.67)) = " + fromDouble3 (0.67)) + println ("fromDouble3 (0.667)) = " + fromDouble3 (0.667)) + + println ("Compare two rational numbers") + println (s"$c.compare ($f) = ${c.compare (f)}") + println (s"$c.equals ($f) = ${c.equals (f)}") + println (s"$c == $f = ${c == f}") + println (s"$c =~ $f = ${c =~ f}") + println (s"$d.compare ($f) = ${d.compare (f)}") + println (s"$d.equals ($f) = ${d.equals (f)}") + println (s"$d == $f = ${d == f}") + println (s"$d =~ $f = ${d =~ f}") + +end ratTest + diff --git a/src/main/scala/scalation/SetExt.scala 
b/src/main/scala/scalation/SetExt.scala deleted file mode 100644 index c60f38545..000000000 --- a/src/main/scala/scalation/SetExt.scala +++ /dev/null @@ -1,70 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Wed Mar 13 21:00:26 EDT 2024 - * @see LICENSE (MIT style license file). - * - * @note Extension Method for Scala's Mutable Sets - */ - -package scalation - -import scala.collection.mutable.Set - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Extend `Set` to include unicode symbols for subset and proper subset (⊆, ⊂, ⊈, ⊄). - */ -extension [T] (x: Set [T]) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether set x is a subset of set y. - * @param y the other set - */ - inline def ⊆ (y: Set [T]): Boolean = x subsetOf y - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether set x is a proper subset of set y. - * @param y the other set - */ - inline def ⊂ (y: Set [T]): Boolean = (x subsetOf y) && x != y - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether set x is not a subset of set y. - * @param y the other set - */ - inline def ⊈ (y: Set [T]): Boolean = ! (x subsetOf y) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether set x is not a proper subset of set y. - * @param y the other set - */ - inline def ⊄ (y: Set [T]): Boolean = ! (x subsetOf y) || x == y - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `setExtTest` main function test the SetExt extention methods. 
- * > runMain scalation.setExtTest - */ -@main def setExtTest (): Unit = - - val x = Set (1, 2) - val y = Set (1, 2, 3) - - println (s"x = $x") - println (s"y = $y") - - println (s"x ⊆ y = ${x ⊆ y}") - println (s"x ⊆ x = ${x ⊆ x}") - - println (s"x ⊂ y = ${x ⊂ y}") - println (s"x ⊂ x = ${x ⊂ x}") - - println (s"x ⊈ y = ${x ⊈ y}") - println (s"x ⊈ x = ${x ⊈ x}") - - println (s"x ⊄ y = ${x ⊄ y}") - println (s"x ⊄ x = ${x ⊄ x}") - -end setExtTest - diff --git a/src/main/scala/scalation/SetExt.scalaa b/src/main/scala/scalation/SetExt.scalaa new file mode 100644 index 000000000..54d05788f --- /dev/null +++ b/src/main/scala/scalation/SetExt.scalaa @@ -0,0 +1,103 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Wed Mar 13 21:00:26 EDT 2024 + * @see LICENSE (MIT style license file). + * + * @note Extension Method for Scala's Mutable Sets + */ + +package scalation + +import scala.collection.mutable.Set + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Extend `Set` to include Unicode symbols for subset and proper subset (⊆, ⊂, ⊈, ⊄), + * interection/union (∩, ∪), and quantifiers exists, not exists and forall (∃, ∄, ∀). + * @see www.scala-lang.org/api/3.7.4/scala/collection/mutable/Set.html + * @see `scalation.Unicode` + */ +extension [T] (x: Set [T]) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether set x is a subset of set y. + * @param y the other set + */ + inline def ⊆ (y: Set [T]): Boolean = x subsetOf y + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether set x is a proper subset of set y. 
+ * @param y the other set + */ + inline def ⊂ (y: Set [T]): Boolean = (x subsetOf y) && x != y + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether set x is not a subset of set y. + * @param y the other set + */ + inline def ⊈ (y: Set [T]): Boolean = ! (x subsetOf y) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether set x is not a proper subset of set y. + * @param y the other set + */ + inline def ⊄ (y: Set [T]): Boolean = ! (x subsetOf y) || x == y + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether there exists at least one element of set x for which predicate p holds. + * @param p the predicate to check + */ + inline def ∃ (p: T => Boolean): Boolean = x.exists (p) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether there does not exists at least one element of set x for which predicate p holds. + * @param p the predicate to check + */ + inline def ∄ (p: T => Boolean): Boolean = ! x.exists (p) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether predicate p holds for all elements of set x. + * @param p the predicate to check + */ + inline def ∀ (p: T => Boolean): Boolean = x.forall (p) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the intersetion of set x and set y (x & y). + * @param y the other set + */ + inline def ∩ (y: Set [T]): Set [T] = x.intersect (y) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the union of set x and set y (x | y). 
+ * @param y the other set + */ + inline def ∪ (y: Set [T]): Set [T] = x.union (y) + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `setExtTest` main function test the SetExt extension methods. + * > runMain scalation.setExtTest + */ +@main def setExtTest (): Unit = + + val x = Set (1, 2) + val y = Set (1, 2, 3) + + println (s"x = $x") + println (s"y = $y") + + println (s"x ⊆ y = ${x ⊆ y}") + println (s"x ⊆ x = ${x ⊆ x}") + + println (s"x ⊂ y = ${x ⊂ y}") + println (s"x ⊂ x = ${x ⊂ x}") + + println (s"x ⊈ y = ${x ⊈ y}") + println (s"x ⊈ x = ${x ⊈ x}") + + println (s"x ⊄ y = ${x ⊄ y}") + println (s"x ⊄ x = ${x ⊄ x}") + +end setExtTest + diff --git a/src/main/scala/scalation/SkipList.scala b/src/main/scala/scalation/SkipList.scala index cee59f777..2e9d5ca02 100644 --- a/src/main/scala/scalation/SkipList.scala +++ b/src/main/scala/scalation/SkipList.scala @@ -12,7 +12,6 @@ package scalation -import scala.reflect.ClassTag import scala.util.Random //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -21,7 +20,7 @@ import scala.util.Random * @tparam V the type of the values assigned to keys in this sorted map * @param ordering the implicit ordering used to compare objects of type K */ -class SkipList [K: ClassTag, V: ClassTag] (using ordering: Ordering [K]): +class SkipList [K, V] (using ordering: Ordering [K]): private val maxlevel = 10 // maximum number of levels for the skip list private val random = new Random () diff --git a/src/main/scala/scalation/SortedSetExt.scala b/src/main/scala/scalation/SortedSetExt.scala new file mode 100644 index 000000000..10d22158d --- /dev/null +++ b/src/main/scala/scalation/SortedSetExt.scala @@ -0,0 +1,103 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Wed Mar 13 21:00:26 EDT 2024 + * @see LICENSE (MIT style license file). 
+ * + * @note Extension Method for Scala's Mutable SortedSets + */ + +package scalation + +import scala.collection.mutable.SortedSet + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Extend `SortedSet` to include Unicode symbols for subset and proper subset (⊆, ⊂, ⊈, ⊄), + * interection/union (∩, ∪), and quantifiers exists, not exists and forall (∃, ∄, ∀). + * @see www.scala-lang.org/api/3.7.4/scala/collection/mutable/SortedSet.html + * @see `scalation.Unicode` + */ +extension [T] (x: SortedSet [T]) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether set x is a subset of set y. + * @param y the other set + */ + inline def ⊆ (y: SortedSet [T]): Boolean = x subsetOf y + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether set x is a proper subset of set y. + * @param y the other set + */ + inline def ⊂ (y: SortedSet [T]): Boolean = (x subsetOf y) && x != y + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether set x is not a subset of set y. + * @param y the other set + */ + inline def ⊈ (y: SortedSet [T]): Boolean = ! (x subsetOf y) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether set x is not a proper subset of set y. + * @param y the other set + */ + inline def ⊄ (y: SortedSet [T]): Boolean = ! (x subsetOf y) || x == y + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether there exists at least one element of set x for which predicate p holds. 
+ * @param p the predicate to check + */ + inline def ∃ (p: T => Boolean): Boolean = x.exists (p) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether there does not exists at least one element of set x for which predicate p holds. + * @param p the predicate to check + */ + inline def ∄ (p: T => Boolean): Boolean = ! x.exists (p) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether predicate p holds for all elements of set x. + * @param p the predicate to check + */ + inline def ∀ (p: T => Boolean): Boolean = x.forall (p) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the intersetion of set x and set y (x & y). + * @param y the other set + */ + inline def ∩ (y: SortedSet [T]): SortedSet [T] = x.intersect (y) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the union of set x and set y (x | y). + * @param y the other set + */ + inline def ∪ (y: SortedSet [T]): SortedSet [T] = x.union (y) + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `sortedSetExtTest` main function test the SortedSetExt extension methods. 
+ * > runMain scalation.sortedSetExtTest + */ +@main def sortedSetExtTest (): Unit = + + val x = SortedSet (1, 2) + val y = SortedSet (1, 2, 3) + + println (s"x = $x") + println (s"y = $y") + + println (s"x ⊆ y = ${x ⊆ y}") + println (s"x ⊆ x = ${x ⊆ x}") + + println (s"x ⊂ y = ${x ⊂ y}") + println (s"x ⊂ x = ${x ⊂ x}") + + println (s"x ⊈ y = ${x ⊈ y}") + println (s"x ⊈ x = ${x ⊈ x}") + + println (s"x ⊄ y = ${x ⊄ y}") + println (s"x ⊄ x = ${x ⊄ x}") + +end sortedSetExtTest + diff --git a/src/main/scala/scalation/TimeNum.scala b/src/main/scala/scalation/TimeNum.scala index 8c2422f87..eee8b28b1 100644 --- a/src/main/scala/scalation/TimeNum.scala +++ b/src/main/scala/scalation/TimeNum.scala @@ -26,6 +26,15 @@ extension (s: Instant) def <<= (t: Instant): Boolean = s.compareTo (t) <= 0 +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Given instance to be used with methods such as `sorted` to define the ordering + * of elements. + * FIX - should be implicit in "sorted (using timeNumOrd)" + */ +given timeNumOrd: Ordering [TimeNum] = new Ordering [TimeNum]: + def compare (s: TimeNum, t: TimeNum) = s compare t + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `TimeNum` companion object is used to represent and operate on date-time values. * It contains an implicit class definition for `TimeNum`, which extends @@ -85,7 +94,7 @@ object TimeNum: /** Ordering for date-time values */ - val ord = new Ordering [TimeNum] { def compare (s: TimeNum, t: TimeNum) = s compare t } +// val ord = new Ordering [TimeNum] { def compare (s: TimeNum, t: TimeNum) = s compare t } /** Nano-seconds must be strictly than this limit (billion nanoseconds = 1 second) */ @@ -208,10 +217,8 @@ object TimeNum: var (dts, dtp) = (dt, dtPattern) if ! dtPattern.contains ("h") && ! dtPattern.contains ("H") then dts += DEFAULT_HOUR._1; dtp += DEFAULT_HOUR._2 - end if if ! dtPattern.contains ("z") && ! 
dtPattern.contains ("Z") then dts += DEFAULT_ZONE._1; dtp += DEFAULT_ZONE._2 - end if debug ("apply", s"dts = $dts, dtp = $dtp") new TimeNum (dts, dtp) end apply @@ -449,36 +456,36 @@ class TimeNum (val inst: Instant) inline def ≥ (t: TimeNum): Boolean = t <= this //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether this is within the given bounds + /** Determine whether this is within the given bounds. * @param lim the given (lower, upper) bounds */ - def in (lim: (TimeNum, TimeNum)): Boolean = lim._1 <= this && this <= lim._2 + infix def in (lim: (TimeNum, TimeNum)): Boolean = lim._1 <= this && this <= lim._2 - inline def ∈ (lim: (TimeNum, TimeNum)): Boolean = this.in (lim) + inline def ∈ (lim: (TimeNum, TimeNum)): Boolean = this in lim //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Determine whether this is in the given set. * @param lim the given set of values */ - def in (set: Set [TimeNum]): Boolean = set contains this + infix def in (set: Set [TimeNum]): Boolean = set contains this - inline def ∈ (set: Set [TimeNum]): Boolean = this.in (set) + inline def ∈ (set: Set [TimeNum]): Boolean = this in set //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether this is not within the given bounds + /** Determine whether this is outside the given bounds. * @param lim the given (lower, upper) bounds */ - def not_in (lim: (TimeNum, TimeNum)): Boolean = ! (lim._1 <= this && this <= lim._2) + infix def out (lim: (TimeNum, TimeNum)): Boolean = this < lim._1 || lim._2 < this - inline def ∉ (lim: (TimeNum, TimeNum)): Boolean = this.not_in (lim) + inline def ∉ (lim: (TimeNum, TimeNum)): Boolean = this out lim //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Determine whether this is not in the given set. * @param lim the given set of values */ - def not_in (set: Set [TimeNum]): Boolean = ! 
(set contains this) + infix def not_in (set: Set [TimeNum]): Boolean = ! (set contains this) - inline def ∉ (set: Set [TimeNum]): Boolean = this.not_in (set) + inline def ∉ (set: Set [TimeNum]): Boolean = this not_in set //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Get a temporal aspect (e.g., month, week, day, hour, ...). @@ -634,7 +641,7 @@ end TimeNum val arr = Array (date1, date2, date3) println ("compare = " + (date1 compare date2)) println ("original arr = " + stringOf (arr)) - val sarr = arr.sorted (ord) + val sarr = arr.sorted (using timeNumOrd) println ("sorted arr = " + stringOf (sarr)) end timeNumTest diff --git a/src/main/scala/scalation/Timer.scala b/src/main/scala/scalation/Timer.scala index 9b8c93a51..4f27f04f8 100644 --- a/src/main/scala/scalation/Timer.scala +++ b/src/main/scala/scalation/Timer.scala @@ -78,7 +78,7 @@ def time [R] (amplify: Int, skip: Boolean = true) (block: => R): R = var result: R = null.asInstanceOf [R] if skip then result = block // skip measuring first time due to JIT-Compiler val t0 = nanoTime () // start time - cfor (0, amplify) { i => result = block } // exercise code amplify times + cfor (0, amplify) { _ => result = block } // exercise code amplify times val t1 = nanoTime () // end time println ("Elapsed time: " + (t1 - t0) * NS_PER_MS / amplify + " ms") result @@ -112,11 +112,35 @@ def timed [R] (amplify: Int, skip: Boolean = true) (block: => R): (R, Double) = var result: R = null.asInstanceOf [R] if skip then result = block // skip measuring first time due to JIT-Compiler val t0 = nanoTime () // start time - cfor (0, amplify) { i => result = block } // exercise code amplify times + cfor (0, amplify) { _ => result = block } // exercise code amplify times val t1 = nanoTime () // end time (result, (t1 - t0) * NS_PER_MS / amplify) end timed +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Calculate the elapsed time in milliseconds (ms) for the 
execution of an + * arbitrary block of code: 'timedX { block }'. Return any result produced + * by the block of code and the average elapsed time. + * @see http://stackoverflow.com/questions/9160001/how-to-profile-methods-in-scala + * Executes the block of code multiple times for better resolution and accuracy, + * throwing the slowest execution to improve consistency (e.g., may be slow due to GC). + * @param amplify the number of times to execute the block of code + * @param skip whether to skip the first execution due to slowness of the JIT-Compiler + * @param block the block of code to be executed + */ +def timedX [R] (amplify: Int, skip: Boolean = true) (block: => R): (R, Double) = + var result: R = null.asInstanceOf [R] + if skip then result = block // skip measuring first time due to JIT-Compiler + val dt = Array.ofDim [Long] (amplify+1) + var t0 = 0L + cfor (0, amplify+1) { i => + t0 = nanoTime () // start time + result = block + dt(i) = nanoTime () - t0 // time difference + } // cfor + (result, (dt.sum - dt.max) * NS_PER_MS / amplify) +end timedX + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Calculate the elapsed time in milliseconds (ms) for the execution of an * arbitrary block of code: 'gauge { block }'. 
Return the block of code's @@ -143,7 +167,7 @@ end gauge def gauge [R] (amplify: Int, skip: Boolean = true) (block: => R): Double = if skip then block // skip measuring first time due to JIT-Compiler val t0 = nanoTime () // start time - cfor (0, amplify) { i => block } // exercise code amplify times + cfor (0, amplify) { _ => block } // exercise code amplify times val t1 = nanoTime () // end time (t1 - t0) * NS_PER_MS / amplify end gauge @@ -187,9 +211,17 @@ end memoryUsed end quadraticEq val (a, b, c) = (1, -5, 6) - banner (s"Quadratic Equation ax^2 + bx + c = 0 where a = $a, b = $b, c = $c") - val roots = time (1000) { quadraticEq (1, -5, 6) } + banner (s"time: Quadratic Equation ax^2 + bx + c = 0 where a = $a, b = $b, c = $c") + val roots = time (10000) { quadraticEq (1, -5, 6) } println (s"roots = $roots") + banner (s"timed: Quadratic Equation ax^2 + bx + c = 0 where a = $a, b = $b, c = $c") + var result = timed (10000) { quadraticEq (1, -5, 6) } + println (s"result = $result") + + banner (s"timedX: Quadratic Equation ax^2 + bx + c = 0 where a = $a, b = $b, c = $c") + result = timedX (10000) { quadraticEq (1, -5, 6) } + println (s"result = $result") + end timerTest diff --git a/src/main/scala/scalation/Unicode.scala b/src/main/scala/scalation/Unicode.scala index 868cb67af..413f26ee8 100644 --- a/src/main/scala/scalation/Unicode.scala +++ b/src/main/scala/scalation/Unicode.scala @@ -6,7 +6,14 @@ * @date Wed Sep 9 13:07:48 EDT 2015 * @see LICENSE (MIT style license file). 
* - * @note Facilitates of the Use of Unicode Symbols + * @note Facilitates of the Use of Unicode Symbols in ScalaTion (copy-paste) + * + * @see Unicode operator definitions: + * in scalation: `Complex`, `Rat`, `SortedSetExt`, `TimeNum`, `Util`, `ValueType` + * in mathstat: `MatrixD`, `VectorD` + * in database: `Tabular` + * in calculus: `Differential`, `Integral`, `Poly` + * in modeling.forecasting: `ARIMA_diff` */ package scalation @@ -17,7 +24,8 @@ import scala.runtime.ScalaRunTime.stringOf /** The `Unicode` object provides arrays to facilitate the use of Unicode. * ScalaTion currently uses a few UTF-16 characters, see code below. Most UTF-16 * characters are 2 bytes (16 bits). Extended characters are encoded in 4 bytes. - * ScalaTion limits characters to the range 'U+0000' to 'U+2bff'. + * The 7-bit ASCII range is a subset of Unicode, the first 128 code points from U+0000 to U+007F + * ScalaTion limits characters to the range '\u0000' to 'U+2BFF'. * Developers should test Unicode symbols here before trying them in the code. 
* @see en.wikipedia.org/wiki/UTF-16 * @see www.tamasoft.co.jp/en/general-info/unicode.html @@ -26,6 +34,119 @@ import scala.runtime.ScalaRunTime.stringOf */ object Unicode: +// operators + + // Symbol Code Point Description Category + val op = Array ( ('¬', '\u00AC'), // Not Sign (Negation) Logic (Latin-1 Supplement) + ('±', '\u00B1'), // Plus-Minus Sign Arithmetic + ('·', '\u00B7'), // Middle Dot/Bullet Point Miscellaneous + ('×', '\u00D7'), // Multiplication Sign Arithmetic + ('÷', '\u00F7'), // Division Sign Arithmetic/Relational Algebra + + ('ᵀ', '\u1D40'), // Transpose (used as superscript T) Linear Algebra + ('‖', '\u2016'), // Double Vertical Line (Norm/Determinant) Linear Algebra + ('‾', '\u203E'), // Overscore Max, with _ for Min + ('⁻', '\u207B'), // Superscript Minus Sign Max, Exponentiation + ('₋', '\u208B'), // Subscript Minus Sign Min, Exponentiation + + ('ℱ', '\u2131'), // Script F for Aggregate Function Relational Algebra + ('↑', '\u2191'), // Upward Arrow Exponentiation/Sorting + ('↓', '\u2193'), // Downward Arrow Sorting + ('↦', '\u21A6'), // Rightwards Arrow From Bar (Maps To) Mapping + + ('∀', '\u2200'), // For All (Universal Quantifier) Logic + ('∂', '\u2202'), // Partial Differential Calculus (use d for derivative) + ('∃', '\u2203'), // There Exists (Existential Quantifier) Logic + ('∄', '\u2204'), // Not Exists (Existential Quantifier) Logic + ('∆', '\u2206'), // Difference Time Series + ('∇', '\u2207'), // Nabla (Gradient, Curl, Divergence) Calculus/Vector + ('∈', '\u2208'), // Element Of Set Theory + ('∉', '\u2209'), // Not Element Of Set Theory + ('∏', '\u220F'), // N-ary Product N-ary Operator + + ('∑', '\u2211'), // N-ary Summation N-ary Operator + ('∘', '\u2218'), // Ring Operator (Function Composition) Function + ('√', '\u221A'), // Square Root (Radical) Function + ('∛', '\u221B'), // Cube Root Function + ('∝', '\u221D'), // Proportional To Relational + + ('∣', '\u2223'), // Divides Relational + ('∥', '\u2225'), // Parallel To 
Relational + ('∧', '\u2227'), // Logical AND (Conjunction) Logic + ('∨', '\u2228'), // Logical OR (Disjunction) Logic + ('∩', '\u2229'), // Intersection Set Theory + ('∪', '\u222A'), // Union Set Theory + ('∫', '\u222B'), // Integral Calculus + ('∬', '\u222C'), // Double Integral Calculus + + ('≈', '\u2248'), // Almost Equal To Relational + + ('≠', '\u2260'), // Not Equal To Relational + ('≡', '\u2261'), // Identical To Relational + ('≤', '\u2264'), // Less-Than or Equal To Relational + ('≥', '\u2265'), // Greater-Than or Equal To Relational + + ('⊂', '\u2282'), // Subset Of Set Theory + ('⊃', '\u2283'), // Superset Of Set Theory + ('⊄', '\u2284'), // Not Subset Of Set Theory + ('⊅', '\u2285'), // Not Superset Of Set Theory + ('⊆', '\u2286'), // Subset Of Or Equal To Set Theory + ('⊇', '\u2287'), // Superset Of Or Equal To Set Theory + ('⊈', '\u2288'), // Not Subset Of Or Equal To Set Theory + ('⊉', '\u2289'), // Not Superset Of Or Equal To Set Theory + + ('⊕', '\u2295'), // Circled Plus (Direct Sum) Algebra/Linear Algebra + ('⊗', '\u2297'), // Circled Times (Tensor Product) Algebra/Linear Algebra + ('⊙', '\u2299'), // Hadamard Product Linear Algebra + ('⊥', '\u22A5'), // Perpendicular / Up tack Geometry/Logic + ('∙', '\u22C5'), // Dot Product operator (Vector/Matrix) Arithmetic/Linear Algebra + ('⋆', '\u22C6'), // Star/Convolution Linear Algebra + ('⋈', '\u22C8'), // Join Relational Algebra + ('⋉', '\u22C9'), // Left Join Relational Algebra + ('⋊', '\u22C9'), // Right Join Relational Algebra + + ('⟨', '\u27E8'), // Left Angle Bracket (Inner Product) Linear Algebra + ('⟩', '\u27E9')) // Right Angle Bracket (Inner Product) Linear Algebra + +// special values + + // Symbol Code Point Description Category + val sv = Array ( ('ŷ', '\u0177'), // y-hat (estimate for y) Statistics + ('ƒ', '\u0192'), // f with a hook ƒ = f' (derivative of f) + ('∅', '\u2205'), // Empty Set Set Theory + ('∞', '\u221E')) // Infinity Miscellaneous + +// Small Greek letters '\u03B1' to 
'\u03C9' + + // Symbol Code Point Description + val grk = Array ( ('α', '\u03B1'), // Greek Small Letter alpha + ('β', '\u03B2'), // Greek Small Letter beta + ('γ', '\u03B3'), // Greek Small Letter gamma + ('δ', '\u03B4'), // Greek Small Letter delta + ('ε', '\u03B5'), // Greek Small Letter epsilon + ('ζ', '\u03B6'), // Greek Small Letter zeta + ('η', '\u03B7'), // Greek Small Letter eta + ('θ', '\u03B8'), // Greek Small Letter theta + ('ι', '\u03B9'), // Greek Small Letter iota + ('κ', '\u03BA'), // Greek Small Letter kappa + ('λ', '\u03BB'), // Greek Small Letter lambda + ('μ', '\u03BC'), // Greek Small Letter mu + ('ν', '\u03BD'), // Greek Small Letter nu + ('ξ', '\u03BE'), // Greek Small Letter xi + ('ο', '\u03BF'), // Greek Small Letter omicron + ('π', '\u03C0'), // Greek Small Letter pi + ('ρ', '\u03C1'), // Greek Small Letter rho + ('ς', '\u03C2'), // Greek Small Letter final sigma + ('σ', '\u03C3'), // Greek Small Letter sigma + ('τ', '\u03C4'), // Greek Small Letter tau + ('υ', '\u03C5'), // Greek Small Letter upsilon + ('φ', '\u03C6'), // Greek Small Letter phi + ('χ', '\u03C7'), // Greek Small Letter chi + ('ψ', '\u03C8'), // Greek Small Letter psi + ('ω', '\u03C9')) // Greek Small Letter omega + +// superscripts + /** Unicode characters for superscripts to 0, 1, ... 9 */ val supc = Array ('⁰', '¹', '²', '³', '⁴', '⁵', '⁶', '⁷', '⁸', '⁹') @@ -34,6 +155,8 @@ object Unicode: */ val supn = Array ('\u2070', '\u00b9', '\u00b2', '\u00b3', '\u2074', '\u2075', '\u2076', '\u2077', '\u2078', '\u2079') +// subscripts + /** Unicode characters for subscripts to 0, 1, ... 9 */ val subc = Array ('₀', '₁', '₂', '₃', '₄', '₅', '₆', '₇', '₈', '₉') @@ -59,19 +182,41 @@ end Unicode //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `unicodeTest` main function is used to the `Unicode` object. + * Show operators, special characters, and Greek letters. 
* > runMain scalation.unicodeTest */ @main def unicodeTest (): Unit = import Unicode._ + println ("List of Unicode Operator Characters used in ScalaTion:") + for o <- op do println (o) + + println ("List of Unicode Special Characters used in ScalaTion:") + for s <- sv do println (s) + + println ("List of Unicode Greek Letters used in ScalaTion:") + for g <- grk do println (g) + +end unicodeTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `unicodeTest2` main function is used to the `Unicode` object. + * Show subscripts and superscripts. + * > runMain scalation.unicodeTest2 + */ +@main def unicodeTest2 (): Unit = + + import Unicode._ + val uc = Array ('Ɛ', 'Ʋ', 'π', 'σ', '∙', '≠', '≤', '≥', '⋂', '⋃', '⋈') val un = Array ('\u0190', '\u01b2', '\u03c0', '\u03c3', '\u2219', '\u2260', '\u2264', '\u2265', '\u22c2', '\u22c3', '\u22c8') - println ("unicode character:") + println ("Unicode character:") for c <- uc do println (s"c = $c = U+%04x".format (c.toInt)) - println ("unicode number:") + println ("Unicode number:") for n <- un do println (s"n = $n = U+%04x".format (n.toInt)) def ∙ () = "∙ worked" @@ -86,5 +231,57 @@ end Unicode println ("sup (12) = " + sup (12)) println ("sub (12) = " + sub (12)) -end unicodeTest +end unicodeTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `unicodeTest3` main function is used to the `Unicode` object. + * Show Scala 3 operators. + * First character determines precedence: The first character of an operator is the + * primary factor in determining its precedence. + * Letters vs. symbols: Operators that start with letters have the lowest precedence, + * while those with symbols like *, /, and % have higher precedence. 
+ * @see docs.scala-lang.org/tour/operators.html + * @see www.geeksforgeeks.org/scala/operators-precedence-in-scala/ + * > runMain scalation.unicodeTest3 + */ +@main def unicodeTest3 (): Unit = + + println (""" + Operator precedence table (from highest to lowest precedence) + Category Operators Associativity + Other Symbols Any symbol not listed Left to right + Postfix () [] Left to right + Unary ! ~ Right to left + Multiplicative * / % Left to right + Additive + - Left to right + List Cons :: Right to left + Shift >> >>> << Left to right + Relational > >= < <= Left to right + Equality == != Left to right + Bitwise AND & Left to right + Bitwise XOR ^ Left to right + Bitwise OR | Left to right + Logical AND && Left to Right + Logical OR || Left to Right + Assignment = += -= *= /= %= Right to left + (cont.) >>= <<= &= ^= |= Right to left + Infix letters and, or, if, etc. Left to right + """) + + println (""" + Operator precedence based on first character + (characters not shown below) + * / % + + - + : + < > + = ! 
+ & + ^ + | + (all letters, $, _) + """) + +end unicodeTest3 diff --git a/src/main/scala/scalation/Util.scala b/src/main/scala/scalation/Util.scala index 4d547a816..596dc5d58 100644 --- a/src/main/scala/scalation/Util.scala +++ b/src/main/scala/scalation/Util.scala @@ -107,7 +107,7 @@ def getFromURL_File (path: String): Iterator [String] = // return fromURL (new URL (path)).getLines () return fromURL (new URI (path).toURL).getLines () catch - case mue: MalformedURLException => + case _ : MalformedURLException => end try end if @@ -117,7 +117,6 @@ def getFromURL_File (path: String): Iterator [String] = else println (s"getFromURL_File: file '$path' does not exist, try prefixing DATA-DIR") fromFile (DATA_DIR + path).getLines () - end if end getFromURL_File //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -216,6 +215,7 @@ end cfor //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** C/Java style for loop (over a range) provides improved performance. * usage: cfor (0, 10) { i => a(i) = 2 * i } + * usage: cfor (0, 10) { _ => println (rng.gen) } * usage: cfor (0, 10, 2) { i => a(i) = 2 * i } * @see august.nagro.us/scala-for-loop.html * @param start initialization value (i = start) @@ -296,7 +296,7 @@ end Σ */ inline def summation (n: Int)(inline formula: => Double): Double = var sum_ = 0.0 - cfor (0, n) { i => sum_ += formula } + cfor (0, n) { _ => sum_ += formula } sum_ end summation @@ -337,7 +337,8 @@ end median3 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `cforTest` test the cfor loops that are faster replacements for Scala's for-comprehension. +/** The `cforTest` main function tests the cfor loops that are faster replacements for Scala's + * for-comprehension. * Run this code for performance results. Note, due to JIT, reordering the code may change * the relative performance. Generally, case 4 "cfor (0, 100000) { ... 
}" is the fastest. * @see Timer.scala @@ -440,7 +441,8 @@ end cforTest //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `cforTest2` test the cfor loops that are faster replacements for Scala's for-yield. +/** The `cforTest2` main function test the cfor loops that are faster replacements for Scala's + * for-yield. * Run this code for performance results. Note, due to JIT, reordering the code may change * the relative performance. Generally, case 4 "cfor (0, 100000) { ... }" is the fastest. * @see Timer.scala diff --git a/src/main/scala/scalation/ValueType.scala b/src/main/scala/scalation/ValueType.scala index fc338253c..e054cfa6f 100644 --- a/src/main/scala/scalation/ValueType.scala +++ b/src/main/scala/scalation/ValueType.scala @@ -6,9 +6,14 @@ * @see LICENSE (MIT style license file). * * @note ValueType - Union Datatype for Atomic Database Values + * `Double`, `Int`, `Long`, `String`, `TimeNum` * (includes useful related values and methods) * * @see "type ValueType" below + * + * @note Other Numeric/Numeric-Related Types: + * in Scala 3: `math.BigDecimal`, `math.BigInt`, `Byte`, `Char` `Float`, `Short` + * in ScalaTion: `Complex`, `Rat` */ package scalation @@ -91,7 +96,8 @@ val sqrt_2byPi = sqrt (2.0 / Pi) /** Indicators of missing/illegal values per datatype */ -val NO_DOUBLE = -0.0 +//val NO_DOUBLE = -0.0 +val NO_DOUBLE = NEGATIVE_INFINITY val NO_INT = java.lang.Integer.MIN_VALUE val NO_LONG = java.lang.Long.MIN_VALUE val NO_STRING = null.asInstanceOf [String] @@ -185,8 +191,8 @@ def rel_diff (x: Double, y: Double): Double = abs (x - y) / max (abs (x), abs (y * @param y the second double precision floating point number */ def near_eq (x: Double, y: Double): Boolean = - if isNaN (x) && isNaN (y) then return true // comment out to follow IEEE standard - if x == y then return true // they are equal + if isNaN (x) && isNaN (y) then return true // comment out to follow IEEE standard + if x == y then return true // 
they are equal val diff = abs (x - y) val norm1 = min (abs (x) + abs (y), MAX_VALUE) @@ -200,41 +206,62 @@ end near_eq def fmt (x: Double): String = "%.6f".format (x) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Extend `Int` to include an exponentiation operator (~^), nearly equal (=~) and in/out. +/** Extend `Int` to include an exponentiation operator (~^), nearly equal (=~), ≤, ≥, ≠, and in/out. */ extension (x: Int) - def ~^ (y: Int): Int = pow (x.toDouble, y.toDouble).toInt - def =~ (y: Int): Boolean = x == y - infix def in (r: (Int, Int)): Boolean = r._1 <= x && x <= r._2 - infix def out (r: (Int, Int)): Boolean = x < r._1 || r._2 < x + inline def ~^ (y: Int): Int = pow (x.toDouble, y.toDouble).toInt + inline def =~ (y: Int): Boolean = x == y + inline def ≤ (y: Int): Boolean = x <= y + inline def ≥ (y: Int): Boolean = x >= y + inline def ≠ (y: Int): Boolean = x != y + infix def in (r: (Int, Int)): Boolean = r._1 <= x && x <= r._2 + inline def ∈ (r: (Int, Int)): Boolean = x in r + infix def out (r: (Int, Int)): Boolean = x < r._1 || r._2 < x + inline def ∉ (r: (Int, Int)): Boolean = x out r //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Extend `Long` to include an exponentiation operator (~^), nearly equal (=~) and in/out. +/** Extend `Long` to include an exponentiation operator (~^), nearly equal (=~), ≤, ≥, ≠, and in/out. 
*/ extension (x: Long) - def ~^ (y: Long): Long = powl (x, y) - def =~ (y: Long): Boolean = x == y - infix def in (r: (Long, Long)): Boolean = r._1 <= x && x <= r._2 - infix def out (r: (Long, Long)): Boolean = x < r._1 || r._2 < x + inline def ~^ (y: Long): Long = powl (x, y) + inline def =~ (y: Long): Boolean = x == y + inline def ≤ (y: Long): Boolean = x <= y + inline def ≥ (y: Long): Boolean = x >= y + inline def ≠ (y: Long): Boolean = x != y + infix def in (r: (Long, Long)): Boolean = r._1 <= x && x <= r._2 + inline def ∈ (r: (Long, Long)): Boolean = x in r + infix def out (r: (Long, Long)): Boolean = x < r._1 || r._2 < x + inline def ∉ (r: (Long, Long)): Boolean = x out r //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Extend `Double` to include an exponentiation operator (~^), nearly equal (=~) and in/out. +/** Extend `Double` to include an exponentiation operators (~^, ↑), nearly equal (=~), ≤, ≥, ≠, and in/out. */ extension (x: Double) - def ~^ (y: Double): Double = pow (x, y) - def =~ (y: Double): Boolean = near_eq (x, y) - infix def in (r: (Double, Double)): Boolean = r._1 <= x && x <= r._2 - infix def out (r: (Double, Double)): Boolean = x < r._1 || r._2 < x + inline def ~^ (y: Double): Double = pow (x, y) + inline def ↑ (y: Rat): Double = pow_ (x, y) // extended to a negative base + inline def =~ (y: Double): Boolean = near_eq (x, y) + inline def ≤ (y: Double): Boolean = x <= y + inline def ≥ (y: Double): Boolean = x >= y + inline def ≠ (y: Double): Boolean = x != y + infix def in (r: (Double, Double)): Boolean = r._1 <= x && x <= r._2 + inline def ∈ (r: (Double, Double)): Boolean = x in r + infix def out (r: (Double, Double)): Boolean = x < r._1 || r._2 < x + inline def ∉ (r: (Double, Double)): Boolean = x out r //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Extend `String` to include an exponentiation operator (~^), nearly equal (=~) and in/out, +/** 
Extend `String` to include an exponentiation operator (~^), nearly equal (=~), ≤, ≥, ≠, and in/out, * as well operators for numeric types. Use '.mkDouble' instead of '.toDouble'. */ extension (x: String) - def ~^ (y: String): String = "NaN" - def =~ (y: String): Boolean = x.toLowerCase () == y.toLowerCase () - infix def in (r: (String, String)): Boolean = r._1 <= x && x <= r._2 - infix def out (r: (String, String)): Boolean = x < r._1 || r._2 < x + inline def ~^ (y: String): String = "NaN" + inline def =~ (y: String): Boolean = x.toLowerCase () == y.toLowerCase () + inline def ≤ (y: String): Boolean = x <= y + inline def ≥ (y: String): Boolean = x >= y + inline def ≠ (y: String): Boolean = x != y + infix def in (r: (String, String)): Boolean = r._1 <= x && x <= r._2 + inline def ∈ (r: (String, String)): Boolean = x in r + infix def out (r: (String, String)): Boolean = x < r._1 || r._2 < x + inline def ∉ (r: (String, String)): Boolean = x out r def unary_- : String = "-" + x def - (y: String): String = x diff y def * (y: String): String = x.repeat (y.toInt) @@ -255,9 +282,9 @@ def safe_toDouble (s: String): Double = try d = java.lang.Double.parseDouble (s) catch - case ex: java.lang.NullPointerException => + case _ : java.lang.NullPointerException => println ("safe_toDouble: can't parse null string") - case ex: java.lang.NumberFormatException => + case _ : java.lang.NumberFormatException => println (s"safe_toDouble: can't parse '$s' to create a Double") end try d @@ -273,9 +300,9 @@ def safe_toInt (s: String): Int = try d = java.lang.Integer.parseInt (s) catch - case ex: java.lang.NullPointerException => + case _ : java.lang.NullPointerException => println ("safe_toInt: can't parse null string") - case ex: java.lang.NumberFormatException => + case _ : java.lang.NumberFormatException => println (s"safe_toInt: can't parse '$s' to create a Int") end try d @@ -291,9 +318,9 @@ def safe_toLong (s: String): Long = try d = java.lang.Long.parseLong (s) catch - case ex: 
java.lang.NullPointerException => + case _ : java.lang.NullPointerException => println ("safe_toLong: can't parse null string") - case ex: java.lang.NumberFormatException => + case _ : java.lang.NumberFormatException => println (s"safe_toLong: can't parse '$s' to create a Long") end try d diff --git a/src/main/scala/scalation/animation/Animator.scala b/src/main/scala/scalation/animation/Animator.scala index 60a81b187..0389518d9 100644 --- a/src/main/scala/scalation/animation/Animator.scala +++ b/src/main/scala/scalation/animation/Animator.scala @@ -80,7 +80,6 @@ class Animator (graph: Dgraph) else flaw ("createNode", "for node npts = " + npts + " != 4") return - end if graph.addNode (node) nodeMap.put (eid, node) @@ -109,7 +108,6 @@ class Animator (graph: Dgraph) if from == null || to == null then flaw ("createEdge", s"found null node - from with id $from_eid = $from OR to with id $to_eid = $to") return - end if if npts == 0 then // Create a straight line using implicit coordinates derived from node coordinates. 
@@ -134,7 +132,6 @@ class Animator (graph: Dgraph) else flaw ("createEdge", s"for edges npts = $npts != 0, 1, 4 or 6") return - end if debug ("createEdge", s"npts = $npts, edge = $edge, pts = ${stringOf (pts)}") @@ -161,22 +158,20 @@ class Animator (graph: Dgraph) if primary then // create a free (can go anywhere) token if npts == 2 then - token = new graph.Token (shape, label, true, color, pts(0), pts(1), default, default) + token = new graph.Token (shape, label, color, pts(0), pts(1), default, default) graph.addFreeToken (token) // add free token to the graph else if npts == 4 then - token = new graph.Token (shape, label, true, color, pts(0), pts(1), pts(2), pts(3)) + token = new graph.Token (shape, label, color, pts(0), pts(1), pts(2), pts(3)) graph.addFreeToken (token) // add free token to the graph else flaw ("createToken", s"for free token npts = $npts != 2 or 4") return - end if else // create a bound (to a node) token val onNode = nodeMap.get (on_eid).getOrElse (null) if onNode == null then flaw ("createToken", "onNode is null") return - end if if npts == 0 then token = new graph.Token (shape, label, false, color, onNode, default, default) @@ -195,7 +190,6 @@ class Animator (graph: Dgraph) else flaw ("createToken", s"for bound token npts = $npts != 0 or 2") return - end if end if tokenMap.put (eid, token) @@ -211,7 +205,6 @@ class Animator (graph: Dgraph) if node == null then flaw ("destroyNode", s"node [eid = $eid] is null") return - end if graph.removeNode (node) nodeMap.remove (eid) @@ -227,7 +220,6 @@ class Animator (graph: Dgraph) if edge == null then flaw ("destroyEdge", "edge [eid = $eid] is null") return - end if graph.removeEdge (edge) edgeMap.remove (eid) @@ -243,7 +235,6 @@ class Animator (graph: Dgraph) if token == null then flaw ("destroyToken", "token is null") return - end if if token.primary then // destroy free token graph.freeTokens -= token @@ -274,7 +265,6 @@ class Animator (graph: Dgraph) else move (node.shape, pts) // FIX: also need to 
move the connected edges - end if end moveNode //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -291,7 +281,6 @@ class Animator (graph: Dgraph) flaw ("moveToken", "arbitrary moves not allowed for bound tokens") else move (token.shape, pts) - end if end moveToken //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -372,7 +361,6 @@ class Animator (graph: Dgraph) moveGivenToken2Node (token, to_eid) count += 1 if count == number then break () - end if end for } // end breakable @@ -425,7 +413,6 @@ class Animator (graph: Dgraph) val pts = Array (tLoc.x, tLoc.y) moveToken (eid, pts) return true // continuing on curve - end if end if return false // exhausted the curve @@ -445,7 +432,6 @@ class Animator (graph: Dgraph) else scale (node.shape, pts) // FIX: also need to move the connected edges - end if end scaleNode //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -482,7 +468,6 @@ class Animator (graph: Dgraph) else scaleGivenTokens (oldNode.tokens, from_eid, color, -amount) // remove fluid scaleGivenTokens (newNode.tokens, to_eid, color, amount) // add fluid - end if end if end scaleTokensAt @@ -508,7 +493,6 @@ class Animator (graph: Dgraph) scale (token.shape, Array (0.0, 0.0)) else // rescale the token scale (token.shape, Array (size, size)) - end if done = true break () // mission accomplished, return now end if @@ -522,7 +506,6 @@ class Animator (graph: Dgraph) // No color match, so create a new token/fluid of that color val eid = EidCounter.next () createToken (eid, Ellipse (), "t" + eid, false, color, node_eid, Array (change, change)) - end if end if end scaleGivenTokens diff --git a/src/main/scala/scalation/animation/DgAnimator.scala b/src/main/scala/scalation/animation/DgAnimator.scala index c5f9dfe00..79b743632 100644 --- a/src/main/scala/scalation/animation/DgAnimator.scala +++ b/src/main/scala/scalation/animation/DgAnimator.scala @@ -165,7 +165,6 @@ class DgAnimator 
(_title: String, fgColor: Color = black, bgColor: Color = white private def invokeCommand (c: AnimateCommand): Unit = if c.action != MoveToken then // remove if to see all move steps println (s"DgAnimator.invokeCommand: $c") - end if c.action match case CreateNode => diff --git a/src/main/scala/scalation/animation/Dgraph.scala b/src/main/scala/scalation/animation/Dgraph.scala index 1bcc346b7..504672462 100644 --- a/src/main/scala/scalation/animation/Dgraph.scala +++ b/src/main/scala/scalation/animation/Dgraph.scala @@ -78,7 +78,6 @@ class Dgraph (name: String, bipartite: Boolean = false): if bipartite && edge.from.primary == edge.to.primary then flaw ("addEdge", "node types for edge endpoints may not be the same") return false - end if outEdges += edge true end addEdge @@ -140,7 +139,6 @@ class Dgraph (name: String, bipartite: Boolean = false): shape.setLine (p1, p2, bend) else if direct then // directly set the line (use factory methods to move) shape.setLine (p1, p2) - end if //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Move the edge endpoints, p1 and p2, so edge connects to node boundaries, @@ -304,7 +302,7 @@ class Dgraph (name: String, bipartite: Boolean = false): * @param w the width of the token * @param h the height of the token */ - def this (shape: RectangularShape, label: String, primary: Boolean, color: Color, + def this (shape: RectangularShape, label: String, color: Color, x: Double, y: Double, w: Double, h: Double) = this (shape, label, true, color, null, w, h) shape.setFrame (x, y, w, h) @@ -401,7 +399,7 @@ class Dgraph (name: String, bipartite: Boolean = false): * @param level the recursion level */ def traverse (n: Node, level: Int): Unit = - for i <- 0 until level do print ("\t") + cfor (0, level) { _ => print ("\t") } println (n) // print visited node //visited.add (n) val outgoing = n.outEdges diff --git a/src/main/scala/scalation/animation/SimpleAnimator.scala 
b/src/main/scala/scalation/animation/SimpleAnimator.scala index fc060f8c8..8dac13f19 100644 --- a/src/main/scala/scalation/animation/SimpleAnimator.scala +++ b/src/main/scala/scalation/animation/SimpleAnimator.scala @@ -81,7 +81,7 @@ class SimpleAnimator (title: String) try Thread.sleep (tau) catch - case ex: InterruptedException => println ("SimpleAnimator.run: sleep failed") + case _ : InterruptedException => println ("SimpleAnimator.run: sleep failed") end try repaint () diff --git a/src/main/scala/scalation/animation/SimpleAnimator2.scala b/src/main/scala/scalation/animation/SimpleAnimator2.scala index c5cf93d77..9270bf0df 100644 --- a/src/main/scala/scalation/animation/SimpleAnimator2.scala +++ b/src/main/scala/scalation/animation/SimpleAnimator2.scala @@ -86,7 +86,7 @@ class SimpleAnimator2 (_title: String) try Thread.sleep (tau) catch - case ex: InterruptedException => println ("SimpleAnimator2.run: sleep failed") + case _ : InterruptedException => println ("SimpleAnimator2.run: sleep failed") end try repaint () diff --git a/src/main/scala/scalation/calculus/B_Spline.scala b/src/main/scala/scalation/calculus/B_Spline.scala index 0371c4bef..0445e66bb 100644 --- a/src/main/scala/scalation/calculus/B_Spline.scala +++ b/src/main/scala/scalation/calculus/B_Spline.scala @@ -271,10 +271,10 @@ object B_Spline: def benchmarked [R] (reps: Int = 100, useSeconds: Boolean = false, title: String = "benchmark") (block: => R): (R, Statistic) = val stat = new Statistic (title) - for i <- 0 until reps-1 do - val (r, ms) = timed (block) + cfor (0, reps-1) { _ => + val (_, ms) = timed (block) stat.tally (if useSeconds then ms / 1000 else ms) - end for + } // cfor val (r, ms) = timed (block) stat.tally (if useSeconds then ms / 1000 else ms) (r, stat) diff --git a/src/main/scala/scalation/calculus/DRadial.scala b/src/main/scala/scalation/calculus/DRadial.scala index f93398be7..fd61e890a 100644 --- a/src/main/scala/scalation/calculus/DRadial.scala +++ 
b/src/main/scala/scalation/calculus/DRadial.scala @@ -108,7 +108,6 @@ class DRadial (centers: VectorD = VectorD (0.0), radialType_ : RadialType = GAUS if n > 3 || n < 0 then flaw ("dPolyHarmonicSpline", "Only derivatives up to order 3 are currently supported") return NaN - end if if r == 0.0 then return 0.0 diff --git a/src/main/scala/scalation/calculus/Differential.scala b/src/main/scala/scalation/calculus/Differential.scala index cfad39db2..6cc1bc0ca 100644 --- a/src/main/scala/scalation/calculus/Differential.scala +++ b/src/main/scala/scalation/calculus/Differential.scala @@ -295,12 +295,12 @@ end differentialTest for i <- x.indices do var hh = 1E-4 println (" x \t\t h \t\t deriv \t\t 1-sided \t\t error \t\t 2-sided \t\t error") - for k <- 0 until 9 do + cfor (0, 9) { _ => resetH (hh) val (d0, d1, d2) = (d(x(i)), derivative1 (f, x(i)), derivative (f, x(i))) println (s"${x(i)} \t $hh \t $d0 \t $d1 \t ${abs (d1-d0)} \t $d2 \t ${abs (d2-d0)}") hh /= 10.0 - end for + } // cfor println () end for diff --git a/src/main/scala/scalation/calculus/Integral.scala b/src/main/scala/scalation/calculus/Integral.scala index c758527e2..0c564e912 100644 --- a/src/main/scala/scalation/calculus/Integral.scala +++ b/src/main/scala/scalation/calculus/Integral.scala @@ -60,7 +60,7 @@ object Integral: var x = a val dx = (b - a) / sd var sum = f(a) + f(b) - for i <- 1 until sd do { x += dx; sum += 2.0 * f(x) } + cfor (1, sd) { _ => x += dx; sum += 2.0 * f(x) } _1_2 * dx * sum end trap diff --git a/src/main/scala/scalation/database/BpNode.scala b/src/main/scala/scalation/database/BpNode.scala index f9fe21b03..e1f945562 100644 --- a/src/main/scala/scalation/database/BpNode.scala +++ b/src/main/scala/scalation/database/BpNode.scala @@ -322,7 +322,7 @@ end BpNode val node = new BpNode (0, true) // empty leaf node var right: BpNode = null - for i <- 1 to totKeys do + cfor (0, totKeys) { _ => val key = rng.nextInt (mx) banner (s"put key = $key") node.add (key, 2 * key) @@ -334,7 +334,7 @@ end 
BpNode right = rt println (s"AFTER split: node = $node, dk = $dk, rt = $rt") node.showRef (); rt.showRef () - end for + } // cfor banner ("Show Arrays") node.show () @@ -365,7 +365,7 @@ end bpNodeTest val rng = new Random (seed) val node = new BpNode (0, false) // empty internal node: false => not isLeaf - for i <- 1 to totKeys do + cfor (0, totKeys) { _ => val key = rng.nextInt (mx) banner (s"put key = $key") node.add (key, 2 * key) @@ -376,7 +376,7 @@ end bpNodeTest val (dk, rt) = node.splitI () // splitI keys between node (2) and right (2) println (s"AFTER split: node = $node, dk = $dk, rt = $rt") node.showRef (); rt.showRef () - end for + } // cfor banner ("Show Arrays") node.show () diff --git a/src/main/scala/scalation/database/BpTreeMap.scala b/src/main/scala/scalation/database/BpTreeMap.scala index 417f41598..efdeffe94 100644 --- a/src/main/scala/scalation/database/BpTreeMap.scala +++ b/src/main/scala/scalation/database/BpTreeMap.scala @@ -60,6 +60,7 @@ class BpTreeMap [V: ClassTag] (name: String = "BpTreeMap") */ def getFirst: V = first.ref(1).asInstanceOf [V] def getLast: V = last_.ref(last_.keys).asInstanceOf [V] + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `SortedMap` trait requires `Ordering` with a compare method to be defined. 
* @see https://scala-lang.org/api/3.3.0/scala/math/Ordering.html @@ -83,12 +84,12 @@ class BpTreeMap [V: ClassTag] (name: String = "BpTreeMap") * @param js the starting within node index (defaults to -1) */ class TreeIterator (ns: BpNode = first, js: Int = -1) extends Iterator [(ValueType, V)]: - var (n, j) = (ns, js) + private var (n, j) = (ns, js) def hasNext: Boolean = j < n.keys-1 || n.ref(0) != null def next (): (ValueType, V) = - // debug ("next", s"node n = $n, j = $j, n.keys = ${n.keys}") +// debug ("next", s"node n = $n, j = $j, n.keys = ${n.keys}") if j < n.keys-1 then j += 1 else { n = n.ref(0).asInstanceOf [BpNode]; j = 0 } (n(j), n.ref(j+1).asInstanceOf [V]) @@ -604,13 +605,13 @@ end bpTreeMapTest val rng = new Random (seed) val tree = new BpTreeMap [Int] ("Test2") - for i <- 1 to totKeys do + cfor (0, totKeys) { _ => val key = rng.nextInt (mx) banner (s"put ($key, ${2 * key})") tree.put (key, 2 * key) tree.show () tree.showLink() - end for + } // cfor banner ("Print Statistics") println (s"size = ${tree.size}") @@ -636,13 +637,13 @@ end bpTreeMapTest2 val seed = 1 val rng = new Random (seed) val tree = new BpTreeMap [Int] ("Test3") - val tree2 = new TreeMap [ValueType, Int] ()(ValueTypeOrd) + val tree2 = new TreeMap [ValueType, Int] ()(using ValueTypeOrd) - for i <- 1 to totKeys do + cfor (0, totKeys) { _ => val key = rng.nextInt (mx) tree.put (key, 2 * key) tree2.put (key, 2 * key) - end for + } // cfor var same = tree equals tree2 println (s"tree equals tree2 = $same") diff --git a/src/main/scala/scalation/database/JavaMap.scala b/src/main/scala/scalation/database/JavaMap.scala index 4f8d1d8fd..4b0c53de5 100644 --- a/src/main/scala/scalation/database/JavaMap.scala +++ b/src/main/scala/scalation/database/JavaMap.scala @@ -14,8 +14,6 @@ package scalation package database -import scala.reflect.ClassTag - // H a s h B a s e d I n d e x //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -28,8 +26,8 @@ import 
scala.reflect.ClassTag * @param initialCap the initial hash table size (number of slots) * @param loadFactor the load factor (number of keys over number of slots) */ -class JHashMap [K: ClassTag, V: ClassTag] (initialCap: Int = 16, - loadFactor: Float = 0.75) +class JHashMap [K, V] (initialCap: Int = 16, + loadFactor: Float = 0.75) extends Serializable: private val flaw = flawf ("JHashMap") @@ -73,7 +71,7 @@ end JHashMap * @tparam V the base-type of the values assigned to keys in this tree map * @param ord the implicit ordering used to compare objects of type K */ -class JTreeMap [K: ClassTag, V: ClassTag] (implicit val ord: Ordering [K]) +class JTreeMap [K, V] (implicit val ord: Ordering [K]) extends Serializable: private val flaw = flawf ("JTreeMap") @@ -127,7 +125,6 @@ end JTreeMap for i <- 1 to totalKeys by 2 do index.put (rng.igen, i~^2) else for i <- 1 to totalKeys by 2 do index.put (i, i~^2) - end if index.show () banner ("Find Keys") diff --git a/src/main/scala/scalation/database/LinHashMap.scala b/src/main/scala/scalation/database/LinHashMap.scala index b7a18dc64..c4fbfaf8d 100644 --- a/src/main/scala/scalation/database/LinHashMap.scala +++ b/src/main/scala/scalation/database/LinHashMap.scala @@ -94,7 +94,6 @@ class LinHashMap [K: ClassTag, V: ClassTag] (name: String, order: Int = 4, if j != nKeys then key(j) = key(nKeys) // overwrite with last pair value(j) = value(nKeys) - end if true else false end move @@ -119,7 +118,7 @@ class LinHashMap [K: ClassTag, V: ClassTag] (name: String, order: Int = 4, /** The list of buckets making up this hash table. 
*/ private val hTable = new ArrayBuffer [Bucket] () - for i <- 0 until mod1 do hTable += new Bucket () + cfor (0, mod1) { _ => hTable += new Bucket () } //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `HTIterator` inner class supports iterating over all the elements @@ -129,7 +128,7 @@ class LinHashMap [K: ClassTag, V: ClassTag] (name: String, order: Int = 4, * @param js the starting within node index (defaults to 0) */ class HTIterator (is: Int = 0, bs: Bucket = hTable(0), js: Int = 0) extends Iterator [(K, V)]: - var (i, b, j) = (is, bs, js) + private var (i, b, j) = (is, bs, js) def hasNext: Boolean = j < b.nKeys - 1 || b.next != null || is < size0 - 1 def next (): (K, V) = if j < b.nKeys - 1 then j += 1 // next position in bucket @@ -196,7 +195,6 @@ class LinHashMap [K: ClassTag, V: ClassTag] (name: String, order: Int = 4, b.move (j, null) // move element j to null bucket if b != bh && b.nKeys == 0 then bp.next = b.next // remove empty overflow bucket - end if removed = true // element successfully removed, end loop end if } // cfor @@ -248,7 +246,6 @@ class LinHashMap [K: ClassTag, V: ClassTag] (name: String, order: Int = 4, isplit = 0 // if so, reset to zero mod1 = mod2 // double first hash function's modulus mod2 *= 2 // double second hash function's modulus - end if rehash (isp, b, bnew) // rehash some keys in chain b to bnew show () end split @@ -268,7 +265,6 @@ class LinHashMap [K: ClassTag, V: ClassTag] (name: String, order: Int = 4, mod2 = mod1 mod1 /= 2 isplit = mod1 - 1 - end if val bh = hTable(isplit) // home bucket for merge chain var b = bh @@ -303,10 +299,9 @@ class LinHashMap [K: ClassTag, V: ClassTag] (name: String, order: Int = 4, else if b != b1 && b.nKeys == 0 then bp.next = b.next // remove empty overflow bucket - end if end if - else j += 1 - end if + else + j += 1 end while } // cfor end rehash @@ -374,7 +369,6 @@ end LinHashMap for i <- 1 to totalKeys by 2 do ht.put (rng.igen, i * i) else for i <- 1 to 
totalKeys by 2 do ht.put (i, i * i) - end if ht.show () banner ("Find Keys") diff --git a/src/main/scala/scalation/database/MakeSchema.scala b/src/main/scala/scalation/database/MakeSchema.scala index e94bfc6e7..8eee34f7c 100644 --- a/src/main/scala/scalation/database/MakeSchema.scala +++ b/src/main/scala/scalation/database/MakeSchema.scala @@ -54,7 +54,7 @@ object MakeSchema: try TimeNum (str) catch - case ex: DateTimeException => correct = false + case _ : DateTimeException => correct = false correct end isDateTime diff --git a/src/main/scala/scalation/database/MinSpanningTree.scala b/src/main/scala/scalation/database/MinSpanningTree.scala index 1bbc694e9..eec77b06f 100644 --- a/src/main/scala/scalation/database/MinSpanningTree.scala +++ b/src/main/scala/scalation/database/MinSpanningTree.scala @@ -35,7 +35,7 @@ class MinSpanningTree (g: Graph, undirected: Boolean = true): // private val root = new TreeNode (0, 0, 0.0) // for vertex 0 in g, create a root node private val key = Array.fill (size)(MAX_VALUE) // cost/key array private val out = Array.fill (size)(true) // status of outside spanning tree - private val qu = PriorityQueue ()(NodeOrder) // priority queue of vertices + private val qu = PriorityQueue ()(using NodeOrder) // priority queue of vertices for i <- 0 until size do qu.enqueue (Elem (i, key(i))) // put all vertices in priority queue debug ("init", s"size = $size, already undirected = $undirected") diff --git a/src/main/scala/scalation/database/MultiMap.scala b/src/main/scala/scalation/database/MultiMap.scala index f89f02661..95e7e10e5 100644 --- a/src/main/scala/scalation/database/MultiMap.scala +++ b/src/main/scala/scalation/database/MultiMap.scala @@ -34,8 +34,9 @@ import scala.reflect.ClassTag * @param order the number of slots per bucket * @param loadFactor the (lower, upper) bound on the load factor (# keys over # home slots) */ -class LinHashMultiMap [K: ClassTag, V: ClassTag] (name: String, order: Int = 4, - loadFactor: (Double, Double) = 
(0.3, 1.2)) +//class LinHashMultiMap [K: ClassTag, V: ClassTag] (name: String, order: Int = 4, +class LinHashMultiMap [K: ClassTag, V] (name: String, order: Int = 4, + loadFactor: (Double, Double) = (0.3, 1.2)) extends LinHashMap [K, Set [V]] (name, order, loadFactor): //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -64,8 +65,8 @@ end LinHashMultiMap * @param initialCap the initial hash table size (number of slots) * @param loadFactor the load factor (number of keys over number of slots) */ -class HashMultiMap [K: ClassTag, V: ClassTag] (initialCap: Int = HashMap.defaultInitialCapacity, - loadFactor: Double = HashMap.defaultLoadFactor) +class HashMultiMap [K, V] (initialCap: Int = HashMap.defaultInitialCapacity, + loadFactor: Double = HashMap.defaultLoadFactor) extends Serializable: protected val hmap = new HashMap [K, Set [V]] (initialCap, @@ -111,8 +112,8 @@ end HashMultiMap * @param initialCap the initial hash table size (number of slots) * @param loadFactor the load factor (number of keys over number of slots) */ -class JHashMultiMap [K: ClassTag, V: ClassTag] (initialCap: Int = 16, - loadFactor: Float = 0.75) +class JHashMultiMap [K, V] (initialCap: Int = 16, + loadFactor: Float = 0.75) extends Serializable: protected val hmap = new java.util.HashMap [K, Set [V]] (initialCap, @@ -155,12 +156,12 @@ end JHashMultiMap /** The `BpTreeMultiMap` class provides tree maps that use the B+Tree algorithm. * It build on `BpTreeMap` allowing values to multi-valued `Set [V]` and can be * used for building Non-Unique Indices. 
- * @tparam K the type of the keys contained in this tree map + * -tparam K the type of the keys contained in this tree map (FIX add this type) * @tparam V the base-type of the values assigned to keys in this tree map * @param order the number of order (maximum number of children) of the tree * @param ord the implicit ordering used to compare objects of type K */ -class BpTreeMultiMap [V: ClassTag] (order: Int = 4) +class BpTreeMultiMap [V] (order: Int = 4) extends BpTreeMap [Set [V]] ("BpTreeMultiMap"): // FIX: add ", order" println (s"BpTreeMultiMap: order = $order") @@ -190,7 +191,7 @@ end BpTreeMultiMap * @tparam V the base-type of the values assigned to keys in this tree map * @param ord the implicit ordering used to compare objects of type K */ -class TreeMultiMap [K: ClassTag, V: ClassTag] (implicit val ord: Ordering [K]) +class TreeMultiMap [K, V] (implicit val ord: Ordering [K]) extends Serializable: protected val tree = new TreeMap [K, Set [V]] () // delegate to TreeMap @@ -234,7 +235,7 @@ end TreeMultiMap * @tparam V the base-type of the values assigned to keys in this tree map * @param ord the implicit ordering used to compare objects of type K */ -class JTreeMultiMap [K: ClassTag, V: ClassTag] (implicit val ord: Ordering [K]) +class JTreeMultiMap [K, V] (implicit val ord: Ordering [K]) extends Serializable: protected val tree = new java.util.TreeMap [K, Set [V]] () // delegate to Java's TreeMap @@ -296,7 +297,6 @@ end JTreeMultiMap for i <- 1 to totalKeys by 2 do index.put (rng.igen, Set (i~^2, i~^3)) else for i <- 1 to totalKeys by 2 do index.put (i, Set (i~^2, i~^3)) - end if index.show () banner ("Find Keys") diff --git a/src/main/scala/scalation/database/Normalization.scala b/src/main/scala/scalation/database/Normalization.scala index 46644a421..c685d2504 100644 --- a/src/main/scala/scalation/database/Normalization.scala +++ b/src/main/scala/scalation/database/Normalization.scala @@ -2,35 +2,78 @@ 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** @author John Miller * @version 2.0 - * @date Sat Mar 16 15:34:14 EDT 2024 + * @date Wed Apr 2 15:37:57 EDT 2025 * @see LICENSE (MIT style license file). * + * @see https://github.com/scalation/scalation_2.0 + * * @note Support Functions for Database Normalization Based on Functional Dependencies (FDs) * Includes implementations of the BCNF Decomposition Algorithm + * and the 4NF Decomposition Algorithm * and the 3NF Synthesis Algorithm * + * Relation r(R) defined of Schema R + * Assume X, Y ⊆ R + * + * Functional Dependency (FD) X -> Y + * for any t, u in r, t[X] = u[X] => t[Y] = u[Y] + * + * Multi-Valued Dependency (MVD) X ->> Y + * assuming R = XYZ, whenever (x, y1, z1) and (x, y2, z2) in r => + * (x, y1, z2) and (x, y2, z1) must also be in r + * * BCNF Boyce-Codd Normal Form -- for all nontrivial X -> Y, X must be a superkey + * 4NF Fourth Normal Form -- for all nontrivial X ->> Y, X must be a superkey * 3NF Third Normal Form -- for all nontrivial X -> Y, X must be a superkey OR * -- Y must consist of prime attributes + * + * FIX: (1) improve 4NF Decomposition, (2) finish Chase Algorithm that handles both FDs and MVDs, + * (3) remove the current limit for `Tableau` on the number of attributes, currently 26 "A" to "Z". + * (4) remove "TreeSet" prefix when printing `Attr` without constantly calling the `cut` method */ package scalation package database import scala.collection.mutable.{ArrayBuffer => VEC, SortedSet => SET} +//import scala.runtime.ScalaRunTime.stringOf import scala.util.boundary, boundary.break -type Attrs = SET [Char] +// @note: &~ means set difference (alias for diff) + +type Attrs = SET [Char] // type for set of attributes +type Symbols = Array [String] // types for array of symbols (@see `Tableau`) + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Cut out "TreeSet" from the `Attrs` toString method. 
+ * @param a the given set of attributes + */ +def cut (a: Attrs): String = + val sa = a.toString + val n = sa.indexOf ('(') + sa.drop (n) +end cut + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Cut out the prefix (e.g., "ArrayBuffer") from the `Attrs` toString method. + * @param a the given collection of sets of attributes + */ +def cut (a: VEC [Attrs]): String = + val sa = a.toString + val n = sa.indexOf ('(') + sa.drop (n) +end cut + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `FD` case class is used to represent a Functional Dependency (x -> y). + * Caveat: Can also be be used for a Multi-Valued Dependency (MVD) x ->> y as well * @param x the Left-Hand Side (LHS) attributes of the FD * @param y the Right-Hand Side (RHS) attributes of the FD */ case class FD (x: Attrs, y: Attrs): - override def toString: String = - s"${x.toString.replace ("TreeSet", "")} -> ${y.toString.replace ("TreeSet", "")}" + override def toString: String = s"${x.toString.drop (7)} -> ${y.toString.drop (7)}" end FD @@ -38,8 +81,8 @@ end FD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `Normalization` class provides methods useful in normalization theory * that can be used to assist in designing relational databases. - * These include Closure, Superkey, Key, Losslessness, Dependency - * Preservation, BCNF Decomposition, Minimal Cover, and 3NF Synthesis. + * These include Closure, Superkey, Key, Losslessness, Dependency, Preservation, + * BCNF Decomposition, 4NF Decomposition, Minimal Cover, and 3NF Synthesis. 
* @see `scalation.SetExt`, scalation.database.BinTree` * @param r the schema or complete set of attributes R * @param fd the given collection of Functional Dependencies (FDs) @@ -71,7 +114,7 @@ class Normalization (r: Attrs, fd: VEC [FD]): * @param fd_ the set of Functional Dependencies (FDs) to use (defaults to fd) */ def rclosure (z: Attrs, ri: Attrs, fd_ : VEC [FD] = fd): Attrs = - closure (z & ri, fd_) & ri + closure (z ∩ ri, fd_) ∩ ri //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the restricted closure of z (Z+_p) with respect to db design (set of tables) @@ -139,9 +182,40 @@ class Normalization (r: Attrs, fd: VEC [FD]): */ def key (z: Attrs, ri: Attrs = r, fd_ : VEC [FD] = fd): Boolean = if ! superkey (z, ri, fd_) then return false - z.forall ( (a: Char) => ! superkey (z &~ SET (a), ri, fd_) ) + z.∄ ( (a: Char) => superkey (z &~ SET (a), ri, fd_) ) end key + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Find a (global) key for relational schema R efficiently using + * Algorithm based on Section 3 with modifications of + * An Efficient Algorithm to Compute the Candidate Keys of a Relational Database Schema. + * @author Charles Moseley (of code) + * @see https://people.eecs.ku.edu/~hossein/Pub/Journal/1996-Saiedian-TCJ.pdf + * @param fd_mc the minimal cover (mc) set of Functional Dependencies (FDs) to use (defaults to fd) + */ + def findKey (fd_mc : VEC [FD] = fd): Attrs = boundary: + // Step 1: form sets nrhs, bhs + val lhs = SET [Char] () // LHS attributes + val rhs = SET [Char] () // RHS attributes + for f <- fd_mc do { lhs ++= f.x; rhs ++= f.y } // collect sets of LHS and RHS attributes + val nrhs = r &~ rhs // attributes not in any RHS + val bhs = lhs ∩ rhs // attributes in both LHS and RHS + debug ("findKey", s"bhs = $bhs, nrhs = $nrhs") + + if closure (nrhs, fd_mc) == r then // Step 2: is the attribute set nrhs a key? 
+ debug ("findKey", s"return since nrhs = $nrhs is a key") + break (nrhs) // found that nrhs is a key, return skipping Step 3 + + for n <- 1 to bhs.size do // Step 3: start with nrhs and try adding subsets of bhs + for ss <- bhs.subsets (n) do // subsets of bhs of length n + val key = nrhs ++ ss // form a possible key + if closure (key, fd_mc) == r then // is it a key? + debug ("findKey", s"return since key = $key is a key") + break (key) // found a key, return + + SET [Char] () // empty set => no key found + end findKey + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Determine whether the two sub-schemas { r1, r2 } represent a lossless decomposition * using the Pairwise Losslessness Test (PLT). @@ -150,7 +224,7 @@ class Normalization (r: Attrs, fd: VEC [FD]): * @param fd_ the set of Functional Dependencies (FDs) to use (defaults to fd) */ def lossless (r1: Attrs, r2: Attrs, fd_ : VEC [FD] = fd): Boolean = - val z = closure (r1 & r2, fd_) // take the closure of the intersection + val z = closure (r1 ∩ r2, fd_) // take the closure of the intersection r1 ⊆ z || r2 ⊆ z end lossless @@ -163,6 +237,61 @@ class Normalization (r: Attrs, fd: VEC [FD]): def lossless_ (p: VEC [Attrs], fd_ : VEC [FD] = fd): Boolean = p.exists (r ⊆ rclosure_p (_, p, fd_)) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Determine whether the db design p is lossless by applying the chase algorithm + * for FDs (equality generating dependencies). When the LHSs of two rows in + * a tableau agree, make their RHSs the same (i.e., make their symbols the same). + * Given FD x -> y and two rows (x, y1, z1) and (x, y2, z2) update the second row to (x, y1, z2). + * If one of the symbols is distinguished, make both distinguished. 
+ * @param tbl the given tableau + * @param fd_ the set of Functional Dependencies (FDs) to use (defaults to fd) + */ + def chaseFD (tbl: Tableau, fd_ : VEC [FD] = fd): Unit = + var changes = true + while changes do + changes = false + for f <- fd_ do changes = tbl.equateSymbols (f) + end chaseFD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Determine whether the db design p is lossless by applying the chase algorithm + * for MVDs (tuple generating dependencies), + * Given MVD x ->> y and two rows (x, y1, z1) and (x, y2, z2) add a new row (x, y1, z2). + * @param tbl the given tableau + * @param mvd the set of Multi-Valued Dependencies (MVDs) to use + */ + def chaseMVD (tbl: Tableau, mvd: VEC [FD]): Unit = + var changes = true + while changes do + changes = false + for f <- mvd do changes = tbl.genTuples (f) + changes = false // FIX - infinite loop otherwise + end chaseMVD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Determine whether the db design p is lossless by applying the chase algorithm + * for FDs (equality generating dependencies) and MVDs (tuple generating dependencies). 
+ * @see `Tableau` + * @param p the set of sub-schemas defining the tables + * @param fd_ the set of Functional Dependencies (FDs) to use (defaults to fd) + * @param mvd the set of Multi-Valued Dependencies (MVDs) to use + */ + def chase (p: VEC [Attrs], fd_ : VEC [FD] = fd, mvd: VEC [FD] = null): Boolean = + val tbl = new Tableau (r, p) + banner (s"The initial tableau for database design p = ${cut(p)}") + + if fd_ != null then chaseFD (tbl, fd_) + banner (s"The post FD tableau for database design p = ${cut(p)}") + + if mvd != null then chaseMVD (tbl, mvd) + val lossless = tbl.allDistinguished () + banner (s"The final tableau for database design p = ${cut(p)} is lossless = $lossless") + tbl.printTableau () + lossless + end chase + +// B C N F + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Find the first Functional Dependency (FD) explicitly in fd_ that violates * the BCNF rule that the LHS of all applicable, nontrivial FDs are superkeys. @@ -173,7 +302,7 @@ class Normalization (r: Attrs, fd: VEC [FD]): def find_not_bcnf (ri: Attrs, fd_ : VEC [FD] = fd): FD = boundary: for f <- fd_ if f.x ⊆ ri do // LHS must be in Ri if ! superkey (f.x, ri, fd_) then // not a superkey w.r.t. Ri (violates BCNF) - val y = f.y & ri // y is relevant RHS of f.y in Ri + val y = f.y ∩ ri // y is relevant RHS of f.y in Ri if y.nonEmpty then break (FD (f.x, y)) // return FD: f.x -> y null // none found end find_not_bcnf @@ -188,7 +317,7 @@ class Normalization (r: Attrs, fd: VEC [FD]): def find_not_bcnf_Fp (ri: Attrs, fd_ : VEC [FD] = fd): FD = boundary: val k = ri.size for x <- ri.subsets () if x.size in (1, k-1) do // consider possible LHSs - val y = rclosure (x, ri, fd_) diff x // nontrivial FD x -> y inside Ri + val y = rclosure (x, ri, fd_) &~ x // nontrivial FD x -> y inside Ri if y.nonEmpty && ! 
superkey (x, ri, fd_) then // y is nonempty and x is not a superkey w.r.t Ri break (FD (x, y)) // return FD: x -> y null // none found @@ -205,12 +334,12 @@ class Normalization (r: Attrs, fd: VEC [FD]): * @param fd_ the set of Functional Dependencies (FDs) to use (defaults to fd) */ def bcnf_decomp (tree: BinTree [Attrs] = bcnf_root, ri: Attrs = r, fd_ : VEC [FD] = fd): Unit = - val f = find_not_bcnf (ri, fd_) // find FD in F that violates BCNF -// if f == null then f = find_not_bcnf_Fp (ri, fd_) // find FD in F+ that violates BCNF + var f = find_not_bcnf (ri, fd_) // find FD in F that violates BCNF + if f == null then f = find_not_bcnf_Fp (ri, fd_) // find FD in F+ that violates BCNF <<<< println (s"use FD $f to decompose $ri") if f != null then - val (r1, r2) = (f.x | f.y, ri diff f.y) + val (r1, r2) = (f.x ∪ f.y, ri &~ f.y) println (s"decompose $ri into ($r1, $r2)") val tleft = tree.addLeft (r1) // add r1 as left child val trigh = tree.addRigh (r2) // add r2 as right child @@ -218,6 +347,26 @@ class Normalization (r: Attrs, fd: VEC [FD]): if r2.size > 2 then bcnf_decomp (trigh, r2, fd_) // recursive call on right sub-tree end bcnf_decomp +// 4 N F + + private [database] val _4nf_root = new BinTree [Attrs] (r) // root of the 4NF Decomposition Tree + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** 4NF DECOMPOSITION ALGORITHM that builds a 4NF decomposition tree. + * Caveat: Reorder FDs so that those with prime attributes come last. 
+ * FIX - order of FD and MVD can be important - defer when prime on RHS + * @param mvd the set of Multi-Valued Dependencies (MVDs) to use + * @param tree the tree (sub-tree) to work off of (defaults to _4nf_root) + * @param ri the i-th sub-table (defaults to r) + * @param fd_ the set of Functional Dependencies (FDs) to use (defaults to fd) + */ + def _4nf_decomp (mvd: VEC [FD], tree: BinTree [Attrs] = _4nf_root, ri: Attrs = r, fd_ : VEC [FD] = fd): Unit = + val deps = fd_ ++ mvd + bcnf_decomp (tree, ri, deps) + end _4nf_decomp + +// 3 N F + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Shrink the LHS of each Functional Dependency (FD) that contains extraneous attributes. * @param fd_ the set of Functional Dependencies (FDs) to use (defaults to fd) @@ -227,7 +376,7 @@ class Normalization (r: Attrs, fd: VEC [FD]): var changes = true while changes do changes = false - for b <- f.x if f.y ⊆ closure (f.x diff SET (b)) do + for b <- f.x if f.y ⊆ closure (f.x &~ SET (b)) do f.x -= b // remove extraneous attribute B changes = true end shrink_LHS @@ -291,37 +440,6 @@ class Normalization (r: Attrs, fd: VEC [FD]): rem end subset_tables - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find a (global) key for relational schema R efficiently using - * Algorithm based on Section 3 with modifications of - * An Efficient Algorithm to Compute the Candidate Keys of a Relational Database Schema. 
- * @author Chaeles Moseley (of code) - * @see https://people.eecs.ku.edu/~hossein/Pub/Journal/1996-Saiedian-TCJ.pdf - * @param fd_mc the minimal cover (mc) set of Functional Dependencies (FDs) to use (defaults to fd) - */ - def findKey (fd_mc : VEC [FD] = fd): Attrs = boundary: - // Step 1: form sets nrhs, bhs - val lhs = SET [Char] () // LHS attributes - val rhs = SET [Char] () // RHS attributes - for f <- fd_mc do { lhs ++= f.x; rhs ++= f.y } // collect sets of LHS and RHS attributes - val nrhs = r &~ rhs // attributes not in any RHS - val bhs = lhs & rhs // attributes in both LHS and RHS - debug ("findKey", s"bhs = $bhs, nrhs = $nrhs") - - if closure (nrhs, fd_mc) == r then // Step 2: is the attribute set nrhs a key? - debug ("findKey", s"return since nrhs = $nrhs is a key") - break (nrhs) // found that nrhs is a key, return skipping Step 3 - - for n <- 1 to bhs.size do // Step 3: start with nrhs and try adding subsets of bhs - for ss <- bhs.subsets (n) do // subsets of bhs of length n - val key = nrhs ++ ss // form a possible key - if closure (key, fd_mc) == r then // is it a key? - debug ("findKey", s"return since key = $key is a key") - break (key) // found a key, return - - SET [Char] () // empty set => no key found - end findKey - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** 3NF SYNTHESIS ALGORITHM that synthesizes tables from Functional Dependencies (FDs). * Caveat: Assumes the Functional Dependencies (FDs) constitute a MINIMAL/CANONICAL COVER. 
@@ -330,7 +448,7 @@ class Normalization (r: Attrs, fd: VEC [FD]): def _3nf_synthesis (fd_mc: VEC [FD] = fd): VEC [Attrs] = val mfd = merge_fds (fd_mc) // STEP 1: MERGE FDs with common LHS val p = VEC [Attrs] () - for f <- mfd do p += f.x | f.y // STEP 2: each FD FORMS a table + for f <- mfd do p += f.x ∪ f.y // STEP 2: each FD FORMS a table p --= subset_tables (p) // STEP 3: REMOVE all SUBSET tables @@ -345,6 +463,155 @@ class Normalization (r: Attrs, fd: VEC [FD]): end Normalization +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Tableau` class support the creation is tableaux, i.e., abstract tables + * containing rows of symbols (distinguished and non-distinguished). + * @param r the complete set of attributes + * @param p the set of sub-schemas defining the tables + */ +class Tableau (r: Attrs, p: VEC [Attrs]): + + private val debug = debugf ("Tableau", true) // the debug function + private val _A = 'A'.toInt // integer values of letter A + private val _a = 'a'.toInt // integer values of letter a + private val m = p.size // number of sub-schema/rows + private val n = r.size // number of attributes/columns + private val tabl = new VEC [Symbols] (m) // storage for two-dimensional abstract table + + for i <- p.indices do tabl += makeRow (p(i), i) // add all rows to the tableau + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Make a row for Tableau tabl, the abstract table of strings, where for row i, element j + * is a distinguished (e.g., b) symbols when character j is in sub-relation p_i, + * and is otherwise a non-distinguished symbol (e.g., b3). 
+ * @param ri the cuurent sub-schema + * @param i the i-th row + */ + def makeRow (ri: Attrs, i: Int): Symbols = + val row = Array.ofDim [String] (n) + for j <- 0 until n do + val ch: Char = (_A + j).toChar + val dsym = (_a + j).toChar.toString // distinguished symbol + row(j) = if ri contains ch then dsym else dsym + i + row + end makeRow + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Make a new row for Tableau tabl by crossing row i with row j as follows: + * Given MVD x ->> y and two rows (x, y1, z1) and (x, y2, z2) add a new row (x, y2, z1). + * @param x the LHS attributes of the MVD + * @param y the RHS attributes of the MVD + * @param i the first row + * @param h the second row + */ + def makeRow (x: Attrs, y: Attrs, i: Int, j: Int): Symbols = + println (s"makeRow based on MVD: $x ->> $y") + val row = Array.ofDim [String] (n) + for k <- 0 until n do + val ch: Char = (_A + k).toChar + row(k) = if y contains ch then tabl(j)(k) else tabl(i)(k) + row + end makeRow + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Project the given row in the tableau onto attributes x". + * @param x the given attributes + * @param row the given row in the tableau + */ + def onto (x: Attrs, row: Symbols): Symbols = +// debug ("onto", s"project row = ${stringOf (row)} onto attributes x = $x") + (for ai <- x yield { val i = ai - _A; row(i) }).toArray + end onto + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Equate RHS symbols for attributes/columns in the tableau for rows i and j. + * @param y the attributes to equate (make the same) + * @param i index of i-th row + * @param j index of j-th row + */ + def equateRHS (y: Attrs, i: Int, j: Int): Boolean = + val same = onto (y, tabl(i)) sameElements onto (y, tabl(j)) + if ! same then + for ak <- y do + val k = ak - _A + if tabl(i)(k) < tabl(j)(k) then tabl(j)(k) = tabl(i)(k) + else tabl(i)(k) = tabl(j)(k) + end if + ! 
same + end equateRHS + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** For Functional Dependency (FD) f, try to equate symbols in the tableau + * by comparing each row with every other row. When two rows agree on their LHSs, + * make their RHSs the same. Return whether any symbols were changed. + * @param f the FD to use to try to equate symbols + */ + def equateSymbols (f: FD): Boolean = + printTableau () + debug ("equateSymbols", s"use FD $f to equate symbols in the tableau") + var change = false + for i <- tabl.indices; j <- 0 until i if j != i do + if onto (f.x, tabl(i)) sameElements onto (f.x, tabl(j)) then + println (s"for f.x = ${f.x}: rows i = $i and j = $j have same LHS") + if equateRHS (f.y, i, j) then change = true + end for + change + end equateSymbols + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** For Multi-Valued Dependency (MVD) f, try to generate and add new rows into the tableau + * by comparing each row with every other row. When two rows agree on their LHSs, + * make their RHSs the same. Return whether any symbols were changed. + * @param f the MVD to use to try to equate symbols + */ + def genTuples (f: FD): Boolean = + printTableau () + debug ("genTuples", s"use MVD $f to generate tuples add into the tableau") + var change = false + val nrows = tabl.size + for i <- 0 until nrows; j <- 0 until i if j != i do + if onto (f.x, tabl(i)) sameElements onto (f.x, tabl(j)) then + println (s"for f.x = ${f.x}: rows i = $i and j = $j have same LHS") + val newRow = makeRow (f.x, f.y, i, j) + if ! (newRow sameElements tabl(i)) then + tabl += newRow + change = true + end if + end for + change + end genTuples + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Determine whether the final tableau contains a row of all distinguished symbols. + * Distinguished symbols "a" to "z". Non-distinguished symbols "a1" to "z25" where + * e.g. 
25 is the number of rows in the tableau. + */ + def allDistinguished (): Boolean = + cfor (0, m) { i => + if tabl(i).forall (_.length == 1) then + debug ("allDistinguished", s"row i = $i of tableau has all distinguished symbols") + return true + } // cfor + false + end allDistinguished + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Print the tableau. + */ + def printTableau (): Unit = + println ("-" * (27 + 3 * n)) + println (f"| Tableau | ${cut (r)} |") + println ("-" * (27 + 3 * n)) + for i <- 0 until m do + print (f"| ${cut (p(i))}%20s |") + for j <- 0 until n do print (f"${tabl(i)(j)}%3s") + println (" |") + println ("-" * (27 + 3 * n)) + end printTableau + +end Tableau + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `normalizationTest` main method tests the `Normalization` class. * For this example all FD are preserved. @@ -404,7 +671,7 @@ end normalizationTest /** The `normalizationTest2` main method tests the `Normalization` class. * For this example some FDs may not be preserved. 
* - * fd = sid -> sname address phone A -> BCD + * fd = sid -> sname addr phone A -> BCD * cid -> cname desc hours pid E -> FGHI * pid -> pname rank I -> JK * sid cid -> grade AE -> L @@ -419,8 +686,8 @@ end normalizationTest val r = SET ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L') val fd = VEC (FD (SET ('A'), SET ('B', 'C', 'D')), - FD (SET ('E'), SET ('F', 'G', 'H', 'I')), - FD (SET ('I'), SET ('J', 'K')), + FD (SET ('E'), SET ('F', 'G', 'H', 'I')), // what happens unpon swap + FD (SET ('I'), SET ('J', 'K')), // FD order FD (SET ('A', 'E'), SET ('L'))) val p = VEC (SET ('A', 'B', 'C', 'D'), SET ('E', 'F', 'G', 'H', 'I'), @@ -450,6 +717,7 @@ end normalizationTest banner ("Lossless?") println (s"lossless_ (p) = ${db.lossless_ (p)}") + db.chase (p, fd) banner ("Dependency Preservation?") for f <- fd do @@ -463,6 +731,77 @@ end normalizationTest2 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `normalizationTest3` main method tests the `Normalization` class. + * For this example some FDs may not be preserved. Also, has an MVD. 
+ * + * fd = sid -> sname addr phone A -> BCD + * pid -> pname rank I -> JK + * cid -> cname desc hours pid E -> FGHI + * sid cid -> grade AE -> L + * did -> dname office M -> NO + * + * mvd = pid ->> did I ->> M + * + * Comment out line (1) => success + * Comment out line (2) => Dependency Preservation fails + * Comment out lines (1 and 2) => Losslessness and Dependency Preservation fail + * + * > runMain scalation.database.normalizationTest3 + */ +@main def normalizationTest3 (): Unit = + + val r = SET ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O') + val fd = VEC (FD (SET ('A'), SET ('B', 'C', 'D')), + FD (SET ('I'), SET ('J', 'K')), // FD order + FD (SET ('E'), SET ('F', 'G', 'H', 'I')), // what id swapped + FD (SET ('A', 'E'), SET ('L')), + FD (SET ('M'), SET ('N', 'O'))) + val mvd = VEC (FD (SET ('I'), SET ('M'))) + val p = VEC (SET ('A', 'B', 'C', 'D'), + SET ('E', 'F', 'G', 'H', 'I'), + SET ('E', 'J', 'K'), // (1) a BCNF Decomposition +// SET ('I', 'J', 'K'), // (2) 3NF Synthesis + SET ('A', 'E', 'L')) + + val db = Normalization (r, fd) + + banner ("Schema r, Functional Dependencies fd, and DB Design p") + println (s"r = $r") + println (s"fd = $fd") + println (s"p = $p") + + banner ("LHS Superkey?") + for f <- fd do + val x = f.x + println (s"f = $f, closure ($x) = ${db.closure (x)}, " + + s"superkey ($x) = ${db.superkey (x)}") + + banner ("FD Violating BCNF?") + println (s"find_not_bcnf (r) = ${db.find_not_bcnf (r)}") + + banner ("BCNF Decomposition?") + db.bcnf_decomp () + db.bcnf_root.printTree () + + banner ("Lossless?") + println (s"lossless_ (p) = ${db.lossless_ (p)}") + db.chase (p, fd, mvd) + + banner ("Dependency Preservation?") + for f <- fd do + println (s"f = $f, preserve (f, p) = ${db.preserve (f, p)}") + + banner ("Find a Global Key") + println (s"findKey () = ${db.findKey ()}") + + banner ("4NF Decomposition?") + db._4nf_decomp (mvd) + db._4nf_root.printTree () + +end normalizationTest3 + + 
+//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `normalizationTest4` main method tests the `Normalization` class. * This example tests the BCNF Decomposition Algorithm and 3NF Synthesis Algorithm. * U = Course, Teacher, Student, Grade, Hour, Room * @@ -472,9 +811,9 @@ end normalizationTest2 * HS -> R * HT -> R * - * > runMain scalation.database.normalizationTest3 + * > runMain scalation.database.normalizationTest4 */ -@main def normalizationTest3 (): Unit = +@main def normalizationTest4 (): Unit = val r = SET ('C', 'T', 'H', 'R', 'S', 'G') val fd = VEC (FD (SET ('C'), SET ('T')), @@ -502,11 +841,11 @@ end normalizationTest2 banner ("Find a Global Key") println (s"findKey () = ${db.findKey ()}") -end normalizationTest3 +end normalizationTest4 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `normalizationTest4` main method tests the `Normalization` class. +/** The `normalizationTest5` main method tests the `Normalization` class. * This example tests the 3NF Synthesis Algorithm. * * fd = C -> ST Cname -> Street cciTy @@ -514,9 +853,9 @@ end normalizationTest3 * B -> EY Bname -> assEts bcitY * L -> BCM Loanno -> Bname Cname aMount * - * > runMain scalation.database.normalizationTest4 + * > runMain scalation.database.normalizationTest5 */ -@main def normalizationTest4 (): Unit = +@main def normalizationTest5 (): Unit = val r = SET ('A', 'B', 'C', 'E', 'L', 'M', 'N', 'S', 'T', 'Y') val fd = VEC (FD (SET ('C'), SET ('S')), // MINIMAL COVER @@ -547,11 +886,11 @@ end normalizationTest3 banner ("Find a Global Key") println (s"findKey () = ${db.findKey ()}") -end normalizationTest4 +end normalizationTest5 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `normalizationTest5` main method tests the `Normalization` class. +/** The `normalizationTest6` main method tests the `Normalization` class. * This example tests the Minimal Cover Algorithm. 
* * fd = C -> ST Cname -> Street cciTy @@ -559,9 +898,9 @@ end normalizationTest4 * B -> EY Bname -> assEts bcitY * L -> BCM Loanno -> bname cname aMount * - * > runMain scalation.database.normalizationTest5 + * > runMain scalation.database.normalizationTest6 */ -@main def normalizationTest5 (): Unit = +@main def normalizationTest6 (): Unit = val r = SET ('A', 'B', 'C', 'E', 'L', 'M', 'N', 'S', 'T', 'Y') val fd = VEC (FD (SET ('C'), SET ('S', 'T')), @@ -582,11 +921,11 @@ end normalizationTest4 banner ("Find a Global Key") println (s"findKey () = ${db.findKey ()}") -end normalizationTest5 +end normalizationTest6 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `normalizationTest6` main method tests the `Normalization` class. +/** The `normalizationTest7` main method tests the `Normalization` class. * This example tests the Dependency Preservation of db design p. * * fd = A -> B @@ -595,9 +934,9 @@ end normalizationTest5 * D -> A * p = { AB, BC, CD } * - * > runMain scalation.database.normalizationTest6 + * > runMain scalation.database.normalizationTest7 */ -@main def normalizationTest6 (): Unit = +@main def normalizationTest7 (): Unit = val r = SET ('A', 'B', 'C', 'D') val fd = VEC (FD (SET ('A'), SET ('B')), @@ -617,11 +956,11 @@ end normalizationTest5 banner ("Find a Global Key") println (s"findKey () = ${db.findKey ()}") -end normalizationTest6 +end normalizationTest7 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `normalizationTest7` main method tests the `Normalization` class. +/** The `normalizationTest8` main method tests the `Normalization` class. * This example tests the Dependency Preservation of db design p. * Compare with Test6. 
* @@ -631,9 +970,9 @@ end normalizationTest6 * A -> D // LHS & RHS swapped for this FD * p = { AB, BC, CD } * - * > runMain scalation.database.normalizationTest7 + * > runMain scalation.database.normalizationTest8 */ -@main def normalizationTest7 (): Unit = +@main def normalizationTest8 (): Unit = val r = SET ('A', 'B', 'C', 'D') val fd = VEC (FD (SET ('A'), SET ('B')), @@ -653,11 +992,11 @@ end normalizationTest6 banner ("Find a Global Key") println (s"findKey () = ${db.findKey ()}") -end normalizationTest7 +end normalizationTest8 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `normalizationTest8` main method tests the `Normalization` class. +/** The `normalizationTest9` main method tests the `Normalization` class. * This example tests the findKey method (see Example 2 from FindKey paper). * * fd = AD -> B @@ -666,9 +1005,9 @@ end normalizationTest7 * B -> C * AC -> F * - * > runMain scalation.database.normalizationTest8 + * > runMain scalation.database.normalizationTest9 */ -@main def normalizationTest8 (): Unit = +@main def normalizationTest9 (): Unit = val r = SET ('A', 'B', 'C', 'D', 'E', 'F') // key = AB // val r = SET ('A', 'B', 'C', 'D', 'E', 'F', 'G') // key = ABG @@ -685,5 +1024,5 @@ end normalizationTest7 banner ("Find a Global Key") println (s"findKey () = ${db.findKey ()}") -end normalizationTest8 +end normalizationTest9 diff --git a/src/main/scala/scalation/database/SpanningTree.scala b/src/main/scala/scalation/database/SpanningTree.scala index 605b5e1a1..490299b96 100644 --- a/src/main/scala/scalation/database/SpanningTree.scala +++ b/src/main/scala/scalation/database/SpanningTree.scala @@ -78,7 +78,6 @@ class SpanningTree (g: MatrixD): out(j) = false // j is no longer outsise the tree qu.enqueue (j) return j - end if -1 end findNext diff --git a/src/main/scala/scalation/database/Tabular.scala b/src/main/scala/scalation/database/Tabular.scala index b48f5c84d..305fe3b62 100644 --- 
a/src/main/scala/scalation/database/Tabular.scala +++ b/src/main/scala/scalation/database/Tabular.scala @@ -16,6 +16,7 @@ * inline def ρ (newName: String): T = rename (newName) * inline def π (x: String): T = project (strim (x)) * inline def π (cPos: IndexedSeq [Int]): T = project (cPos) + * inline def π_ (x: String): T = project (schema diff strim (x)) * inline def σπ (a: String, apred: APredicate): T = selproject (a, apred) * inline def σ (a: String, apred: APredicate): T = select (a, apred) * inline def σ (predicate: Predicate): T = select (predicate) @@ -33,6 +34,7 @@ * inline def ⋉ (x: Schema, y: Schema, r2: T): T = leftJoin (x, y, r2) * inline def ⋊ (x: Schema, y: Schema, r2: T): T = rightJoin (x, y, r2) * inline def / (r2: T): T = divide (r2) + * inline def ÷ (r2: T): T = divide (r2) * inline def γ (ag: String): T = groupBy (ag) * inline def ℱ (ag: String, f_as: (AggFunction, String)*): T = aggregate (ag, f_as :_*) * inline def ↑ (x: String*): T = orderBy (x :_*) @@ -304,6 +306,16 @@ trait Tabular [T <: Tabular [T]] (val name: String, val schema: Schema, val doma inline def π (cPos: IndexedSeq [Int]): T = project (cPos) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** PROJECTOUT the tuples in this table onto 'schema - the given attribute names'. 
+ * @param x the schema/attribute names to project out + */ + inline def projectOut (x: Schema): T = project (schema diff x) + + inline def projectOut (x: String): T = project (schema diff strim (x)) + + inline def π_ (x: String): T = project (schema diff strim (x)) + // ========================================================== SELECT-PROJECT //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -589,6 +601,8 @@ trait Tabular [T <: Tabular [T]] (val name: String, val schema: Schema, val doma inline def / (r2: T): T = divide (r2) + inline def ÷ (r2: T): T = divide (r2) + // ================================================================ GROUP BY //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/database/TimeInterval.scala b/src/main/scala/scalation/database/TimeInterval.scala index 9c6523c37..99f5503c3 100644 --- a/src/main/scala/scalation/database/TimeInterval.scala +++ b/src/main/scala/scalation/database/TimeInterval.scala @@ -370,7 +370,6 @@ end timeIntervalTest2 for cls <- periods do if lab conflict cls then println (s"Found time conflict between lab ${lab.format} and cls ${cls.format}") - end if end for end timeIntervalTest3 diff --git a/src/main/scala/scalation/database/TimeOfWeek.scala b/src/main/scala/scalation/database/TimeOfWeek.scala index a60cde858..188180cdb 100644 --- a/src/main/scala/scalation/database/TimeOfWeek.scala +++ b/src/main/scala/scalation/database/TimeOfWeek.scala @@ -156,7 +156,6 @@ case class TimeOfWeek (day: Int, second: Long): f"$E%s $HH%02d:$mm%02d:$ss%02d" else f"$E%s $HH%02d:$mm%02d" - end if end format //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/database/Tree.scala b/src/main/scala/scalation/database/Tree.scala index b57542b11..260713e5a 100644 --- a/src/main/scala/scalation/database/Tree.scala +++ b/src/main/scala/scalation/database/Tree.scala @@ -89,23 +89,22 @@ object 
Tree: val tree = new Tree (root, depth) // make a tree from root if depth > 0 then val imax = rng.igen - for i <- 0 until imax do genPre (depth, root, 1, i, imax) // add root's children - end if + cfor (0, imax) { _ => genPre (depth, root, 1) } // i, imax) // add root's children //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /* Recursive helper method for generating a tree using a pre-order traversal. * @param depth the depth of the tree * @param p the parent node * @param lev the level of the node - * @param ord the birth order of the node - * @param sibs the number of siblings + * -param ord the birth order of the node + * -param sibs the number of siblings */ - def genPre (depth: Int, p: TreeNode, lev: Int, ord: Int, sibs: Int): Unit = +// def genPre (depth: Int, p: TreeNode, lev: Int, ord: Int, sibs: Int): Unit = + def genPre (depth: Int, p: TreeNode, lev: Int): Unit = val n = tree.add (p) // add node n to tree if lev < depth then val imax = rng.igen - for i <- 0 until imax do genPre (depth, n, lev+1, i, imax) // add n's children - end if + cfor (0, imax) { _ => genPre (depth, n, lev+1) } // i, imax) // add n's children end genPre tree @@ -183,7 +182,6 @@ class Tree (val root: TreeNode, depth: Int, val name: String = "tree"): if p != null then p.child += n // add n as child of p n.ord = p.child.size - 1 // record n's birth order - end if n // return node n end add @@ -200,7 +198,6 @@ class Tree (val root: TreeNode, depth: Int, val name: String = "tree"): if p != null then p.child += n // add n as child of p n.ord = p.child.size - 1 // record n's birth order - end if n // return node n end add @@ -258,10 +255,10 @@ end treeTest val FANOUT = 3 val root = new TreeNode (0, 0) // nid = 0, lev = 0 val ct = new Tree (root, 2) // root, depth = 2 - for i <- 0 until FANOUT do + cfor (0, FANOUT) { _ => val n = ct.add (ct.root) - for j <- 0 until FANOUT do ct.add (n) - end for + cfor (0, FANOUT) { _ => ct.add (n) } + } // cfor ct.printTree () 
end treeTest2 diff --git a/src/main/scala/scalation/database/graph/EdgeType.scala b/src/main/scala/scalation/database/graph/EdgeType.scala index d385aecca..4cf6c6486 100644 --- a/src/main/scala/scalation/database/graph/EdgeType.scala +++ b/src/main/scala/scalation/database/graph/EdgeType.scala @@ -48,7 +48,7 @@ class EdgeType (_name: String, */ def newShapeObj: CurvilinearShape = shape match - case s: Arrow => Arrow () + case _ : Arrow => Arrow () case _ => QArrow () end match end newShapeObj @@ -233,7 +233,7 @@ class EdgeType (_name: String, println ("|-" + "-" * len + "-|") System.out.print ("| ") prt ("from", wj) - for (k, v) <- tuple0 do prt (k, wj) + for (k, _) <- tuple0 do prt (k, wj) prt ("to", wj) println (" |") @@ -245,7 +245,7 @@ class EdgeType (_name: String, if edges (i).from == null then prt ("null-vertex", wj) else prt (edges (i).from.name, wj) - for (k, v) <- tuple_i do prt (v, wj) + for (_, v) <- tuple_i do prt (v, wj) if edges(i).to == null then prt ("null-vertex", wj) else prt (edges(i).to.name, wj) diff --git a/src/main/scala/scalation/database/graph/Topological.scala b/src/main/scala/scalation/database/graph/Topological.scala index 05bc2abba..48094d378 100644 --- a/src/main/scala/scalation/database/graph/Topological.scala +++ b/src/main/scala/scalation/database/graph/Topological.scala @@ -45,6 +45,7 @@ trait Topological (var elem: Element, var dist: Double) val oth = other.asInstanceOf [Topological] if elem == oth.elem then Option (dist compare oth.dist) else oth.elem `tryCompareTo` oth.elem + end tryCompareTo //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the topological objects/tokens in the neighborhood of this token. 
diff --git a/src/main/scala/scalation/database/graph/Vertex.scala b/src/main/scala/scalation/database/graph/Vertex.scala index 4612fa26d..9530b9874 100644 --- a/src/main/scala/scalation/database/graph/Vertex.scala +++ b/src/main/scala/scalation/database/graph/Vertex.scala @@ -14,7 +14,6 @@ package graph //import scala.collection.immutable.{Vector => VEC} import scala.collection.mutable.{ArrayBuffer => VEC} - import scala.collection.mutable.Map import scala.math.max @@ -74,13 +73,13 @@ class Vertex (_name: String, val prop: Property, _pos: VectorD = null) println ("|-" + "-" * len + "-|") print ("| ") - for (k, v) <- prop do prt (k, wj) + for (k, _) <- prop do prt (k, wj) println (" |") println ("|-" + "-" * len + "-|") print("| ") - for (k, v) <- prop do prt (v, wj) + for (_, v) <- prop do prt (v, wj) println (" |") println ("|-" + "-" * len + "-|") diff --git a/src/main/scala/scalation/database/graph/VertexType.scala b/src/main/scala/scalation/database/graph/VertexType.scala index d494a45f2..8ef6446cd 100644 --- a/src/main/scala/scalation/database/graph/VertexType.scala +++ b/src/main/scala/scalation/database/graph/VertexType.scala @@ -46,15 +46,17 @@ class VertexType (_name: String, val schema: Schema, private var primaryKey: String = null // property used as primary key private val index = Map [ValueType, Vertex] () // primary key -> vertex + def getPK: String = primaryKey + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return a new shape object based on the shape template. 
*/ def newShapeObj: Shape = shape match - case s: Ellipse => Ellipse () - case s: Octagon => Octagon () - case s: Rectangle => Rectangle () - case _ => RoundRectangle () + case _ : Ellipse => Ellipse () + case _ : Octagon => Octagon () + case _ : Rectangle => Rectangle () + case _ => RoundRectangle () end match end newShapeObj @@ -309,7 +311,7 @@ class VertexType (_name: String, val schema: Schema, for i <- rng do System.out.print ("| ") val tuple_i = verts(i).prop - for (k, v) <- tuple_i do prt (v, wj) + for (_, v) <- tuple_i do prt (v, wj) println (" |") end for println ("|-" + "-" * len + "-|") diff --git a/src/main/scala/scalation/database/graph_pm/DualIso.scala b/src/main/scala/scalation/database/graph_pm/DualIso.scala index 1efa672ee..9890b925d 100644 --- a/src/main/scala/scalation/database/graph_pm/DualIso.scala +++ b/src/main/scala/scalation/database/graph_pm/DualIso.scala @@ -82,7 +82,6 @@ class DualIso (g: Graph, q: Graph) if ! φ.isEmpty then matches += φ if matches.size % CHECK == 0 then println ("saltzDualIso: matches so far = " + matches.size) - end if else if ! φ.isEmpty then breakable { for i <- φ (depth) if ! 
contains (φ, depth, i) do diff --git a/src/main/scala/scalation/database/graph_pm/DualSim.scala b/src/main/scala/scalation/database/graph_pm/DualSim.scala index 8fa56afe1..6211a91f4 100644 --- a/src/main/scala/scalation/database/graph_pm/DualSim.scala +++ b/src/main/scala/scalation/database/graph_pm/DualSim.scala @@ -52,7 +52,6 @@ class DualSim (g: Graph, q: Graph) φ(u) -= v // remove vertex v from φ(u) if φ(u).isEmpty then break () // no match for vertex u => no overall match alter = true - end if // build newφ to contain only those vertices in φ(u_c) which also have a parent in φ(u) newφ ++= φInt end for @@ -95,7 +94,6 @@ class DualSim (g: Graph, q: Graph) φ(u) -= v // remove vertex v from φ(u) if φ(u).isEmpty then break () // no match for vertex u => no overall match alter = true - end if // build newφ to contain only those vertices in φ(u_c) which also have a parent in φ(u) newφ ++= φInt end for diff --git a/src/main/scala/scalation/database/graph_pm/Graph.scala b/src/main/scala/scalation/database/graph_pm/Graph.scala index 9cfec4780..7b614ad95 100644 --- a/src/main/scala/scalation/database/graph_pm/Graph.scala +++ b/src/main/scala/scalation/database/graph_pm/Graph.scala @@ -134,7 +134,6 @@ case class Graph (ch: Array [SET [Int]], flaw ("checkEdges", s"child of $u, with vertex id $u_c not in bounds 0..$maxId") okay = false break () - end if end for } // breakable okay @@ -152,7 +151,6 @@ case class Graph (ch: Array [SET [Int]], flaw ("checkElabels", s"no such edge from $u to $v") okay = false break () - end if end for } // breakable okay @@ -228,9 +226,8 @@ case class Graph (ch: Array [SET [Int]], //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Print this digraph in a deep sense with all the information. 
- * @param clip whether to clip out "Set(" and ")" */ - def printG (clip: Boolean = true): Unit = + def printG (): Unit = println (s"Graph ($name, $inverse, $size") for i <- ch.indices do println (toLine (i)) for (k, v) <- elabel do println (s"edge $k -> $v") @@ -258,7 +255,6 @@ case class Graph (ch: Array [SET [Int]], end for else for i <- vset do lv(i) = g.label(i) // direct correspondence for labels - end if (lv, vmap) end addVertices @@ -302,7 +298,7 @@ object Graph: schema: Array [String]): Graph = val n = label.length val ch = Array.fill (n)(SET [Int] ()) - for (e, l) <- elabel do ch(e._1) += e._2 + for (e, _) <- elabel do ch(e._1) += e._2 new Graph (ch, label, elabel, inverse, name, schema) end apply @@ -330,7 +326,6 @@ object Graph: for j <- 0 until n if j != i do // no vertex order restriction ch(i) += j elab += (i, j) -> adj(i, j) - end if end for new Graph (ch, lab, elab, inverse, name, schema) end fromMatrix diff --git a/src/main/scala/scalation/database/graph_pm/Graph0.scala b/src/main/scala/scalation/database/graph_pm/Graph0.scala index 0606155a1..5eddac3a6 100644 --- a/src/main/scala/scalation/database/graph_pm/Graph0.scala +++ b/src/main/scala/scalation/database/graph_pm/Graph0.scala @@ -111,7 +111,6 @@ case class Graph0 (ch: Array [SET [Int]], label: Array [ValueType], flaw ("checkEdges", s"child of $u, with vertex id $u_c not in bounds 0..$maxId") okay = false break () - end if end for } // breakable okay @@ -148,9 +147,8 @@ case class Graph0 (ch: Array [SET [Int]], label: Array [ValueType], //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Print this digraph in a deep sense with all the information. 
- * @param clip whether to clip out "Set(" and ")" */ - def printG (clip: Boolean = true): Unit = + def printG (): Unit = println (s"Graph0 ($name, $inverse, $size") for i <- ch.indices do println (toLine (i)) println (")") @@ -177,7 +175,6 @@ case class Graph0 (ch: Array [SET [Int]], label: Array [ValueType], end for else for i <- vset do lv(i) = g.label(i) // direct correspondence for labels - end if (lv, vmap) end addVertices diff --git a/src/main/scala/scalation/database/graph_pm/GraphGen.scala b/src/main/scala/scalation/database/graph_pm/GraphGen.scala index 6ff0abaa3..17f6e48df 100644 --- a/src/main/scala/scalation/database/graph_pm/GraphGen.scala +++ b/src/main/scala/scalation/database/graph_pm/GraphGen.scala @@ -82,7 +82,6 @@ class GraphGen (typeSelector: Char, stream: Int = 0): end for else ch(i) = rsg.igen (degree, size-1, i) // generate children uniformly across graph - end if end for val label = randDistLabels (size, nLabels) // randomly assign vertex labels @@ -252,7 +251,6 @@ class GraphGen (typeSelector: Char, stream: Int = 0): if ! (nodes contains v_ch) then nodes += v_ch // add child vertex to nodes q.enqueue (v_ch) // put it on the BFS queue - end if chs2 += v_ch edges += 1 else if nodes contains v_ch then // can only take edge if child in nodes @@ -260,7 +258,6 @@ class GraphGen (typeSelector: Char, stream: Int = 0): edges += 1 else println (s"genBFSquery: can't find enough child vertices for $v") - end if end if end for @@ -274,7 +271,6 @@ class GraphGen (typeSelector: Char, stream: Int = 0): maxEdges = edges maxNodes = nodes.clone () maxChMap = chMap.clone () - end if if nodes.size < size || edges < nedges then // not enough vertices/edges, try again nRestarts += 1 @@ -465,7 +461,6 @@ object GraphGen: if ! newNodeChildren.isEmpty then for newChild <- newNodeChildren if nodes.size < size do if ! 
nodes.contains (newChild) then { nodes += newChild; q.enqueue (newChild) } - end for end if end while @@ -473,7 +468,6 @@ object GraphGen: if nodes.size < size then nRestarts += 1 println ("nodes.size only " + nodes.size) - end if end while if nRestarts == maxRestarts then { println ("extractSubgraph: could not find a good query"); return null } @@ -503,7 +497,7 @@ object GraphGen: vertexMap.foreach { case (oldId, newId) => new2OldIds(newId) = oldId } // for each mapped vertex, assign its mapped children - val ch = Array.ofDim [SET [Int]] (nodes.size).map (x => SET [Int] ()) + val ch = Array.ofDim [SET [Int]] (nodes.size).map (_ => SET [Int] ()) for (v, v_ch) <- chMap do ch(vertexMap (v)) = v_ch.map (vertexMap (_)) // map the vertex and edge labels diff --git a/src/main/scala/scalation/database/graph_pm/GraphIO.scala b/src/main/scala/scalation/database/graph_pm/GraphIO.scala index 8b7271d61..def970a1e 100644 --- a/src/main/scala/scalation/database/graph_pm/GraphIO.scala +++ b/src/main/scala/scalation/database/graph_pm/GraphIO.scala @@ -172,9 +172,8 @@ object GraphIO: * to which the current vertex is adjacent. * @param lFile the file containing the graph labels * @param eFile the file the edges (to create adjacency sets) - * @param inverse whether to store inverse adjacency sets (parents) */ - def read2Files (lFile: String, eFile: String, inverse: Boolean = false): Graph = + def read2Files (lFile: String, eFile: String): Graph = val lLines = fromFile (lFile).getLines () // get the lines from lFile val label = lLines.map (x => toLabel (x.trim)).toArray // make the label array val eLines = fromFile (eFile).getLines () // get the lines from eFile @@ -189,11 +188,10 @@ object GraphIO: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Read a graph from TWO specially formatted Pajek files. 
- * @param lFile the file containing the graph labels - * @param eFile the file the edges (to create adjacency sets) - * @param inverse whether to store inverse adjacency sets (parents) + * @param lFile the file containing the graph labels + * @param eFile the file the edges (to create adjacency sets) */ - def read2PajekFile (lFile: String, eFile: String, inverse: Boolean = false): Graph = + def read2PajekFile (lFile: String, eFile: String): Graph = val lLines = fromFile (lFile).getLines () // get the lines from lFile val label = lLines.map (x => toLabel (x.trim)).toArray val ch = Array.ofDim [SET [Int]] (label.size) @@ -229,7 +227,7 @@ end GraphIO val ran_graph = gGen.genRandomGraph (size, nLabels, eLabels, avDegree, inverse, "ran_graph") println (s"ran_graph = $ran_graph") - ran_graph.printG (false) + ran_graph.printG () ran_graph.printG () // Write the graph to a file diff --git a/src/main/scala/scalation/database/graph_pm/GraphMetrics.scala b/src/main/scala/scalation/database/graph_pm/GraphMetrics.scala index 954fed80a..6727523c2 100644 --- a/src/main/scala/scalation/database/graph_pm/GraphMetrics.scala +++ b/src/main/scala/scalation/database/graph_pm/GraphMetrics.scala @@ -13,6 +13,7 @@ package database package graph_pm import scala.collection.mutable.{ArrayBuffer, Queue} +import scala.runtime.ScalaRunTime.stringOf //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `GraphMetrics` class provides methods for determining graph metrics that @@ -32,7 +33,6 @@ class GraphMetrics (val g: Graph, isUndirected: Boolean = true): if ! isUndirected then for i <- 0 until n; j <- g.ch (i) do g.ch (j) += i // converting directed to undirected - end if //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the diameter of graph g (i.e., maximum eccentricity). 
This also @@ -94,7 +94,6 @@ class GraphMetrics (val g: Graph, isUndirected: Boolean = true): if go(c) && len(c) == 0 then len(c) = len_c // distance from vertex i to c qu.enqueue (c) // put child c in queue - end if end for end visit @@ -133,24 +132,24 @@ end GraphMetrics // Compute the diameter of graph g var dia = 0 - for k <- 0 until 10 do + cfor (0, 10) { _ => time { dia = bfs.diam } println (s"diameter = $dia") - end for + } // cfor // Compute the radius of graph g var rd = 0 - for k <- 0 until 10 do + cfor (0, 10) { _ => time { rd = bfs.rad } println (s"radius = $rd") - end for + } // cfor // Return the central vertices of graph g var ctr: Array [Int] = null - for k <- 0 until 10 do + cfor (0, 10) { _ => time { ctr = bfs.central } - println ("central = ${stringOf (ctr)}") - end for + println (s"central = ${stringOf (ctr)}") + } // cfor end graphMetricsTest diff --git a/src/main/scala/scalation/database/graph_pm/ShortestPath.scala b/src/main/scala/scalation/database/graph_pm/ShortestPath.scala index 3a3327e63..68bb174d5 100644 --- a/src/main/scala/scalation/database/graph_pm/ShortestPath.scala +++ b/src/main/scala/scalation/database/graph_pm/ShortestPath.scala @@ -79,7 +79,6 @@ class ShortestPath (c: MatrixD, s: Int): val alt = v.dd + c(v.id, j) // compute alternate distance from s to j if alt < d(j) then p(j) = v.id; d(j) = alt; q += Item (j, d(j)) - end if end for debug ("spath", s"updated distance (s, v) : ($s, ${v.id}) = $d") end if diff --git a/src/main/scala/scalation/database/graph_pm/TopSort.scala b/src/main/scala/scalation/database/graph_pm/TopSort.scala index f36ae75f6..41ec7a0ae 100644 --- a/src/main/scala/scalation/database/graph_pm/TopSort.scala +++ b/src/main/scala/scalation/database/graph_pm/TopSort.scala @@ -13,8 +13,11 @@ package database package graph_pm import scala.collection.mutable.{Set => SET} +import scala.collection.mutable.ArrayBuffer import scala.runtime.ScalaRunTime.stringOf +import scalation.modeling.autograd.{Variabl, Function} + 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `TrafficLight` object is an enumeration type for traffic light colors. * Vertices are marked Green (unvisited), Yellow (processing), or Red (done with). @@ -70,6 +73,36 @@ object TopSort: vList end topSort + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Topological sort for Autograd DAGs (Function nodes). + * Traverses the computation graph starting from a root `Variabl` + * (typically the final loss/output) and returns a topologically + * sorted sequence of `Function` nodes (inputs appear before the + * Functions that consume them). + * @param root the output variable whose graph should be sorted + * @throws IllegalStateException if a cycle is detected + */ + def topSortFunctions (root: Variabl): Seq [Function] = + val visited = SET [Function] () // permanently visited (processed) + val active = SET [Function] () // recursion stack for cycle detection + val ordering = ArrayBuffer [Function] () + + def dfs (fn: Function): Unit = + if active.contains (fn) then + throw new IllegalStateException (s"Cycle detected in autograd graph at Function node: $fn") + if ! 
visited.contains (fn) then + active += fn + // Recurse to predecessor Functions (those that produce this fn's inputs) + for in <- fn.inputs; gfn <- in.gradFn do dfs (gfn) + active -= fn + visited += fn + ordering += fn // post-order append yields inputs -> outputs + end dfs + + root.gradFn.foreach (dfs) + ordering.toSeq + end topSortFunctions + end TopSort import TopSort.topSort diff --git a/src/main/scala/scalation/database/graph_relation/VertexType.scala b/src/main/scala/scalation/database/graph_relation/VertexType.scala index 74732a507..9262c98dc 100644 --- a/src/main/scala/scalation/database/graph_relation/VertexType.scala +++ b/src/main/scala/scalation/database/graph_relation/VertexType.scala @@ -79,13 +79,11 @@ case class VertexType (name: String, schema: VEC [String], eschema: VEC [String] if ! (schema contains pname) then flaw ("check", s"error pname = $pname not found in schema") matched = false - end if end for for ename <- v.edge.keys do if ! (eschema contains ename) then flaw ("check", s"error ename = $ename not found in eschema") matched = false - end if end for if ! matched then break end for @@ -101,7 +99,6 @@ case class VertexType (name: String, schema: VEC [String], eschema: VEC [String] if ! (schema contains pkey) then flaw ("buildIndex", s"primary key property = $pkey is not in the schema") return - end if primaryKey = pkey for v <- verts do val key = v.prop(pkey) // the primary key diff --git a/src/main/scala/scalation/database/mugraph_pm/MuDualIso.scala b/src/main/scala/scalation/database/mugraph_pm/MuDualIso.scala index 2775b57e0..57dbdbb72 100644 --- a/src/main/scala/scalation/database/mugraph_pm/MuDualIso.scala +++ b/src/main/scala/scalation/database/mugraph_pm/MuDualIso.scala @@ -82,7 +82,6 @@ class MuDualIso (g: MuGraph, q: MuGraph) if ! φ.isEmpty then matches += φ if matches.size % CHECK == 0 then println ("saltzDualIso: matches so far = " + matches.size) - end if else if ! φ.isEmpty then breakable { for i <- φ (depth) if ! 
contains (φ, depth, i) do diff --git a/src/main/scala/scalation/database/mugraph_pm/MuDualSim.scala b/src/main/scala/scalation/database/mugraph_pm/MuDualSim.scala index dcfacbb69..adbe6b783 100644 --- a/src/main/scala/scalation/database/mugraph_pm/MuDualSim.scala +++ b/src/main/scala/scalation/database/mugraph_pm/MuDualSim.scala @@ -53,7 +53,6 @@ class MuDualSim (g: MuGraph, q: MuGraph) φ(u) -= v // remove vertex v from φ(u) if φ(u).isEmpty then break () // no match for vertex u => no overall match alter = true - end if // build newφ to contain only those vertices in φ(u_c) which also have a parent in φ(u) newφ ++= φInt end for @@ -96,7 +95,6 @@ class MuDualSim (g: MuGraph, q: MuGraph) φ(u) -= v // remove vertex v from φ(u) if φ(u).isEmpty then break () // no match for vertex u => no overall match alter = true - end if // build newφ to contain only those vertices in φ(u_c) which also have a parent in φ(u) newφ ++= φInt end for diff --git a/src/main/scala/scalation/database/mugraph_pm/MuGraph.scala b/src/main/scala/scalation/database/mugraph_pm/MuGraph.scala index 300e59322..1c8ff6822 100644 --- a/src/main/scala/scalation/database/mugraph_pm/MuGraph.scala +++ b/src/main/scala/scalation/database/mugraph_pm/MuGraph.scala @@ -226,9 +226,8 @@ case class MuGraph (ch: Array [SET [Int]], //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Print this multi-digraph in a deep sense with all the information. 
- * @param clip whether to clip out "Set(" and ")" */ - def printG (clip: Boolean = true): Unit = + def printG (): Unit = println (s"MuGraph ($name, $inverse, $size") for i <- ch.indices do println (toLine (i)) for (k, v) <- elabel do println (s"edge $k -> $v") @@ -301,7 +300,7 @@ object MuGraph: schema: Array [String]): MuGraph = val n = label.length val ch = Array.fill (n)(SET [Int] ()) - for (e, l) <- elabel do ch(e._1) += e._2 + for (e, _) <- elabel do ch(e._1) += e._2 new MuGraph (ch, label, elabel, inverse, name, schema) end apply diff --git a/src/main/scala/scalation/database/mugraph_pm/MuGraphGen.scala b/src/main/scala/scalation/database/mugraph_pm/MuGraphGen.scala index 671d02375..281510696 100644 --- a/src/main/scala/scalation/database/mugraph_pm/MuGraphGen.scala +++ b/src/main/scala/scalation/database/mugraph_pm/MuGraphGen.scala @@ -12,6 +12,7 @@ package scalation package database package mugraph_pm +import scala.annotation.unused import scala.collection.mutable.{Map, Queue} import scala.collection.mutable.{Set => SET} import scala.math.pow @@ -75,10 +76,10 @@ class MuGraphGen (typeSelector: Char): * @param inverse whether to create inverse adjacency (parents) * @param name the name of the graph */ - def genRandomGraph (size: Int, nLabels: Int, eLabels: Int, avDegree: Int, inverse: Boolean = false, + def genRandomGraph (size: Int, nLabels: Int, @unused eLabels: Int, avDegree: Int, inverse: Boolean = false, name: String = "g"): MuGraph = val ch = Array.ofDim [SET [Int]] (size) - for i <- ch.indices do // for each vertex i + for i <- ch.indices do // for each vertex i val degree = rand.nextInt (avDegree * 2 + 1) // out degree for vertex i val rvec = RandomVecI (degree, size-1, i) // random vector of integers ch(i) = rvec.igen.toArray.to (SET) // children of vertex i @@ -228,7 +229,7 @@ class MuGraphGen (typeSelector: Char): val newNodeChildren = g.ch (newNode) if ! 
newNodeChildren.isEmpty then val nncArr = newNodeChildren.toArray - for i <- 0 until rand.nextInt (avDegree * 2 + 1) if nodes.size < size do + for _ <- 0 until rand.nextInt (avDegree * 2 + 1) if nodes.size < size do val newChild = nncArr (rand.nextInt (newNodeChildren.size)) if ! (nodes contains newChild) then { nodes += newChild; q.enqueue (newChild) } else cycle = true @@ -253,7 +254,7 @@ class MuGraphGen (typeSelector: Char): vertexMap.foreach { case (oldId, newId) => new2OldIds(newId) = oldId } // for each mapped vertex, assign its mapped children - val ch = Array.ofDim [SET [Int]] (size).map (x => SET [Int] ()) + val ch = Array.ofDim [SET [Int]] (size).map (_ => SET [Int] ()) for (v, v_ch) <- chMap do ch(vertexMap (v)) = v_ch.map (vertexMap (_)) // map the vertex, edge labels @@ -295,7 +296,6 @@ class MuGraphGen (typeSelector: Char): if ! newNodeChildren.isEmpty then for newChild <- newNodeChildren if nodes.size < size do if ! (nodes contains newChild) then { nodes += newChild; q.enqueue (newChild) } - end for end if end while @@ -303,7 +303,6 @@ class MuGraphGen (typeSelector: Char): if nodes.size < size then nRestarts += 1 println ("nodes.size only " + nodes.size) - end if end while if nRestarts == maxRestarts then { println ("extractSubgraph: could not find a good query"); return null } @@ -314,7 +313,7 @@ class MuGraphGen (typeSelector: Char): for x <- nodes do { newLabelMap += (x -> c); c += 1 } val newToOldLabels = Array.ofDim [Int] (size) newLabelMap.foreach { case (oldL, newL) => newToOldLabels (newL) = oldL } - val ch = Array.ofDim [SET [Int]] (size).map (x => SET [Int] ()) + val ch = Array.ofDim [SET [Int]] (size).map (_ => SET [Int] ()) for (node, children) <- chMap do ch (newLabelMap(node)) = children.map (x => newLabelMap (x)) val label = newToOldLabels.map (x => g.label(x)).toArray val elab = Map [(Int, Int), SET [ValueType]] () // FIX to be implemented @@ -378,7 +377,7 @@ class MuGraphGen (typeSelector: Char): * @param pow the power/exponent */ 
private def powDistLabels (size: Int, nLabels: Int, pow: Double): Array [ValueType] = - Array.ofDim [ValueType] (size).map (x => powInt (0, nLabels, pow).asInstanceOf [ValueType]) + Array.ofDim [ValueType] (size).map (_ => powInt (0, nLabels, pow).asInstanceOf [ValueType]) end powDistLabels //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/database/mugraph_pm/MuGraphMatcher.scala b/src/main/scala/scalation/database/mugraph_pm/MuGraphMatcher.scala index 74cda6063..f61a87468 100644 --- a/src/main/scala/scalation/database/mugraph_pm/MuGraphMatcher.scala +++ b/src/main/scala/scalation/database/mugraph_pm/MuGraphMatcher.scala @@ -130,7 +130,6 @@ trait MuGraphMatcher (g: MuGraph, q: MuGraph): if ans != null then for i <- φ.indices do println (s"$i: ${φ(i)} == ? ${ans(i)}") for i <- φ.indices do assert (φ(i) == ans(i)) - end if φ end test diff --git a/src/main/scala/scalation/database/relation/Ex_ProduceSales.scala b/src/main/scala/scalation/database/relation/Ex_ProduceSales.scalaa similarity index 100% rename from src/main/scala/scalation/database/relation/Ex_ProduceSales.scala rename to src/main/scala/scalation/database/relation/Ex_ProduceSales.scalaa diff --git a/src/main/scala/scalation/database/relation/Ex_Teaching.scala b/src/main/scala/scalation/database/relation/Ex_Teaching.scalaa similarity index 100% rename from src/main/scala/scalation/database/relation/Ex_Teaching.scala rename to src/main/scala/scalation/database/relation/Ex_Teaching.scalaa diff --git a/src/main/scala/scalation/database/relation/Relation.scala b/src/main/scala/scalation/database/relation/Relation.scalaa similarity index 100% rename from src/main/scala/scalation/database/relation/Relation.scala rename to src/main/scala/scalation/database/relation/Relation.scalaa diff --git a/src/main/scala/scalation/database/relation/TableGen.scala b/src/main/scala/scalation/database/relation/TableGen.scalaa similarity index 100% rename from 
src/main/scala/scalation/database/relation/TableGen.scala rename to src/main/scala/scalation/database/relation/TableGen.scalaa diff --git a/src/main/scala/scalation/database/table/BankDB.scala b/src/main/scala/scalation/database/table/BankDB.scala index d08788ee7..9c5ebd6bd 100644 --- a/src/main/scala/scalation/database/table/BankDB.scala +++ b/src/main/scala/scalation/database/table/BankDB.scala @@ -16,6 +16,7 @@ import Tabular._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `bankDB` main function uses the `Table` class for simple database application. + * Uses the original database schema from the older editions of Korth et al. * > runMain scalation.database.table.bankDB */ @main def bankDB (): Unit = @@ -44,6 +45,7 @@ import Tabular._ .add ("Main", 905, "Mary", 1000.0) .add ("Alps", 906, "Mary", 2000.0) .add ("Lake", 907, "Joe", 1500.0) + .add ("Alps", 908, "Joe", 1600.0) .show () loan.add ("Lake", 1001, "Peter", 1000.0) @@ -133,11 +135,43 @@ import Tabular._ val q4 = π("cname, ccity")(customer ⋈ (σ("bname != bname2")(a ⋈ ("cname == cname", a)))) q4.show () +// [b] List the names and cities (ccity) of customers who do not have a deposit account in the city in which they live. + + val has = π("cname, ccity")(σ("bcity == ccity")(customer ⋈ deposit ⋈ branch)) + val q5 = π("cname, ccity")(customer) - has + q5.show () + +// [4] List the names of customer having deposits at all branches located in the city the customer lives in. 
+ + banner ("deposits at all branches located in the city the customer lives in") + + val q6 = π("cname")(customer) - π("cname")(π("cname, bname")(customer ⋈ ("ccity == bcity", branch)) - π("cname, bname")(deposit)) + q6.show () + +// val q7 = π("cname")(π("cname, bname")(customer ⋈ deposit) ÷ π("cname, bname")(customer ⋈ ("ccity == bcity", branch))) + val q7a = π("cname, bname")(customer ⋈ deposit) + val q7b = π("cname, bname")(customer ⋈ ("ccity == bcity", branch)) + val q7 = q7a ÷ q7b + q7a.show () + q7b.show () + q7.show () + + val q8 = π("cname")(σ("ccity == bcity")(customer ⋈ deposit ⋈ branch)) + q8.show () + + banner ("Q9: different branches for deposits and loans in Athens") + + val db = σ("bcity == 'Athens'")(branch) ⋈ deposit + val lb = σ("bcity == 'Athens'")(branch) ⋈ loan + val q9 = π("cname")(σ("bname != bname2")(db ⋈ ("cname == cname", lb))) + q9.show () + end bankDB //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `bankDB2` main function uses the `Table` class for simple database application. + * Uses the expanded database schema from the new edition of Korth et al. 
* > runMain scalation.database.table.bankDB2 */ @main def bankDB2 (): Unit = diff --git a/src/main/scala/scalation/database/table/GTable.scala b/src/main/scala/scalation/database/table/GTable.scala index 56b8e7de8..4ea7e9815 100644 --- a/src/main/scala/scalation/database/table/GTable.scala +++ b/src/main/scala/scalation/database/table/GTable.scala @@ -166,7 +166,6 @@ object GTable: else // REMAINING LINES val token = ln.split (sep, -1).map (_.trim) // array of token strings s.vertices += Vertex (makeTuple (token, domain, pos)) - end if l_no += 1 end for @@ -249,10 +248,8 @@ class GTable (name_ : String, schema_ : Schema, domain_ : Domain, key_ : Schema) flaw ("addE", s"$name: attempt to link to multiple targets vertices when edge type is unique, elab = $elab") else eset += e - end if else flaw ("addE", s"elab = $elab not an edge type for $name") - end if this end addE @@ -305,10 +302,8 @@ class GTable (name_ : String, schema_ : Schema, domain_ : Domain, key_ : Schema) val eset = u.edge.getOrElse (elab, null) if eset == null then u.edge += elab -> es else eset ++= es - end if else flaw ("addEs", s"elab = $elab not an edge type for $name") - end if end addEs //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -446,7 +441,7 @@ class GTable (name_ : String, schema_ : Schema, domain_ : Domain, key_ : Schema) * @param ref the foreign key reference (edge-label, referenced table) */ def expand (x: Schema, ref: (String, GTable)): GTable = - val (elab, refTab) = ref // edge-label, referenced table + val refTab = ref._2 // edge-label, referenced table // val x1 = schema intersect x // attributes from first table val x1 = meet (schema, x) // attributes from first table val x2 = meet (refTab.schema, x) // attributes from second table diff --git a/src/main/scala/scalation/database/table/KGTable.scala b/src/main/scala/scalation/database/table/KGTable.scala index b40ca2c58..9cca71990 100644 --- a/src/main/scala/scalation/database/table/KGTable.scala +++ 
b/src/main/scala/scalation/database/table/KGTable.scala @@ -106,7 +106,6 @@ class KGTable (name_ : String, schema_ : Schema, domain_ : Domain, key_ : Schema s.vertices ++= np.vertices for nn <- n.subtypes do addSubVertices (lev + 1, levels, s, nn.asInstanceOf [KGTable]) - end for end if end addSubVertices diff --git a/src/main/scala/scalation/database/table/LTable.scala b/src/main/scala/scalation/database/table/LTable.scala index 3ee360d1b..aec77568b 100644 --- a/src/main/scala/scalation/database/table/LTable.scala +++ b/src/main/scala/scalation/database/table/LTable.scala @@ -1,6 +1,6 @@ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller +/** @author John Miller, Sahil Varma * @version 2.0 * @date Fri Jul 22 00:20:15 EDT 2022 * @see LICENSE (MIT style license file). @@ -55,6 +55,56 @@ object LTable: s end apply + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Read/create the linked-table with the given name into memory loading its columns + * with data from the CSV file named fileName. The attribute names are read from + * the FIRST LINE. + * @param fileName the file name (or file-path) of the data file + * @param name the name of the table + * @param domain_ the domains/data-types (as one string) for attributes ('D', 'I', 'L', 'S', 'X', 'T') + * @param key the attributes forming the primary key + * @param pos the sequence of column positions in the input file to be used (null => select all) + * @param sep the element separation string/regex (e.g., "," ";" " +") + */ + def load (fileName: String, name: String, domain_ : String, key: String, + pos: Array [Int], sep: String): LTable = + load (fileName, name, strim (domain_).map (_.head), key, pos, sep) + end load + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Read/create the linked-table with the given name into memory loading its columns + * with data from the CSV file named fileName. 
The attribute names are read from + * the FIRST LINE. + * @see scalation.readFileIntoArray + * @param fileName the file name (or file-path) of the data file + * @param name the name of the table + * @param domain the domains/data-types for attributes ('D', 'I', 'L', 'S', 'X', 'T') + * @param key the attributes forming the primary key + * @param pos_ the sequence of column positions in the input file to be used (null => select all) + * @param sep the element separation string/regex (e.g., "," ";" " +") + */ + def load (fileName: String, name: String, domain: Domain, key: String = null, + pos_ : Array [Int] = null, sep: String = ","): LTable = + apply (Table.load (fileName, name, domain, key, pos_, sep)) + end load + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Read/create the linked-table with the given name into memory loading its columns + * with data from the CSV file named fileName. The attribute names are read from + * the FIRST LINE. Use a short-cut (not reliable) to determines the column domains, + * by applying the 'tuple2type' method to the SECOND LINE. 
+ * Note: safer to pull a row without missing or zero values from the middle of the dataset + * @see `tableTest3` + * @see scalation.readFileIntoArray + * @param fileName the file name (or file-path) of the data file + * @param name the name of the table + * @param mumCol the number of columns + * @param key the attributes forming the primary key + */ + def load (fileName: String, name: String, numCol: Int, key: String): LTable = + apply (Table.load (fileName, name, numCol, key)) + end load + end LTable import LTable.{cntr, debug, flaw} @@ -72,7 +122,7 @@ case class LTable (name_ : String, schema_ : Schema, domain_ : Domain, key_ : Sc extends Table (name_, schema_, domain_, key_) with Serializable: - private val links = Map [String, Map [ValueType, Tuple]] () // fkey -> pkey links + private val links = Map [String, Map [ValueType, Tuple]] () // fkey -> pkey links //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Add LINKAGE (foreign key reference) from this table to refTab and for each @@ -83,9 +133,11 @@ case class LTable (name_ : String, schema_ : Schema, domain_ : Domain, key_ : Sc * @param refTab the referenced table being linked to */ override def addLinkage (fkey: String, refTab: Table): Unit = - if ! refTab.hasIndex then refTab.create_index () // make sure refTab has a primary index - links += fkey -> Map [ValueType, Tuple] () // establish links map for fkey - for t <- tuples do addLink (fkey, t, refTab) // add link for each tuple + if ! 
refTab.hasIndex then refTab.create_index () // make sure refTab has a primary index + linkTypes += fkey -> refTab // add foreign key -> parent table (refTab) to link types + links += fkey -> Map [ValueType, Tuple] () // establish links map for fkey + for t <- tuples do addLink (fkey, t, refTab) // add link for each tuple + refTab.children += this // add this table -> parent table (refTab) end addLinkage //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -98,15 +150,14 @@ case class LTable (name_ : String, schema_ : Schema, domain_ : Domain, key_ : Sc */ def addLink (fkey: String, t: Tuple, refTab: Table): Unit = val t_fkey = pull (t, fkey) -// val refTup = refTab.index.getOrElse (new KeyType (t_fkey), null) // FIX - unify use of indices +// val refTup = refTab.index.getOrElse (new KeyType (t_fkey), null) // FIX - unify use of indices val refTup = refTab.index.getOrElse (t_fkey, null) if refTup == null then flaw ("addLink", s"$name: referential integrity violation for fkey = $fkey, value = $t_fkey") - else + else if ! (links(fkey) contains t_fkey) then val rTup = refTup.asInstanceOf [Tuple] debug ("addLink", s"$name: foreign key = $fkey add $t_fkey -> ${stringOf (rTup)}") links(fkey) += t_fkey -> rTup - end if end addLink //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -120,6 +171,22 @@ case class LTable (name_ : String, schema_ : Schema, domain_ : Domain, key_ : Sc links(fkey).remove (t_fkey).isDefined end removeLink + // R E L A T I O N A L G E B R A O P E R A T O R S + + // Most are inherited from `Table` + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** RENAME this table, returning a shallow copy of this table. 
+ * Usage: customer rename "client" + *-------------------------------------------------------------------------- + * @param newName the new name for the table + */ + override def rename (newName: String): Table = + val s = new LTable (newName, schema, domain, key) + s.tuples ++= tuples // shallow copy + s + end rename + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the EQUI-JOIN via the LINK JOIN (LJ) algorithm that uses direct LINKS * from this linkable-table to the referenced table keeping concatenated tuples @@ -130,24 +197,22 @@ case class LTable (name_ : String, schema_ : Schema, domain_ : Domain, key_ : Sc * @param ref the foreign key reference (foreign key attribute, referenced table) */ override def join (ref: (String, Table)): LTable = - val (fkey, refTab) = ref // foreign key, referenced table + val (fkey, refTab) = ref // foreign key, referenced table val s = new LTable (s"${name}_j_${cntr.inc ()}", disambiguate (schema, refTab.schema), domain ++ refTab.domain, key) - var link = links.getOrElse (fkey, null) // get link for foreign key + val link = links.getOrElse (fkey, null) // get link for foreign key if link == null then - addLinkage (fkey, refTab) // add the linkage - link = links.getOrElse (fkey, null) // try again - if link == null then - flaw ("join", s"$name: foreign key $fkey not established as a link") - end if - debug ("join", s"link = $link") - for t <- tuples do // iterate over fkey table - val t_fkey = pull (t, fkey) // pull out foreign key value - val u = link.getOrElse (t_fkey, null) // get tuple from pkey table - if u != null then s.tuples += t ++ u // add concatenated tuples - end for + flaw ("join", s"$name: foreign key $fkey not established as a link") +// debug ("join", s"link = $link") + + cfor (0, tuples.size) { i => // iterate over fkey table + val t = tuples(i) + val t_fkey = pull (t, fkey) // pull out foreign key value + val u = link.getOrElse (t_fkey, null) // get tuple from pkey 
table + if u != null then s.tuples += t ++ u // add concatenated tuples + } // cfor s end join @@ -159,22 +224,37 @@ case class LTable (name_ : String, schema_ : Schema, domain_ : Domain, key_ : Sc * @param r2 the second table */ override infix def join (r2: Table): Table = -// val common = schema intersect r2.schema // common attributes - val common = meet (schema, r2.schema) // common attributes +// val common = schema intersect r2.schema // common attributes + val common = meet (schema, r2.schema) // common attributes debug ("join", s"common = ${stringOf (common)}") val rest = r2.schema diff common - val newKey = if subset (common, key) then r2.key // three possibilities for new key + val newKey = if subset (common, key) then r2.key // three possibilities for new key else if subset (common, r2.key) then key else key ++ r2.key val s = new Table (s"${name}_j_${cntr.inc ()}", schema ++ rest, domain ++ r2.pull (rest), newKey) - // implement LJ algorithm - + val link = links.getOrElse (common(0), null) // get link for foreign key + cfor (0, tuples.size) { i => + val t = tuples(i) + val t_fkey = pull (t, common(0)) // pull out foreign key value + val u = link.getOrElse (t_fkey, null) // get tuple from pkey table + if u != null then s.tuples += t ++ r2.pull (u, rest) // add concatenated tuples + } // cfor s end join + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return a copy of this table limited to the first n tuples/rows. 
+ * @param n the number of tuples/rows to keep + */ + override def limit (n: Int): LTable = + val s = new LTable (name + "_$n", schema, domain, key) + s.tuples ++= tuples.slice (0, n) + s + end limit + end LTable diff --git a/src/main/scala/scalation/database/table/Table.scala b/src/main/scala/scalation/database/table/Table.scala index 593310f89..4a0702918 100644 --- a/src/main/scala/scalation/database/table/Table.scala +++ b/src/main/scala/scalation/database/table/Table.scala @@ -1,6 +1,6 @@ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller +/** @author John Miller, Sahil Varma * @version 2.0 * @date Fri Jun 17 11:19:14 EDT 2022 * @see LICENSE (MIT style license file). @@ -51,8 +51,8 @@ import scalation.database.{BpTreeMultiMap => MIndexMap} //import scalation.database.{JTreeMap => IndexMap} //import scalation.database.{JTreeMultiMap => MIndexMap} -import scala.collection.mutable.{ArrayBuffer => Bag, IndexedSeq, Map} -import scala.math.max +import scala.collection.mutable.{ArrayBuffer => Bag, IndexedSeq, Map, Set} +import scala.math.{max, min} import scala.runtime.ScalaRunTime.stringOf import scala.util.control.Breaks.{breakable, break} @@ -96,7 +96,7 @@ extension (t: Tuple) */ object Table: - private val debug = debugf ("Table", false) // debug function + private val debug = debugf ("Table", true) // debug function private val flaw = flawf ("Table") // flaw function private val cntr = Counter () // counter for generating unique names @@ -107,13 +107,13 @@ object Table: /** Set the full-path flag to the value of parameter fullPath. * @param fullPath flag indicating whether full or relative paths should be used */ - def setFullPath (fullPath: Boolean = true): Unit = { useFullPath = fullPath } + def setFullPath (fullPath: Boolean = true): Unit = useFullPath = fullPath //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set the limit on the number of lines to read to lim. 
* @param lim the limit on the number of lines to read (<= 0 => unlimited) */ - def setLimit (lim: Int): Unit = { limit = lim } + def setLimit (lim: Int): Unit = limit = lim //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a table given convenient string specifications. @@ -136,13 +136,13 @@ object Table: end apply //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the table with the given name into memory loading its columns with data from - * the CSV file named fileName. The attribute names are read from the FIRST LINE. + /** Read/create the table with the given name into memory loading its columns with data + * from the CSV file named fileName. The attribute names are read from the FIRST LINE. * @param fileName the file name (or file-path) of the data file * @param name the name of the table * @param domain_ the domains/data-types (as one string) for attributes ('D', 'I', 'L', 'S', 'X', 'T') * @param key the attributes forming the primary key - * @param pos_ the sequence of column positions in the input file to be used (null => select all) + * @param pos the sequence of column positions in the input file to be used (null => select all) * @param sep the element separation string/regex (e.g., "," ";" " +") */ def load (fileName: String, name: String, domain_ : String, key: String, @@ -151,8 +151,8 @@ object Table: end load //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the table with the given name into memory loading its columns with data from - * the CSV file named fileName. The attribute names are read from the FIRST LINE. + /** Read/create the table with the given name into memory loading its columns with data + * from the CSV file named fileName. The attribute names are read from the FIRST LINE. 
* @see scalation.readFileIntoArray * @param fileName the file name (or file-path) of the data file * @param name the name of the table @@ -161,12 +161,12 @@ object Table: * @param pos_ the sequence of column positions in the input file to be used (null => select all) * @param sep the element separation string/regex (e.g., "," ";" " +") */ - def load (fileName: String, name: String, domain: Domain, key: String, + def load (fileName: String, name: String, domain: Domain, key: String = null, pos_ : Array [Int] = null, sep: String = ","): Table = debug ("load", s"""fileName = $fileName, name = $name, domain = ${stringOf (domain)}, key = $key, pos_ = $pos_, sep = '$sep'; useFullPath = $useFullPath, limit = $limit""") - + val pos = if pos_ == null then Array.range (0, domain.size) else pos_ val schema = Array.ofDim [String] (domain.size) @@ -198,8 +198,8 @@ object Table: end load //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the table with the given name into memory loading its columns with data from - * the CSV file named fileName. The attribute names are read from the FIRST LINE. + /** Read/create the table with the given name into memory loading its columns with data + * from the CSV file named fileName. The attribute names are read from the FIRST LINE. * Use a short-cut (not reliable) to determines the column domains, by applying * the 'tuple2type' method to the SECOND LINE. 
* Note: safer to pull a row without missing or zero values from the middle of the dataset @@ -241,6 +241,12 @@ object Table: cfor (0, numCol) { j => domain(j) = dom(j) } // collect from dom s.tuples += makeTuple (token, domain, pos) + else if l_no <= 100 then // REST of first 100 - for correcting domains + val token = ln.split (sep, -1).map (_.trim) // array of token strings + val dom = tuple2type (token) // guess domains from this data row + cfor (0, numCol) { j => if dom(j) == 'D' then domain(j) = 'D' } + s.tuples += makeTuple (token, domain, pos) + else // REMAINING LINES val token = ln.split (sep, -1).map (_.trim) // array of token strings s.tuples += makeTuple (token, domain, pos) @@ -310,9 +316,10 @@ object Table: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** LOAD/Read the table with the given name into memory from a JSON file. + * FIX -- should be implemented * @param fileName the file name of the JSON file * @param name the name of the table to load - */ + * def load (fileName: String, name: String): Table = val jsonArr = readFileIntoArray (fileName) // val nlines = jsonArr.size @@ -324,6 +331,7 @@ object Table: // tab = gson.fromJson (jsonStr, tableType) tab end load + */ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a table from a matrix of doubles. @@ -367,13 +375,13 @@ object Table: /** Return the minimum value of all the elements in a column. * @param colj the given column */ - def min (colj: Array [ValueType]): ValueType = colj.min (ValueTypeOrd) + def min (colj: Array [ValueType]): ValueType = colj.min (using ValueTypeOrd) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the maximum value of all the elements in a column. 
* @param colj the given column */ - def max (colj: Array [ValueType]): ValueType = colj.max (ValueTypeOrd) + def max (colj: Array [ValueType]): ValueType = colj.max (using ValueTypeOrd) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the average of all the elements for a numeric column or 0 otherwise. @@ -419,13 +427,16 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) private [table] val tuples = Bag [Tuple] () // storage of tuples private [table] val linkTypes = Map [String, Table] () // link types for foreign keys -// private [table] val index = IndexMap [KeyType, Tuple] () // index on primary key - private [table] val index = IndexMap [Tuple] () // index on primary key +// private [table] val index = IndexMap [KeyType, Tuple] () // index on primary key (allows composite PK) +// private [table] val index = IndexMap [ValueType, Tuple] () // index on primary key (used for most index maps) FIX - unify + private [table] val index = IndexMap [Tuple] () // index on primary key (used for B+Tree) private [table] var hasIndex = false // whether the primary index has been built -// private [table] val sindex = Map [String, IndexMap [ValueType, Tuple]] () // map of secondary unique indices + +// private [table] val sindex = Map [String, IndexMap [ValueType, Tuple]] () // map of secondary unique indices FIX - unify private [table] val sindex = Map [String, IndexMap [Tuple]] () // map of secondary unique indices -// private [table] val mindex = Map [String, MIndexMap [ValueType, Tuple]] () // map of secondary non-unique indices +// private [table] val mindex = Map [String, MIndexMap [ValueType, Tuple]] () // map of secondary non-unique indices FIX - unify private [table] val mindex = Map [String, MIndexMap [Tuple]] () // map of secondary non-unique indices + private [table] val children = Set [Table] () // tables with FKs referencing this table private val groupMap = Map [ValueType, Bag [Tuple]] () // map from 
group key to collection of tuples protected val countX = domain.count ((c: Char) => c == 'X') // count the number of eXtended Strings @@ -461,12 +472,14 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) * to this table specifying the foreign key attribute fkey and the table it * references refTab. If refTab does not have a primary index already, make one. * Caveat: a foreign key may not be composite. + * Caveat: it is assumed that the foreign key and the primary key have the same name * @param fkey the foreign key attribute * @param refTab the table being referenced (to its primary key) */ def addLinkage (fkey: String, refTab: Table): Unit = - if ! refTab.hasIndex then refTab.create_index () - linkTypes += fkey -> refTab + if ! refTab.hasIndex then refTab.create_index () // make sure refTab has a primary index + linkTypes += fkey -> refTab // add foreign key -> parent table (refTab) to link types + refTab.children += this // add this table -> parent table (refTab) end addLinkage //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -514,16 +527,9 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) else toRemove += t_i } // cfor -// for t <- tuples do -// val pkey = new KeyType (pull (t, key)) // primary key -// val pkey = pull (t, key)(0) // primary key -// if index.getOrElse (pkey, null) == null then index += pkey -> t -// else toRemove += t -// end for - debug ("create_index", s"remove duplicate tuples = ${showT (toRemove)}") tuples --= toRemove - hasIndex = true + hasIndex = true // indicate the PRIMARY INDEX now exists end create_index //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -544,11 +550,6 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) newIndex += skey -> t_i // add key-value pair into new index } // cfor -// for t <- tuples do -// val skey = (pull (t, atr)) // secondary (non-composite) key -// newIndex += skey -> t // 
add key-value pair into new index -// end for - sindex += atr -> newIndex // add new index into the sindex map end create_sindex @@ -569,12 +570,8 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) newIndex.addOne1 (t_atr, t_i) // add key-value pair into new index } // cfor -// for t <- tuples do -// val t_atr = (pull (t, atr)) // non-unique attribute -// newIndex.addOne1 (t_atr, t) // add key-value pair into new index -// end for - mindex += atr -> newIndex // add new index into the mindex map +// hasIndex = true // FIX - why, it is not a primary index end create_mindex //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -582,11 +579,12 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) */ def drop_index (): Unit = index.clear () - hasIndex = false + hasIndex = false // the PRIMARY INDEX no longer exists end drop_index //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** DROP a secondary INDEX that maps a secondary key to the tuple containing it. + * @param atr the attribute/column on which to drop the index */ def drop_sindex (atr: String): Unit = val oldIndex = sindex.getOrElse (atr, null) @@ -599,6 +597,7 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** DROP a non-unique INDEX that maps a non-unique attribute to the tuple containing it. 
+ * @param atr the attribute/column on which to drop the index */ def drop_mindex (atr: String): Unit = val oldIndex = mindex.getOrElse (atr, null) @@ -699,12 +698,7 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) cfor (0, tuples.size) { i => val ta = pull (tuples(i), a) if apred (ta) then s.tuples += Array (ta) - } // for - -// for t <- tuples do -// val ta = pull (t, a) -// if apred (ta) then s.tuples += Array (ta) -// end for + } // cfor s end selproject @@ -723,9 +717,7 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) cfor (0, tuples.size) { i => val t_i = tuples(i) if apred (pull (t_i, a)) then s.tuples += t_i - } // for - -// for t <- tuples if apred (pull (t, a)) do s.tuples += t + } // cfor s end select @@ -768,7 +760,7 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) * @param pkey_ the primary key value */ def select (pkey_ : KeyType): Table = - val pkey = pkey_.key(0) // FIX + val pkey = pkey_.key(0) // FIX, handle composite PK in future val s = new Table (s"${name}_s_${cntr.inc ()}", schema, domain, key) if hasIndex then @@ -780,6 +772,24 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) s end select + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** FIND and return the tuple with the given primary key value pkey. + * Returns null if the primary key value is not found. 
+ * Usage: customer find ("Mary") + *-------------------------------------------------------------------------- + * @param pkey the primary key value (FIX - should also handle KeyType) + */ + def find (pkey: ValueType): Tuple = + if hasIndex then + index.getOrElse (pkey, null) + else + cfor (0, tuples.size) { i => + val t_i = tuples(i) + if pull (t_i, key(0)) == pkey then return t_i + } // cfor + null + end find + // =================================================================== UNION //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -815,9 +825,7 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) cfor (0, tuples.size) { i => val t_i = tuples(i) if ! (r2 contains t_i) then s.tuples += t_i - } // for - -// for t <- tuples do if ! (r2 contains t) then s.tuples += t + } // cfor s end minus @@ -837,9 +845,7 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) cfor (0, tuples.size) { i => val t_i = tuples(i) if r2 contains t_i then s.tuples += t_i - } // for - -// for t <- tuples do if r2 contains t then s.tuples += t + } // cfor s end intersect @@ -859,12 +865,8 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) cfor (0, tuples.size) { i => cfor (0, r2.tuples.size) { j => s.tuples += tuples(i) ++ r2.tuples(j) - } // for - } // for - -// for t <- tuples; u <- r2.tuples do -// s.tuples += t ++ u -// end for + } // cfor + } // cfor s end product @@ -889,12 +891,8 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) cfor (0, r2.tuples.size) { j => val t_j = r2.tuples(j) if predicate (t_i, t_j) then s.tuples += t_i ++ t_j - } // for - } // for - -// for t <- tuples; u <- r2.tuples do -// if predicate (t, u) then s.tuples += t ++ u -// end for + } // cfor + } // cfor s end join @@ -911,6 +909,7 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) val (a1, op, a2) = (tok(0), tok(1), tok(2)) // debug ("join", s"(a1, op, a2) = 
($a1, $op, $a2)") + // FIX - there may be other cases where newKey can be reduced (when FK = PK use PK from FK table) val newKey = key ++ r2.key // requires keys from both tables val s = new Table (s"${name}_j_${cntr.inc ()}", disambiguate (schema, r2.schema), @@ -953,12 +952,8 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) cfor (0, r2.tuples.size) { j => val t_j = r2.tuples(j) if pull (t_i, ix) eqElements r2.pull (t_j, iy) then s.tuples += t_i ++ t_j - } // for - } // for - -// for t <- tuples; u <- r2.tuples do -// if pull (t, x) eqElements r2.pull (u, y) then s.tuples += t ++ u -// end for + } // cfor + } // cfor s end join @@ -979,14 +974,14 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) val s = new Table (s"${name}_j_${cntr.inc ()}", disambiguate (schema, refTab.schema), domain ++ refTab.domain, key) - if refTab.hasIndex then + if refTab.hasIndex then // use UNIQUE INDEX on PK cfor (0, tuples.size) { i => // iterate over fkey table val t_i = tuples(i) // val t_fkey = new KeyType (pull (t_i, fkey)) val t_fkey = pull (t_i, fkey) debug ("join", s"foreign key t_fkey = $t_fkey") - val t_j = refTab.index.getOrElse (t_fkey, null) // get u via pkey from refTab - if t_j != null then s.tuples += t_i ++ t_j // add concatenated tuples + val t_j = refTab.index.getOrElse (t_fkey, null) // get u via pkey from refTab + if t_j != null then s.tuples += t_i ++ t_j // add concatenated tuples } // cfor else flaw ("join", s"must call 'create_index' before using indexed-join on ${refTab.name}") @@ -1010,27 +1005,20 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) val s = new Table (s"${name}_j_${cntr.inc ()}", disambiguate (schema, refTab.schema), domain ++ refTab.domain, key) - if hasIndex then +// if hasIndex then + if mindex.contains (fkey) then // use NON-UNIQUE INDEX on FK + val refTab_pkey = refTab.key(0) // primary key attribute from refTab cfor (0, refTab.tuples.size) { j => // iterate over 
pkey/refTab table - val u = refTab.tuples(j) - val u_pkey = pull (u, key) - debug ("join", s"primary key u_pkey = $u_pkey") - val idx = mindex(key(0)) // select multi-index by attribute - val ts = idx.getOrElse (u_pkey(0), null) // get {t} via fkey from this table + val u = refTab.tuples(j) // get j-th tuple from refTab + val u_pkey = refTab.pull (u, refTab_pkey) // get its PK value + debug ("join", s"primary key (atr = $refTab_pkey) u_pkey = $u_pkey") + val idx = mindex(fkey) // select multi-index by FK attribute + val ts = idx.getOrElse (u_pkey, null) // get {t} via fkey from this table if ts != null then for t <- ts do s.tuples += t ++ u // add concatenated tuples - } // for + } // cfor else - flaw (")join", s"must call 'create_index' before using indexed-join on $name") - -// for u <- refTab.tuples do // iterate over pkey/refTab table -// val u_pkey = pull (u, key) -// debug ("join", s"primary key u_pkey = $u_pkey") -// val idx = mindex(key(0)) // select multi-index by attribute -// val ts = idx.getOrElse (u_pkey(0), null) // get {t} via fkey from this table -// if ts != null then -// for t <- ts do s.tuples += t ++ u // add concatenated tuples -// end for + flaw ("_join", s"must call 'create_mindex' before using indexed-join on $name") s end _join @@ -1052,24 +1040,27 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) domain ++ refTab.domain, key) val t_sz = tuples.size // number of typles in foreign key table - val pkey = refTab.key (0) // again requires non-composite primary keys - orderBy (fkey) // order the foreign key table - refTab.orderBy (pkey) // order the primary key table + val pkey = refTab.key(0) // again requires non-composite primary keys + +// orderBy (fkey) // order the foreign key table (in-place) +// refTab.orderBy (pkey) // order the primary key table (in-place) + + val tab1 = orderBy (fkey) // order the foreign key table + val tab2 = refTab.orderBy (pkey) // order the primary key table var i, j = 0 // cursors i and 
j for foreign, primary key tables // Loop over both tables while i < t_sz && j < refTab.tuples.size do - val t = tuples(i) // current tuple from foreign key table - val u = refTab.tuples(j) // current tuple from primary key table + val t = tab1.tuples(i) // current tuple from foreign key table + val u = tab2.tuples(j) // current tuple from primary key table - val t_k = pull (t, fkey) // foreign key value in current tuple - val u_k = pull (u, pkey) // primary key value in current tuple + val t_k = tab1.pull (t, fkey) // foreign key value in current tuple + val u_k = tab2.pull (u, pkey) // primary key value in current tuple if t_k == u_k then // If the keys match, concatenate the tuples and add to result table - s.tuples += (t ++ u) + s.tuples += t ++ u i += 1 // move forward in foreign key table - j += 1 // move forward in primary key table else if t_k < u_k then // If foreign key is less than primary key, move forward in foreign key table i += 1 @@ -1078,7 +1069,7 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) j += 1 end while - debug ("_join_", s"cursors: i = $i, j = $j") + debug ("_join_", "sort-merge cursors: i = $i, j = $j") s end _join_ @@ -1100,6 +1091,7 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) val s = new Table (s"${name}_j_${cntr.inc ()}", schema ++ rest, domain ++ r2.pull (rest), newKey) + val icommon = pullPos (common) // using integers id faster than strings val jcommon = r2.pullPos (common) // using integers id faster than strings @@ -1109,13 +1101,8 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) val t_j = r2.tuples(j) if pull (t_i, icommon) eqElements r2.pull (t_j, jcommon) then s.tuples += t_i ++ r2.pull (t_j, rest) - } // for - } // for - -// for t <- tuples; u <- r2.tuples do -// if pull (t, common) eqElements r2.pull (u, common) then -// s.tuples += t ++ r2.pull (u, rest) -// end for + } // cfor + } // cfor s end join @@ -1141,8 +1128,15 @@ class Table (name: 
String, schema: Schema, domain: Domain, key: Schema) val s = new Table (s"${name}_j_${cntr.inc ()}", schema ++ rest, domain ++ r2.pull (rest), newKey) - // implement IJ-UI - + if r2.hasIndex then + cfor (0, tuples.size) { i => + val t_i = tuples(i) // tuple from the first table + val t_fkey = pull (t_i, common(0)) + val t_j = r2.index.getOrElse (t_fkey, null) // get matching row from r2 using the index + if t_j != null then s.tuples += t_i ++ r2.pull (t_j, rest) // concatenate and append the rows + } // for + else + flaw ("join_", s"must call 'create_index' before using indexed join on ${r2.name}") s end join_ @@ -1168,8 +1162,18 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) val s = new Table (s"${name}_j_${cntr.inc ()}", schema ++ rest, domain ++ r2.pull (rest), newKey) - // implement IJ-NUI - + if mindex.contains (common(0)) then + cfor (0, r2.tuples.size) { j => // iterate over pkey/refTab table + val u = r2.tuples(j) + val u_pkey = r2.pull (u, r2.key(0)) + debug ("_join", s"primary key u_pkey = $u_pkey") + val idx = mindex (common(0)) // select multi-index by attribute + val ts = idx.getOrElse (u_pkey, null) // get {t} via fkey from this table + if ts != null then + for t <- ts do s.tuples += t ++ r2.pull (u, rest) // add concatenated tuples + } // cfor + else + flaw ("_join", s"must call 'create_index' before using indexed-join on $name") s end _join @@ -1195,7 +1199,7 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) val s = new Table (s"${name}_j_${cntr.inc ()}", schema ++ rest, domain ++ r2.pull (rest), newKey) - // implement SMJ + // FIX - implement SMJ s end _join_ @@ -1222,11 +1226,7 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) cfor (0, tuples.size) { i => val t_i = tuples(i) if ! (ss contains t_i) then s.tuples += t_i ++ absentTuple - } // for - -// for t <- tuples if ! 
(ss contains t) do -// s.tuples += t ++ absentTuple -// end for + } // cfor s end leftJoin @@ -1239,7 +1239,7 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) * @param ref the foreign key reference (foreign key attribute, referenced table) */ def leftJoin (ref: (String, Table)): Table = - val (fkey, refTab) = ref + val refTab = ref._2 // (fkey, refTab) val s = join (ref) val absentTuple = nullTuple (refTab.domain) @@ -1248,11 +1248,7 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) cfor (0, tuples.size) { i => val t_i = tuples(i) if ! (ss contains t_i) then s.tuples += t_i ++ absentTuple - } // for - -// for t <- tuples if ! (ss contains t) do -// s.tuples += t ++ absentTuple -// end for + } // cfor s end leftJoin @@ -1279,22 +1275,13 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) val t_i = q.tuples(i) keep = true breakable { - for u <- r2.tuples do // change to cfor + cfor (0, r2.tuples.size) { i => + val u = r2.tuples(i) if ! (this contains t_i ++ u) then { keep = false; break () } - end for + } // cfor } // breakable if keep then s.tuples += t_i } // cfor - -// for t <- q.tuples do -// keep = true -// breakable { -// for u <- r2.tuples do -// if ! 
(this contains t ++ u) then { keep = false; break () } -// end for -// } // breakable -// if keep then s.tuples += t -// end for s end divide @@ -1319,12 +1306,6 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) val group = groupMap.getOrElseUpdate (gkey, Bag [Tuple] ()) group += t_i // add tuple t_i to gkey's group } // cfor - -// for t <- tuples do -// val gkey = t(col) -// val group = groupMap.getOrElseUpdate (gkey, Bag [Tuple] ()) -// group += t // add tuple t to gkey's group -// end for this end groupBy @@ -1376,6 +1357,8 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) s end orderBy +// FIX - add an in-place version of orderBy + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** ORDER-BY-DESC the given attributes, i.e., reorder the tuples in this table into * 'descending' order. A stable sorting is used to allow sorting on multiple attributes. @@ -1444,7 +1427,7 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) if tuples(i)(j) == matchVal then tuples(i)(j) = newVal updated = true - } // for + } // cfor updated end update @@ -1487,6 +1470,72 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) rem.size > 0 end delete + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** DELETE the tuple with the given primary key value. + * If there is an index, remove the tuple from the index as well. + * Return true iff at least the tuple is deleted. + * @param pkey the primary key value of the tuple to delete (FIX - also handle KeyType) + */ + def delete (pkey: ValueType): Boolean = + val t = find (pkey) + if t != null then + tuples -= t + if hasIndex then index -= pull (t, key)(0) + true + else + false + end delete + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** DELETE all tuples in set 'tups' from this table. + * If there is an index, remove those tuples from the index as well. 
+ * Return true iff at least one tuple is deleted. + * @param tups the set tuple to delete + */ + def delete (tups: Set [Tuple]): Boolean = + for t <- tups do + tuples -= t + if hasIndex then index -= pull (t, key)(0) + tups.size > 0 + end delete + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** DELETE and CASCADE tuple in this parent table and all related tuples in the + * child tables based on the provided primary key value 'pkey2Del'. + * FIX - only cascades one level, should be recursive + * @param pkey2Del the primary key value of the tuple to delete, and used for cascade + */ + def deleteCascade (pkey2Del: ValueType): Boolean = + val deleted = delete (pkey2Del) // delete the tuple with PK = pkey2Del from the parent table + if deleted then + for childTab <- children do // delete all the matching tuples from the child tables + val childTabMIndex = childTab.mindex (key(0)) + if childTabMIndex contains pkey2Del then + val tupSet = childTabMIndex (pkey2Del) // matching set of tuples in childTab via its MIndex + childTab.delete (tupSet) + childTabMIndex -= pkey2Del + deleted // return whether any tuples were deleted + end deleteCascade + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** DELETE the last n tuples from this parent table and cascade the deletions to + * the corresponding tuples in all child tables. Uses the `deleteCascade` method. 
+ * @param n the number of tuples to remove from the end of this parent table + */ + def deleteLast (n: Int): Boolean = + val idxOfpkey = schema.indexOf (key(0)) + val canDelete = rows > 0 + + if n > rows then + flaw ("deleteLast", s"total rows are less than $n, so delete all rows") + + cfor (0, min (n, rows)) { _ => + val tuple = tuples.last + deleteCascade (tuple(idxOfpkey)) + } // cfor + canDelete + end deleteLast + // C O N V E R T //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -1699,6 +1748,7 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** WRITE this table into a JavaScript Object Notation (JSON) file. + * FIX - correct the implementation * @param fileName the file name of the data file */ def writeJSON (fileName: String = name + ".json"): Unit = ??? @@ -1781,12 +1831,8 @@ class Table (name: String, schema: Schema, domain: Domain, key: Schema) cfor (0, r2.tuples.size) { j => val t_j = r2.tuples(j) if op (t_i(ia1), t_j(ia2)) then tups += t_i ++ t_j - } // for - } // for - -// for t <- tuples; u <- r2.tuples do -// if op (t(on(a1)), u(r2.on(a2))) then tups += t ++ u -// end for + } // cfor + } // cfor tups end tJoinTups diff --git a/src/main/scala/scalation/database/table/Table.scala.bak2 b/src/main/scala/scalation/database/table/Table.scala.bak2 new file mode 100644 index 000000000..d25df5f9e --- /dev/null +++ b/src/main/scala/scalation/database/table/Table.scala.bak2 @@ -0,0 +1,2161 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller, Sahil Varma + * @version 2.0 + * @date Fri Jun 17 11:19:14 EDT 2022 + * @see LICENSE (MIT style license file). 
+ * + * @note Relational Algebra (RA) for Row-Oriented Relational DBMS + * + * RA Operators: rename, project, select, union, minus, intersect, product, join, + * leftJoin, divide, groupBy, aggregate, orderBy + * + * Most of the RA Operators have Unicode versions: @see `scalation.UnicodeTest` + * + * Types of Indices (for Unique, Non-Unique Indices): + * LinHashMap, LinHashMultiMap // ScalaTion's Linear Hash Maps + * HashMap, HashMultiMap // Scala's Hash Maps + * JHashMap, JHashMultiMap // Java's Hash Maps + * BpTreeMap, BpTreeMultiMap // ScalaTion's B+Tree Maps + * TreeMap, TreeMultiMap // Scala's Tree Maps + * JTreeMap, JTreeMultiMap // Java's Tree Maps + */ + +package scalation +package database +package table + +//import com.google.gson.Gson +//import com.google.gson.reflect.TypeToken + +import java.io.{FileInputStream, FileOutputStream, File} +import java.io.{ObjectInputStream, ObjectOutputStream, PrintWriter} + +// pick a type of Map for Unique `IndexMap` and for Non-Unique `MIndexMap` + +//import scalation.database.{LinHashMap => IndexMap} +//import scalation.database.{LinHashMultiMap => MIndexMap} + +//import scala.collection.mutable.{HashMap => IndexMap} +//import scalation.database.{HashMultiMap => MIndexMap} + +//import scalaTion.database.{JHashMap => IndexMap} +//import scalaTion.database.{JHashMultiMap => MIndexMap} + +import scalation.database.{BpTreeMap => IndexMap} +import scalation.database.{BpTreeMultiMap => MIndexMap} + +//import scala.collection.mutable.{TreeMap => IndexMap} +//import scalation.database.{TreeMultiMap => MIndexMap} + +//import scalation.database.{JTreeMap => IndexMap} +//import scalation.database.{JTreeMultiMap => MIndexMap} + +import scala.collection.mutable.{ArrayBuffer => Bag, IndexedSeq, Map} +import scala.math.max +import scala.runtime.ScalaRunTime.stringOf +import scala.util.control.Breaks.{breakable, break} + +import scalation.mathstat.{MatrixD, VectorD, VectorI, VectorL, VectorS, VectorT} + 
+//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Determine whether the bag of tuples ts1 is the same ts2. + * @param ts1 the first bag of tuples + * @param ts2 the second bag of tuples + */ +def sameTuples (ts1: Bag [Tuple], ts2: Bag [Tuple]): Boolean = + if ts1.size != ts2.size then return false + var i = 0 + while i < ts1.size do + if ! (ts1(i) eqElements ts2(i)) then return false + i += 1 + true +end sameTuples + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Determine whether two tuples have the same elements (in the same order). + * This extension mwethod serves as faster alternative to Scala's `sameElements` method. + * @param t the main tuple + */ +extension (t: Tuple) + infix def eqElements (u: Tuple): Boolean = + if t.length != u.length then return false + var i = 0 + while i < t.length do + if t(i) != u(i) then return false + i += 1 + true + end eqElements + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Table` companion object provides factory methods for creating tables. + * Supported domains/data-types are 'D'ouble, 'I'nt, 'L'ong, 'S'tring, and 'T'imeNum. + * Note 'X' is for Long String (a formatting issue). + */ +object Table: + + private val debug = debugf ("Table", false) // debug function + private val flaw = flawf ("Table") // flaw function + private val cntr = Counter () // counter for generating unique names + + private var useFullPath = false // defaults to using relative file paths + private var limit = -1 // limit on number of lines to read + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Set the full-path flag to the value of parameter fullPath. 
+ * @param fullPath flag indicating whether full or relative paths should be used + */ + def setFullPath (fullPath: Boolean = true): Unit = useFullPath = fullPath + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Set the limit on the number of lines to read to lim. + * @param lim the limit on the number of lines to read (<= 0 => unlimited) + */ + def setLimit (lim: Int): Unit = limit = lim + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a table given convenient string specifications. + * @param name the name of the table + * @param schema the attributes for the table + * @param domain_ the domains/data-types for attributes ('D', 'I', 'L', 'S', 'X', 'T') + * @param key the attributes forming the primary key + */ + def apply (name: String, schema: String, domain_ : String, key: String): Table = + new Table (name, strim (schema), strim (domain_).map (_.head), strim (key)) + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a new empty table with the same schema as an existing table. + * @param name the name of the new table + * @param tab the existing table + */ + def apply (name: String, tab: Table): Table = + new Table (name, tab.schema, tab.domain, tab.key) + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Read the table with the given name into memory loading its columns with data from + * the CSV file named fileName. The attribute names are read from the FIRST LINE. 
+ * @param fileName the file name (or file-path) of the data file + * @param name the name of the table + * @param domain_ the domains/data-types (as one string) for attributes ('D', 'I', 'L', 'S', 'X', 'T') + * @param key the attributes forming the primary key + * @param pos_ the sequence of column positions in the input file to be used (null => select all) + * @param sep the element separation string/regex (e.g., "," ";" " +") + */ + def load (fileName: String, name: String, domain_ : String, key: String, + pos: Array [Int], sep: String): Table = + load (fileName, name, strim (domain_).map (_.head), key, pos, sep) + end load + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Read the table with the given name into memory loading its columns with data from + * the CSV file named fileName. The attribute names are read from the FIRST LINE. + * @see scalation.readFileIntoArray + * @param fileName the file name (or file-path) of the data file + * @param name the name of the table + * @param domain the domains/data-types for attributes ('D', 'I', 'L', 'S', 'X', 'T') + * @param key the attributes forming the primary key + * @param pos_ the sequence of column positions in the input file to be used (null => select all) + * @param sep the element separation string/regex (e.g., "," ";" " +") + */ + def load (fileName: String, name: String, domain: Domain, key: String, + pos_ : Array [Int] = null, sep: String = ","): Table = + + debug ("load", s"""fileName = $fileName, name = $name, domain = ${stringOf (domain)}, key = $key, + pos_ = $pos_, sep = '$sep'; useFullPath = $useFullPath, limit = $limit""") + + val pos = if pos_ == null then Array.range (0, domain.size) else pos_ + val schema = Array.ofDim [String] (domain.size) + + if pos.size != domain.size then flaw ("apply", "pos size should be same as domain size") + + var s: Table = null // new Table (name, schema, domain, strim (key)) + +// val lines = getFromURL_File (fileName) // read 
the CSV file + val lines = readFileIntoArray (fileName, useFullPath, limit) // read the CSV file + var l_no = 0 // the line number + + println (s"lines(0) = ${lines(0)}") + + for ln <- lines do // iterate by lines in file + + if l_no == 0 then // FIRST LINE - for schema + val header = ln.split (sep, -1).map (_.trim) // array of column names + debug ("load", s"header = ${stringOf (header)}") + cfor (0, pos.size) { j => schema(j) = header(pos(j)) } // use those at positions in pos + s = new Table (name, schema, domain, strim (key)) // make table after schema is formed + + else // REMAINING LINES + val token = ln.split (sep, -1).map (_.trim) // array of token strings + s.tuples += makeTuple (token, domain, pos) + + l_no += 1 + end for + s + end load + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Read the table with the given name into memory loading its columns with data from + * the CSV file named fileName. The attribute names are read from the FIRST LINE. + * Use a short-cut (not reliable) to determines the column domains, by applying + * the 'tuple2type' method to the SECOND LINE. 
+ * Note: safer to pull a row without missing or zero values from the middle of the dataset + * @see `tableTest3` + * @see scalation.readFileIntoArray + * @param fileName the file name (or file-path) of the data file + * @param name the name of the table + * @param mumCol the number of columns + * @param key the attributes forming the primary key + */ + def load (fileName: String, name: String, numCol: Int, key: String): Table = + + val pos = Array.range (0, numCol) + val sep = "," + debug ("load", s"""fileName = $fileName, name = $name, numCol = $numCol, key = $key, + pos = $pos, sep = '$sep'; useFullPath = $useFullPath, limit = $limit""") + + val schema = Array.ofDim [String] (numCol) + val domain = Array.ofDim [Char] (numCol) + + var s: Table = null // new Table (name, schema, domain, strim (key)) + +// val lines = getFromURL_File (fileName) // read the CSV file + val lines = readFileIntoArray (fileName, useFullPath, limit) // read the CSV file + var l_no = 0 // the line number + + for ln <- lines do // iterate by lines in file + + if l_no == 0 then // FIRST LINE - for schema + val header = ln.split (sep, -1).map (_.trim) // array of column names + debug ("load", s"header = ${stringOf (header)}") + cfor (0, numCol) { j => schema(j) = header(j) } // collect from header + s = new Table (name, schema, domain, strim (key)) // make table after schema is formed + + else if l_no == 1 then // SECOND LINE - for domains + val token = ln.split (sep, -1).map (_.trim) // array of token strings + val dom = tuple2type (token) // guess domains from first data row + debug ("load", s"dom = ${stringOf (dom)}") + cfor (0, numCol) { j => domain(j) = dom(j) } // collect from dom + s.tuples += makeTuple (token, domain, pos) + + else // REMAINING LINES + val token = ln.split (sep, -1).map (_.trim) // array of token strings + s.tuples += makeTuple (token, domain, pos) + + l_no += 1 + end for + s + end load + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: 
+ /** Make a tuple from an array of token strings, converting each each token + * according the corresponding domain specification. Use only the tokens + * in the array at the pos positions. + * @param token the array of token strings, e.g., Array ("5.0", "12", "Smith") + * @param domain the domains/data-types for attributes ('D', 'I', 'L', 'S', 'X', 'T') + * @param pos the positions in the token array to be used, e.g., Array (0, 2) + */ + def makeTuple (token: Array [String], domain: Domain, pos: Array [Int]): Tuple = + if token.size < pos.max then + flaw ("makeTuple", "not enough tokens for positions given in pos") + return null + + val tup = Array.ofDim [ValueType] (domain.size) // more robust than using token.size + cfor (0, pos.size) { j => + val nextToken = token(pos(j)) // get j-th token according to pos + tup(j) = domain(j) match + case 'D' => nextToken.mkDouble // Double + case 'I' => nextToken.toInt // Int + case 'L' => nextToken.toLong // Long + case 'S' | 'X' => nextToken // String or Long-String + case 'T' => TimeNum (nextToken) // TimeNum + case _ => { flaw ("makeTuple", s"domain($j) = ${domain(j)} not supported"); "?" } + } // cfor + tup + end makeTuple + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Given an array of strings (e.g., read from a file) with unknown domains, + * return the data-types (domains) by the lexical form of the strings. + * @see `scalation.typeOfStr` (in ValueType.scala) + * Caveat: may not be reliable since a column of doubles may start: 5, 7, 9.2, ... + * @param tup the type un-differentiated tuple as an array of strings + */ + def tuple2type (tup: Array [String]): Domain = + val dom = Array.ofDim [Char] (tup.size) + cfor (0, dom.size) { j => dom(j) = typeOfStr (tup(j)) } + dom + end tuple2type + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether the given table file exists in STORE_DIR. + * @param name the name of table. 
+ */ + def exist (name: String): Boolean = File (STORE_DIR + name + SER).exists () + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** LOAD/Read the table with the given name into memory using serialization. + * @see save in `Table` class. + * @param name the name of the table to load + */ + def load (name: String): Table = + val ois = new ObjectInputStream (new FileInputStream (STORE_DIR + name + SER)) + val tab = ois.readObject.asInstanceOf [Table] + ois.close () + tab + end load + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** LOAD/Read the table with the given name into memory from a JSON file. + * @param fileName the file name of the JSON file + * @param name the name of the table to load + */ + def load (fileName: String, name: String): Table = + val jsonArr = readFileIntoArray (fileName) +// val nlines = jsonArr.size + val jsonStr: String = jsonArr(0) + debug ("load", s"jsonStr = ${jsonStr.slice (0, 5000)}") + val tab: Table = null // FIX - change to var +// val gson = new Gson () +// val tableType = new TypeToken [Table] ().getType // FIX - fails +// tab = gson.fromJson (jsonStr, tableType) + tab + end load + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a table from a matrix of doubles. 
+ * @see the `toMatrix` mathod + * @param x the matrix containing the data + * @param name the name of the table + * @param schema the attribute/column names + * @param key the attributes forming the primary key + */ + def fromMatrix (x: MatrixD, name: String, schema: Schema, key: String): Table = + val domain = Array.fill (x.dim2)('D') // domain is all 'D' + val s = new Table (name, schema, domain, strim (key)) + + cfor (0, x.dim) { i => s.tuples += x(i).toArray } // i-th vector to tuple + s + end fromMatrix + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return basic statistics on the given column corresponding to SQL's + * aggregate functions: count, countd, min, max, sum, avg. + * @param cname the given column name + * @param colj the given column + */ + def stats (cname: String, colj: Array [ValueType]): Array [ValueType] = + Array (cname, count (colj), countd (colj), min (colj).toString, max (colj).toString, sum (colj), avg (colj)) + end stats + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the total number of elements in a column. + * @param colj the given column + */ + def count (colj: Array [ValueType]): ValueType = colj.size + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the number of distinct elements in a column. + * @param colj the given column + */ + def countd (colj: Array [ValueType]): ValueType = colj.distinct.size + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the minimum value of all the elements in a column. + * @param colj the given column + */ + def min (colj: Array [ValueType]): ValueType = colj.min (using ValueTypeOrd) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the maximum value of all the elements in a column. 
+ * @param colj the given column + */ + def max (colj: Array [ValueType]): ValueType = colj.max (using ValueTypeOrd) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the average of all the elements for a numeric column or 0 otherwise. + * @param colj the given column + */ + def avg (colj: Array [ValueType]): ValueType = sum (colj).toDouble / colj.size + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the sum of all the elements for a numeric column or 0 otherwise. + * @param colj the given column + */ + def sum (colj: Array [ValueType]): ValueType = + val n = colj.size + colj(0) match + case _: Double => Σ (0, n) { i => colj(i).toDouble } + case _: Int => Σ (0, n) { i => colj(i).toDouble } + case _: Long => Σ (0, n) { i => colj(i).toDouble } + case _: String => -0.0 + case _: TimeNum => -0.0 + case null => -0.0 + end match + end sum + + def π (x: String)(r: Table): Table = r.project (strim (x)) + def σ (condition: String)(r: Table): Table = r.select (condition) + def σ (predicate: Predicate)(r: Table): Table = r.select (predicate) + +end Table + +import Table.{cntr, debug, flaw} + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Table` class stores relational data and implements relational algebra operators. + * Supported domains/data-types are 'D'ouble, 'I'nt, 'L'ong, 'S'tring, and 'T'imeNum. 
+ * @param name the name of the table + * @param schema the attributes for the table + * @param domain the domains/data-types for the attributes ('D', 'I', 'L', 'S', 'X', 'T') + * @param key the attributes forming the primary key + */ +class Table (name: String, schema: Schema, domain: Domain, key: Schema) + extends Tabular [Table] (name, schema, domain, key) + with Serializable: + + private [table] val tuples = Bag [Tuple] () // storage of tuples + private [table] val linkTypes = Map [String, Table] () // link types for foreign keys +// private [table] val index = IndexMap [KeyType, Tuple] () // index on primary key + private [table] val index = IndexMap [Tuple] () // index on primary key + private [table] var hasIndex = false // whether the primary index has been built +// private [table] val sindex = Map [String, IndexMap [ValueType, Tuple]] () // map of secondary unique indices + private [table] val sindex = Map [String, IndexMap [Tuple]] () // map of secondary unique indices +// private [table] val mindex = Map [String, MIndexMap [ValueType, Tuple]] () // map of secondary non-unique indices + private [table] val mindex = Map [String, MIndexMap [Tuple]] () // map of secondary non-unique indices + private val groupMap = Map [ValueType, Bag [Tuple]] () // map from group key to collection of tuples + + protected val countX = domain.count ((c: Char) => c == 'X') // count the number of eXtended Strings + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the cardinality (number of tuples) in this table. + */ + inline def rows: Int = tuples.size + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the j-th column in this table (or the passed in tuples) as an array of value-type. 
+ * @param j the column to return + * @param tups the collection of tuples to use (defaults to all tuples in this table) + */ + def col (j: Int, tups: Bag [Tuple] = tuples): Array [ValueType] = + if j >= schema.size then + flaw ("col", s"column index j = $j exceeds the number of columns") + val c = Array.ofDim [ValueType] (tups.size) + cfor (0, c.size) { i => c(i) = tups(i)(j) } + c + end col + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether this table contains tuple u. + * @param u the tuple to look for + */ + infix def contains (u: Tuple): Boolean = tuples.exists (_ eqElements u) + + // I N T E G R I T Y C H E C K S + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Add LINKAGE from this table to the refTab, by adding a FOREIGN KEY CONSTRAINT + * to this table specifying the foreign key attribute fkey and the table it + * references refTab. If refTab does not have a primary index already, make one. + * Caveat: a foreign key may not be composite. + * Caveat: it is assumed that the foreign key and the primary key have the same name + * @param fkey the foreign key attribute + * @param refTab the table being referenced (to its primary key) + */ + def addLinkage (fkey: String, refTab: Table): Unit = + if ! refTab.hasIndex then refTab.create_index () + linkTypes += fkey -> refTab + refTab.children += this // add by Varma + end addLinkage + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Check that all the foreign keys values in tuple t satisfy their + * REFERENTIAL INTEGRITY CONSTRAINTS. 
+ * @param t the tuple being checked for referential integrity + */ + def referenceCheck (t: Tuple): Boolean = + var satisfy = true + for (fkey, refTab) <- linkTypes do + debug ("referenceCheck", s"does fkey = $fkey reference a pkey in ${refTab.name}") +// val fkeyVal = new KeyType (pull (t, fkey)) + val fkeyVal = pull (t, fkey) + if refTab.hasIndex && refTab.index.getOrElse (fkeyVal, null) == null then + flaw ("referenceCheck", s"foreign key $fkey = $fkeyVal is not in table ${refTab.name}") + flaw ("referenceCheck", s"where the tuple is ${stringOf (t)}") + satisfy = false + end for + satisfy + end referenceCheck + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the i-th primary key. + * @param i the index in the tuples/row index + */ + inline def getPkey (i: Int): KeyType = new KeyType (pull (tuples(i), key)) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** CREATE/recreate the primary INDEX that maps the primary key to the tuple + * containing it. Warning, creating an index will remove DUPLICATES based + * on maintaining UNIQUENESS CONSTRAINT of primary key values. 
+ * @param rebuild if rebuild is true, use old index to build new index; otherwise, create new index + */ + def create_index (rebuild: Boolean = false): Unit = + debug ("create_index", s"create an index of type ${index.getClass.getName}") + if rebuild then flaw ("create_index", "rebuilding off old primary key index has not yet been implemented") + index.clear () + val toRemove = Bag [Tuple] () + + cfor (0, tuples.size) { i => + val t_i = tuples(i) +// val pkey = new KeyType (pull (t_i, key)) // primary key + val pkey = pull (t_i, key)(0) // primary key + if index.getOrElse (pkey, null) == null then index += pkey -> t_i + else toRemove += t_i + } // cfor + + debug ("create_index", s"remove duplicate tuples = ${showT (toRemove)}") + tuples --= toRemove + hasIndex = true // indicate the PRIMARY INDEX now exists + end create_index + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** CREATE a secondary unique INDEX that maps a secondary key to the tuple + * containing it. Has no effect on duplicates; should first create a primary + * index to remove duplicates, otherwise, this index may skip tuples. + * @param atr the attribute/column to create the index on + */ + def create_sindex (atr: String): Unit = + debug ("create_sindex", s"create a secondary unique index of type ${index.getClass.getName}") + if ! 
hasIndex then flaw ("create_sindex", "should first create a primary index to eliminate duplicates") +// val newIndex = IndexMap [ValueType, Tuple] () + val newIndex = IndexMap [Tuple] () + + cfor (0, tuples.size) { i => + val t_i = tuples(i) + val skey = (pull (t_i, atr)) // secondary (non-composite) key + newIndex += skey -> t_i // add key-value pair into new index + } // cfor + + sindex += atr -> newIndex // add new index into the sindex map + end create_sindex + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** CREATE a non-unique INDEX (multi-valued) that maps a non-unique attribute + * to the tuple containing it. + * @see `scalation.database.MultiMap` + * @param atr the attribute/column to create the non-unique index on + */ + def create_mindex (atr: String): Unit = + debug ("create_mindex", s"create a non-unique index of type ${index.getClass.getName}") +// val newIndex = MIndexMap [ValueType, Tuple] () + val newIndex = MIndexMap [Tuple] () + + cfor (0, tuples.size) { i => + val t_i = tuples(i) + val t_atr = (pull (t_i, atr)) // non-unique attribute + newIndex.addOne1 (t_atr, t_i) // add key-value pair into new index + } // cfor + + mindex += atr -> newIndex // add new index into the mindex map +// hasIndex = true // FIX - why, it is not a primary index + end create_mindex + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** DROP the primary INDEX that maps the primary key to the tuple containing it. + */ + def drop_index (): Unit = + index.clear () + hasIndex = false // the PRIMARY INDEX no longer exists + end drop_index + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** DROP a secondary INDEX that maps a secondary key to the tuple containing it. 
+ * @param atr the attribute/column on which to drop the index + */ + def drop_sindex (atr: String): Unit = + val oldIndex = sindex.getOrElse (atr, null) + if oldIndex != null then + oldIndex.clear () + sindex -= atr + else + flaw ("drop_sindex", s"no index found for attribute = $atr") + end drop_sindex + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** DROP a non-unique INDEX that maps a non-unique attribute to the tuple containing it. + * @param atr the attribute/column on which to drop the index + */ + def drop_mindex (atr: String): Unit = + val oldIndex = mindex.getOrElse (atr, null) + if oldIndex != null then + oldIndex.clear () + mindex -= atr + else + flaw ("drop_mindex", s"no index found for attribute = $atr") + end drop_mindex + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the table restricted to the given range of rows. + * @param r the given range of rows + */ + def apply (r: Range): Table = + val s = new Table (s"${name}_a_${cntr.inc ()}", schema, domain, key) + + s.tuples ++= (for i <- r yield tuples(i)) + s + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the table restricted to the given collection of rows. + * @param pos the given collection of rows + */ + def apply (pos: collection.immutable.IndexedSeq [Int]): Table = + val s = new Table (s"${name}_a_${cntr.inc ()}", schema, domain, key) + + s.tuples ++= (for i <- pos yield tuples(i)) + s + end apply + + // R E L A T I O N A L G E B R A O P E R A T O R S + + // ================================================================== RENAME + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** RENAME this table, returning a shallow copy of this table. 
+ * Usage: customer rename "client" + *-------------------------------------------------------------------------- + * @param newName the new name for the table + */ + def rename (newName: String): Table = + val s = new Table (newName, schema, domain, key) + s.tuples ++= tuples // shallow copy + s + end rename + + // ================================================================= PROJECT + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** PROJECT the tuples in this table onto the given attribute names. + * Uaage: customer project (Array ("street", "ccity")) + *-------------------------------------------------------------------------- + * @param x the schema/attribute names to project onto + */ + def project (x: Schema): Table = + val newKey = if subset (key, x) then key else x + val s = new Table (s"${name}_p_${cntr.inc ()}", x, pull (x), newKey) + + s.tuples ++= (for t <- tuples yield pull (t, x)) + s + end project + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** PROJECT onto the columns with the given column positions (first is column 0). 
+ * Uaage: customer.project (Array (1, 2)) + *-------------------------------------------------------------------------- + * @param cPos the column positions to project onto + */ + def project (cPos: IndexedSeq [Int]): Table = + val mxPos = cPos.max + if mxPos >= cols then flaw ("project", s"mxPos = $mxPos is too large for the number of columns") + + val newAtrs = (for c <- cPos yield schema(c)).toArray + val newKey = if subset (key, newAtrs) then key else newAtrs + val s = new Table (s"${name}_p_${cntr.inc ()}", newAtrs, pull (cPos), newKey) + + s.tuples ++= (for t <- tuples yield pull (t, cPos)) + s + end project + + // ========================================================== PROJECT-SELECT + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** SELECT elements from column a in this table that satisfy the atomic + * predicate apred and PROJECT onto that column. + * Usage: customer selproject ("ccity", _ > "Athens") + *-------------------------------------------------------------------------- + * @param a the attribute name of the column used for selection + * @param apred the atomic predicate (`Boolean` function) to be satisfied + */ + def selproject (a: String, apred: APredicate): Table = + val newAtr = Array (a) + val newDom = Array (domain(on(a))) + val s = new Table (s"${name}_s_${cntr.inc ()}", newAtr, newDom, newAtr) + + cfor (0, tuples.size) { i => + val ta = pull (tuples(i), a) + if apred (ta) then s.tuples += Array (ta) + } // for + +// for t <- tuples do +// val ta = pull (t, a) +// if apred (ta) then s.tuples += Array (ta) +// end for + s + end selproject + + // ================================================================== SELECT + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** SELECT the tuples in this table that satisfy the atomic predicate on column a. 
+ * Usage: customer select ("ccity", _ == "Athens") + *-------------------------------------------------------------------------- + * @param a the attribute name of the column used for selection + * @param apred the atomic predicate (`Boolean` function) to be satisfied + */ + def select (a: String, apred: APredicate): Table = + val s = new Table (s"${name}_s_${cntr.inc ()}", schema, domain, key) + + cfor (0, tuples.size) { i => + val t_i = tuples(i) + if apred (pull (t_i, a)) then s.tuples += t_i + } // for + +// for t <- tuples if apred (pull (t, a)) do s.tuples += t + s + end select + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** SELECT the tuples in this table that satisfy the predicate. + * Usage: customer select (t => t(customer.on("ccity")) == "Athens") + *-------------------------------------------------------------------------- + * @param predicate the predicate (`Boolean` function) to be satisfied + */ + def select (predicate: Predicate): Table = + val s = new Table (s"${name}_s_${cntr.inc ()}", schema, domain, key) + + s.tuples ++= tuples.filter (predicate) + s + end select + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** SELECT the tuples in this table that satisfy the given simple (3 token) condition. 
+ * Usage: customer select ("ccity == 'Athens'") + *-------------------------------------------------------------------------- + * @param condition the simple condition string "a1 op a2" to be satisfied, where + * a1 is attribute, op is comparison operator, a2 is attribute or value + */ + def select (condition: String): Table = + val s = new Table (s"${name}_s_${cntr.inc ()}", schema, domain, key) + + val (tok, twoAtrs) = parseCond (condition) + val (a1, op, a2) = (tok(0), tok(1), tok(2)) + debug ("select", s"condition: (a1, op, a2) = ($a1, $op, $a2), twoAtrs = $twoAtrs") + + s.tuples ++= selectTups (a1, op, a2, twoAtrs) + s + end select + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** SELECT via the INDEX the tuple with the given primary key value pkey. + * Returns an empty table if the primary index has not been created. + * Usage: customer select (new KeyType ("Mary")) + *-------------------------------------------------------------------------- + * @param pkey_ the primary key value + */ + def select (pkey_ : KeyType): Table = + val pkey = pkey_.key(0) // FIX + val s = new Table (s"${name}_s_${cntr.inc ()}", schema, domain, key) + + if hasIndex then + debug ("select", s"primary key pkey = $pkey") + val t = index.getOrElse (pkey, null) + if t != null then s.tuples += t + else + flaw ("select", s"must call 'create_index' before using indexed-select on table $name") + s + end select + + // =================================================================== UNION + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** UNION this table and r2. Check that the two tables are compatible. + * If they are not, return the first table. + * Caveat: Assumes the key from the first table still works (@see create_index) + * Acts like union-all, so to remove duplicates call create_index after union. 
+ * Usage: deposit union loan + *-------------------------------------------------------------------------- + * @param r2 the second table + */ + infix def union (r2: Table): Table = + if incompatible (r2) then return this + val s = new Table (s"${name}_u_${cntr.inc ()}", schema, domain, key) + + s.tuples ++= tuples ++ r2.tuples + s + end union + + // =================================================================== MINUS + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute this table MINUS (set difference) table r2 (this - r2). Check that + * the two tables are compatible. If they are not, return the first table. + * Usage: account minus loan + *-------------------------------------------------------------------------- + * @param r2 the second table + */ + infix def minus (r2: Table): Table = + if incompatible (r2) then return this + val s = new Table (s"${name}_m_${cntr.inc ()}", schema, domain, key) + + cfor (0, tuples.size) { i => + val t_i = tuples(i) + if ! (r2 contains t_i) then s.tuples += t_i + } // for + +// for t <- tuples do if ! (r2 contains t) then s.tuples += t + s + end minus + + // =============================================================== INTERSECT + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** INTERSECT this table and r2. Check that the two tables are compatible. + * If they are not, return the first table. 
+ * Usage: account intersect loan + *-------------------------------------------------------------------------- + * @param r2 the second table + */ + infix def intersect (r2: Table): Table = + if incompatible (r2) then return this + val s = new Table (s"${name}_i_${cntr.inc ()}", schema, domain, key) + + cfor (0, tuples.size) { i => + val t_i = tuples(i) + if r2 contains t_i then s.tuples += t_i + } // for + +// for t <- tuples do if r2 contains t then s.tuples += t + s + end intersect + + // ================================================================= PRODUCT + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the CARTESIAN PRODUCT of this table and r2 (this × r2). + * Usage: customer product deposit + *-------------------------------------------------------------------------- + * @param r2 the second table + */ + infix def product (r2: Table): Table = + val newKey = key ++ r2.key // requires keys from both tables + val s = new Table (s"${name}_x_${cntr.inc ()}", disambiguate (schema, r2.schema), + domain ++ r2.domain, newKey) + + cfor (0, tuples.size) { i => + cfor (0, r2.tuples.size) { j => + s.tuples += tuples(i) ++ r2.tuples(j) + } // for + } // for + +// for t <- tuples; u <- r2.tuples do +// s.tuples += t ++ u +// end for + s + end product + + // ==================================================================== JOIN + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** JOIN this table and r2 keeping concatenated tuples that satisfy the predicate. + * Caveat: Assumes both keys are needed for the new key (depending on the + * predicate both may not be required). 
+ * Usage: customer join ((t, u) => t(customer.on("cname")) == u(deposit.on("cname")), deposit) + *-------------------------------------------------------------------------- + * @param predicate the join predicate to be satisfied + * @param r2 the second table + */ + def join (predicate: Predicate2, r2: Table): Table = + val newKey = key ++ r2.key // requires keys from both tables + val s = new Table (s"${name}_j_${cntr.inc ()}", disambiguate (schema, r2.schema), + domain ++ r2.domain, newKey) + + cfor (0, tuples.size) { i => + val t_i = tuples(i) + cfor (0, r2.tuples.size) { j => + val t_j = r2.tuples(j) + if predicate (t_i, t_j) then s.tuples += t_i ++ t_j + } // for + } // for + +// for t <- tuples; u <- r2.tuples do +// if predicate (t, u) then s.tuples += t ++ u +// end for + s + end join + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the THETA-JOIN of this table and r2 keeping concatenated tuples that + * satisfy the given simple (3 token) condition. 
+ * Usage: customer join ("cname == cname"), deposit) + *-------------------------------------------------------------------------- + * @param condition the simple condition "a1 op a2" + * @param r2 the second table + */ + def join (condition: String, r2: Table): Table = + val tok = parseCond (condition)._1 + val (a1, op, a2) = (tok(0), tok(1), tok(2)) +// debug ("join", s"(a1, op, a2) = ($a1, $op, $a2)") + + val newKey = key ++ r2.key // requires keys from both tables + + val s = new Table (s"${name}_j_${cntr.inc ()}", disambiguate (schema, r2.schema), + domain ++ r2.domain, newKey) + + s.tuples ++= + (op match + case "==" => tJoinTups (a1, equ, a2, r2) + case "!=" => tJoinTups (a1, neq, a2, r2) + case "<" => tJoinTups (a1, <, a2, r2) + case "<=" => tJoinTups (a1, <=, a2, r2) + case ">" => tJoinTups (a1, >, a2, r2) + case ">=" => tJoinTups (a1, >=, a2, r2) + case _ => flaw ("join", s"$op is an unrecognized operator"); null) + s + end join + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the EQUI-JOIN via the NESTED-LOOP JOIN (NLJ) algorithm of this table and + * r2 keeping concatenated tuples that are equal on specified attributes. 
+ * Usage: customer join (Array ("cname"), Array ("cname"), deposit) + *-------------------------------------------------------------------------- + * @param x the subschema/attributes for the first/this table + * @param y the subschema/attributes for the second table + * @param r2 the second table + */ + def join (x: Schema, y: Schema, r2: Table): Table = + val newKey = if subset (x, key) then r2.key // three possibilities for new key + else if subset (y, r2.key) then key + else key ++ r2.key + + val s = new Table (s"${name}_j_${cntr.inc ()}", disambiguate (schema, r2.schema), + domain ++ r2.domain, newKey) + + val ix = pullPos (x) // using integers is faster than strings + val iy = r2.pullPos (y) + + cfor (0, tuples.size) { i => + val t_i = tuples(i) + cfor (0, r2.tuples.size) { j => + val t_j = r2.tuples(j) + if pull (t_i, ix) eqElements r2.pull (t_j, iy) then s.tuples += t_i ++ t_j + } // for + } // for + +// for t <- tuples; u <- r2.tuples do +// if pull (t, x) eqElements r2.pull (u, y) then s.tuples += t ++ u +// end for + s + end join + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the EQUI-JOIN via the INDEX JOIN (IJ) algorithm of this table and the + * referenced table keeping concatenated tuples that are equal on the primary key + * and foreign key attributes. Uses a UNIQUE INDEX (UI) on the primary key. + * Caveat: Requires the foreign key table to be first [ fkey_table join ((fkey, pkey_table) ]. 
+ * Usage: deposit join (("cname", customer)) + * as if join_, where the index is on the right, i.e., customer + *-------------------------------------------------------------------------- + * @param ref the foreign key reference (foreign key attribute, referenced table) + */ + def join (ref: (String, Table)): Table = +// show_foreign_keys () + val (fkey, refTab) = ref // foreign key, referenced table + + val s = new Table (s"${name}_j_${cntr.inc ()}", disambiguate (schema, refTab.schema), + domain ++ refTab.domain, key) + + if refTab.hasIndex then + cfor (0, tuples.size) { i => // iterate over fkey table + val t_i = tuples(i) +// val t_fkey = new KeyType (pull (t_i, fkey)) + val t_fkey = pull (t_i, fkey) + debug ("join", s"foreign key t_fkey = $t_fkey") + val t_j = refTab.index.getOrElse (t_fkey, null) // get u via pkey from refTab + if t_j != null then s.tuples += t_i ++ t_j // add concatenated tuples + } // cfor + else + flaw ("join", s"must call 'create_index' before using indexed-join on ${refTab.name}") + s + end join + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the EQUI-JOIN via the INDEX JOIN (IJ) algorithm of this table and the + * referenced table keeping concatenated tuples that are equal on the primary key + * and foreign key attributes. Uses a NON-UNIQUE INDEX (NUI) on the foreign key. + * Caveat: Requires the foreign key table to be first [ fkey_table _join ((fkey, pkey_table) ]. 
+ * Usage: deposit _join (("cname", customer)) + * where the index is on the left, i.e., deposit + *-------------------------------------------------------------------------- + * @param ref the foreign key reference (foreign key attribute, referenced table) + */ + def _join (ref: (String, Table)): Table = +// show_foreign_keys () + val (fkey, refTab) = ref // foreign key, referenced table + + val s = new Table (s"${name}_j_${cntr.inc ()}", disambiguate (schema, refTab.schema), + domain ++ refTab.domain, key) + + if hasIndex then + cfor (0, refTab.tuples.size) { j => // iterate over pkey/refTab table + val u = refTab.tuples(j) + val u_pkey = pull (u, key) + debug ("join", s"primary key u_pkey = $u_pkey") + val idx = mindex(key(0)) // select multi-index by attribute + val ts = idx.getOrElse (u_pkey(0), null) // get {t} via fkey from this table + if ts != null then + for t <- ts do s.tuples += t ++ u // add concatenated tuples + } // for + else + flaw (")join", s"must call 'create_index' before using indexed-join on $name") + +// for u <- refTab.tuples do // iterate over pkey/refTab table +// val u_pkey = pull (u, key) +// debug ("join", s"primary key u_pkey = $u_pkey") +// val idx = mindex(key(0)) // select multi-index by attribute +// val ts = idx.getOrElse (u_pkey(0), null) // get {t} via fkey from this table +// if ts != null then +// for t <- ts do s.tuples += t ++ u // add concatenated tuples +// end for + s + end _join + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the EQUI-JOIN via the SORT-MERGE JOIN (SMJ) algorithm of this table and the + * referenced table keeping concatenated tuples that are equal on the primary key + * and foreign key attributes. + * Caveat: Requires the foreign key table to be first [ fkey_table _join_ ((fkey, pkey_table) ]. 
+ * Usage: deposit _join_ (("cname", customer)) + * where both sides (left and right) must be in order + *-------------------------------------------------------------------------- + * @param ref the foreign key reference (foreign key attribute, referenced table) + */ + def _join_ (ref: (String, Table)): Table = +// show_foreign_keys () + val (fkey, refTab) = ref // foreign key, referenced table + + val s = new Table (s"${name}_j_${cntr.inc ()}", disambiguate (schema, refTab.schema), + domain ++ refTab.domain, key) + + val t_sz = tuples.size // number of typles in foreign key table + val pkey = refTab.key (0) // again requires non-composite primary keys + orderBy (fkey) // order the foreign key table + refTab.orderBy (pkey) // order the primary key table + + var i, j = 0 // cursors i and j for foreign, primary key tables + // Loop over both tables + while i < t_sz && j < refTab.tuples.size do + val t = tuples(i) // current tuple from foreign key table + val u = refTab.tuples(j) // current tuple from primary key table + + val t_k = pull (t, fkey) // foreign key value in current tuple + val u_k = pull (u, pkey) // primary key value in current tuple + + if t_k == u_k then + // If the keys match, concatenate the tuples and add to result table + s.tuples += (t ++ u) + i += 1 // move forward in foreign key table + j += 1 // move forward in primary key table + else if t_k < u_k then + // If foreign key is less than primary key, move forward in foreign key table + i += 1 + else + // If primary key is less than foreign key, move forward in primary key table + j += 1 + end while + + debug ("_join_", s"cursors: i = $i, j = $j") + s + end _join_ + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the NATURAL JOIN via the NESTED LOOP JOIN (NLJ) algorithm of this table and + * r2 keeping concatenated tuples that agree on the common attributes. 
+ * Usage: customer join deposit + *-------------------------------------------------------------------------- + * @param r2 the second table + */ + infix def join (r2: Table): Table = +// val common = schema intersect r2.schema // common attributes + val common = meet (schema, r2.schema) // common attributes + debug ("join", s"common = ${stringOf (common)}") + val rest = r2.schema diff common + val newKey = if subset (common, key) then r2.key // three possibilities for new key + else if subset (common, r2.key) then key + else key ++ r2.key + + val s = new Table (s"${name}_j_${cntr.inc ()}", schema ++ rest, + domain ++ r2.pull (rest), newKey) + val icommon = pullPos (common) // using integers id faster than strings + val jcommon = r2.pullPos (common) // using integers id faster than strings + + cfor (0, tuples.size) { i => + val t_i = tuples(i) + cfor (0, r2.tuples.size) { j => + val t_j = r2.tuples(j) + if pull (t_i, icommon) eqElements r2.pull (t_j, jcommon) then + s.tuples += t_i ++ r2.pull (t_j, rest) + } // for + } // for + +// for t <- tuples; u <- r2.tuples do +// if pull (t, common) eqElements r2.pull (u, common) then +// s.tuples += t ++ r2.pull (u, rest) +// end for + s + end join + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the NATURAL JOIN via the INDEX JOIN (IJ) algorithm of this table and the + * referenced table keeping concatenated tuples that are equal on the primary key + * and foreign key attributes. Uses a UNIQUE INDEX (UI) on the primary key. + * Caveat: Requires the foreign key table to be first [ fkey_table join_ pkey_table ]. 
+ * Usage: deposit join_ customer + * where the index is on the right, i.e., customer + *-------------------------------------------------------------------------- + * @param r2 the second table + */ + infix def join_ (r2: Table): Table = +// val common = schema intersect r2.schema // common attributes + val common = meet (schema, r2.schema) // common attributes + debug ("join", s"common = ${stringOf (common)}") + val rest = r2.schema diff common + val newKey = if subset (common, key) then r2.key // three possibilities for new key + else if subset (common, r2.key) then key + else key ++ r2.key + + val s = new Table (s"${name}_j_${cntr.inc ()}", schema ++ rest, + domain ++ r2.pull (rest), newKey) + + // implement IJ-UI + + s + end join_ + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the NATURAL JOIN via the INDEX JOIN (IJ) algorithm of this table and the + * referenced table keeping concatenated tuples that are equal on the primary key + * and foreign key attributes. Uses a NON-UNIQUE INDEX () on the foreign key. + * Caveat: Requires the foreign key table to be first [ fkey_table _join pkey_table ]. 
+ * Usage: deposit _join customer + * where the index is on the left, i.e., deposit + *-------------------------------------------------------------------------- + * @param r2 the second table + */ + infix def _join (r2: Table): Table = +// val common = schema intersect r2.schema // common attributes + val common = meet (schema, r2.schema) // common attributes + debug ("join", s"common = ${stringOf (common)}") + val rest = r2.schema diff common + val newKey = if subset (common, key) then r2.key // three possibilities for new key + else if subset (common, r2.key) then key + else key ++ r2.key + + val s = new Table (s"${name}_j_${cntr.inc ()}", schema ++ rest, + domain ++ r2.pull (rest), newKey) + + // implement IJ-NUI + + s + end _join + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the EQUI-JOIN via the SORT-MERGE JOIN (SMJ) algorithm of this table and the + * referenced table keeping concatenated tuples that are equal on the primary key + * and foreign key attributes. + * Caveat: Requires the foreign key table to be first [ fkey_table _join_ pkey_table ]. 
+ * Usage: deposit _join_ customer + * where both sides (left and right) must be in order + *-------------------------------------------------------------------------- + * @param r2 the second table + */ + infix def _join_ (r2: Table): Table = +// val common = schema intersect r2.schema // common attributes + val common = meet (schema, r2.schema) // common attributes + debug ("join", s"common = ${stringOf (common)}") + val rest = r2.schema diff common + val newKey = if subset (common, key) then r2.key // three possibilities for new key + else if subset (common, r2.key) then key + else key ++ r2.key + + val s = new Table (s"${name}_j_${cntr.inc ()}", schema ++ rest, + domain ++ r2.pull (rest), newKey) + + // implement SMJ + + s + end _join_ + + // =============================================================== LEFT-JOIN + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the LEFT-EQUI-JOIN of this table and r2 keeping concatenated tuples + * that are equal on specified attributes. Also, keep all tuples in the left + * table padding the missing attributes with null. + * For right-join swap table1 and table2, e.g., table1.leftJoin (... table2) + * Usage: customer leftJoin (Array ("cname"), Array ("cname"), deposit) + *-------------------------------------------------------------------------- + * @param x the subschema/attributes for the left/first/this table + * @param y the subschema/attributes for the right/second table + * @param r2 the second table + */ + def leftJoin (x: Schema, y: Schema, r2: Table): Table = + val s = join (x, y, r2) + + val absentTuple = nullTuple (r2.domain) + val ss = s.project (schema) // join projected onto original schema + + cfor (0, tuples.size) { i => + val t_i = tuples(i) + if ! (ss contains t_i) then s.tuples += t_i ++ absentTuple + } // for + +// for t <- tuples if ! 
(ss contains t) do +// s.tuples += t ++ absentTuple +// end for + s + end leftJoin + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the LEFT-EQUI-JOIN via the INDEX of this table and the referenced table keeping + * concatenated tuples that are equal on the primary key and foreign key attributes. + * Caveat: Requires the foreign key table to be first [ fkey_table join ((fkey, pkey_table) ]. + * Usage: deposit leftJoin (("cname", customer)) + *-------------------------------------------------------------------------- + * @param ref the foreign key reference (foreign key attribute, referenced table) + */ + def leftJoin (ref: (String, Table)): Table = + val (fkey, refTab) = ref + val s = join (ref) + + val absentTuple = nullTuple (refTab.domain) + val ss = s.project (schema) // join projected onto original schema + + cfor (0, tuples.size) { i => + val t_i = tuples(i) + if ! (ss contains t_i) then s.tuples += t_i ++ absentTuple + } // for + +// for t <- tuples if ! (ss contains t) do +// s.tuples += t ++ absentTuple +// end for + s + end leftJoin + + // ================================================================== DIVIDE + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** DIVIDE this table by table r2. Requires a tuple in the quotient part of + * this table to be paired with all tuples in table r2. + * Usage: deposit.project ("cname, bname") divide branch.project ("bname") + *-------------------------------------------------------------------------- + * @param r2 the second table + */ + infix def divide (r2: Table): Table = + val divisor = r2.schema + if ! 
subset (divisor, schema) then flaw ("divide", "divisor schema must be a subset of schema") + val quotient = schema diff divisor + val newKey = if subset (key, quotient) then key else quotient + val s = new Table (s"${name}_d_${cntr.inc ()}", quotient, pull (quotient), newKey) + + val q = project (quotient) + var keep = false + + cfor (0, q.tuples.size) { i => + val t_i = q.tuples(i) + keep = true + breakable { + for u <- r2.tuples do // change to cfor + if ! (this contains t_i ++ u) then { keep = false; break () } + end for + } // breakable + if keep then s.tuples += t_i + } // cfor + +// for t <- q.tuples do +// keep = true +// breakable { +// for u <- r2.tuples do +// if ! (this contains t ++ u) then { keep = false; break () } +// end for +// } // breakable +// if keep then s.tuples += t +// end for + s + end divide + + // ================================================================ GROUP-BY + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** GROUP this table BY the specified attribute, returning this table. + * Each value for attribute ag will be mapped to a collection of tuples. + * Usage: deposit groupBy "bname" + *-------------------------------------------------------------------------- + * @param ag the attribute to group by + */ + def groupBy (ag: String): Table = + if ! 
(schema contains ag) then + flaw ("groupBy", s"ag = $ag is not contained in schema") + + val col = on(ag) // the column number for ag + + cfor (0, tuples.size) { i => + val t_i = tuples(i) + val gkey = t_i(col) + val group = groupMap.getOrElseUpdate (gkey, Bag [Tuple] ()) + group += t_i // add tuple t_i to gkey's group + } // cfor + +// for t <- tuples do +// val gkey = t(col) +// val group = groupMap.getOrElseUpdate (gkey, Bag [Tuple] ()) +// group += t // add tuple t to gkey's group +// end for + this + end groupBy + + // =============================================================== AGGREGATE + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Assuming this table has been grouped by attribute ag, create a table + * where the first column is ag and the rest are AGGREGATE FUNCTIONs applied + * to their corresponding attributes. + * Usage: deposit aggregate ("bname", (avg, "balance")) + *-------------------------------------------------------------------------- + * @param ag the attribute the table has been grouped on + * @param f_as the aggregate function and the attribute to apply it to (as varargs) + */ + def aggregate (ag: String, f_as: (AggFunction, String)*): Table = + val n = f_as.size + 1 + val cols = Array.ofDim [Int] (n - 1) + val schm = Array.ofDim [String] (n) + schm(0) = ag + for j <- f_as.indices do + cols(j) = on(f_as(j)._2) // the column number for atr j + schm(j+1) = f_as(j)._2 + end for + val s = new Table (s"${name}_a_${cntr.inc ()}", schm, pull (schm), Array (ag)) + + for (gkey, tups) <- groupMap do + val t = Array.ofDim [ValueType] (n) + t(0) = gkey + for j <- f_as.indices do t(j+1) = f_as(j)._1 (col (cols(j), tups)) + s.tuples += t + end for + s + end aggregate + + // ================================================================ ORDER-BY + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** ORDER-BY the given attributes, i.e., reorder the tuples in this table into + * 
'ascending' order. A stable sorting is used to allow sorting on multiple attributes. + * Usage: deposit orderBy "bname" + *-------------------------------------------------------------------------- + * @param x the subschema/attributes to order by + */ + def orderBy (x: String*): Table = + val s = new Table (s"${name}_o_${cntr.inc ()}", schema, domain, key) + + val perm = rankOrder (x*) + cfor (0, perm.size) { i => s.tuples += tuples(perm(i)) } + s + end orderBy + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** ORDER-BY-DESC the given attributes, i.e., reorder the tuples in this table into + * 'descending' order. A stable sorting is used to allow sorting on multiple attributes. + * Usage: deposit orderByDesc "bname" + *-------------------------------------------------------------------------- + * @param x the subschema/attributes to order by + */ + def orderByDesc (x: String*): Table = + val s = new Table (s"${name}_o_${cntr.inc ()}", schema, domain, key) + + val perm = rankOrder (x*).reverse + cfor (0, perm.size) { i => s.tuples += tuples(perm(i)) } + s + end orderByDesc + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the basic statistics for each column of this table. + */ + def stats: Table = + val s = new Table (s"${name}_stats", + Array ("column", "count", "countd", "min", "max", "sum", "avg"), + Array ('S', 'I', 'I', 'S', 'S', 'D', 'D'), Array ("column")) + + cfor (0, schema.size) { j => s.add (Table.stats (schema(j), col(j))) } + s + end stats + + // D A T A M A N I P U L A T I O N + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** ADD (insert) tuple t into this table checking to make sure the domains are correct. + * Also, checks referential integrity for any foreign keys in the tuple. + * Return true iff the tuple passes the type check and reference check. 
+ * @param t the tuple to be inserted + */ + def add (t: Tuple): Table = + if typeCheck (t) && referenceCheck (t) then + if hasIndex then +// val pkey = new KeyType (pull (t, key)) // values for primary key part of t + val pkey = pull (t, key)(0) // values for primary key part of t + if index.getOrElse (pkey, null) == null then // check if it's a duplicate + index += pkey -> t // add to index map + tuples += t // add to tuples + else + flaw ("add", s"$name: tuple ${stringOf (t)} has a duplicate value for its primary key") + else + tuples += t // no index - allow duplicates + end if + this + end add + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** UPDATE the column with attribute name a using newVal for elements with value + * matchVal. Return true iff at least one tuple is updated. + * @param a the attribute name for the column to be updated + * @param newVal the value used to assign updated values + * @param matchVal the value to be matched to elements + */ + def update (a: String, newVal: ValueType, matchVal: ValueType): Boolean = + var updated = false + if hasIndex && (key contains a) then + flaw ("update", "attempt to update an indexed primary key: use delete and add") + + val j = on(a) + cfor (0, tuples.size) { i => + if tuples(i)(j) == matchVal then + tuples(i)(j) = newVal + updated = true + } // for + updated + end update + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** UPDATE the column with attribute name a using function func for elements with + * value matchVal. Return true iff at least one tuple is updated. 
+ * @param a the attribute name for the column to be updated + * @param func the function used to assign updated values + * @param matchVal the value to be matched to elements + */ + def update (a: String, func: ValueType => ValueType, matchVal: ValueType): Boolean = + var updated = false + if hasIndex && (key contains a) then + flaw ("update", "attempt to update an indexed primary key: use delete and add") + + val funcVal = func (matchVal) + val j = on(a) + cfor (0, tuples.size) { i => + if tuples(i)(j) == matchVal then + tuples(i)(j) = funcVal + updated = true + } // cfor + updated + end update + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** DELETE all tuples in this table satisfying the deletion predicate. + * If there is an index, remove those tuples from the index as well. + * Return true iff at least one tuple is deleted. + * @param predicate the predicate that specifies which tuples to delete + */ + def delete (predicate: Predicate): Boolean = + val rem = tuples.filter (predicate) + cfor (0, rem.size) { i => + val t_i = rem(i) + tuples -= t_i // remove from tuples +// if hasIndex then index -= new KeyType (pull (t_i, key)) // remove from index map + if hasIndex then index -= pull (t_i, key)(0) // remove from index map + } // cfor + rem.size > 0 + end delete + + // C O N V E R T + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** CONVERT this table to a matrix of doubles by making the necessary + * type transformations. 
+ * @see the `fromMatrix` method + * @param cols the column positions to use for forming the matrix + */ + def toMatrix (cols: Array [Int] = Array.range (0, schema.size)): MatrixD = + val (m, n) = (tuples.size, cols.size) + + val a = Array.ofDim [Double] (m, n) + cfor (0, n) { j => + val jj = cols(j) + domain(jj) match + case 'S' | 'X' => val x = VectorS.map2Int (col(jj).map (_.toString))._1 + cfor (0, m) { i => a(i)(j) = x(i).toDouble } + case 'T' => val x = VectorT.map2Long (col(jj).map (TimeNum.fromValueType (_)))._1 + cfor (0, m) { i => a(i)(j) = x(i).toDouble } + case _ => cfor (0, m) { i => a(i)(j) = tuples(i)(jj).toDouble } + } // cfor + + new MatrixD (m, n, a) + end toMatrix + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** CONVERT this table to a matrix and a vector of doubles by making the necessary + * type transformations. + * Usage: table -> (X, y) for linear algebra/regression problem Xb = y. + * @param cols the column positions to use for forming the matrix + * @param colj the column position to use for forming the vector + */ + def toMatrixV (cols: Array [Int] = Array.range (0, schema.size-1), + colj: Int = schema.size-1): (MatrixD, VectorD) = + val (m, n) = (tuples.size, cols.size) + + val a = Array.ofDim [Double] (m, n) + cfor (0, n) { j => + val jj = cols(j) + domain(j) match + case 'S' | 'X' => val x = VectorS.map2Int (col(jj).map (_.toString))._1 + cfor (0, m) { i => a(i)(j) = x(i).toDouble } + case 'T' => val x = VectorT.map2Long (col(jj).map (TimeNum.fromValueType (_)))._1 + cfor (0, m) { i => a(i)(j) = x(i).toDouble } + case _ => cfor (0, m) { i => a(i)(j) = tuples(i)(jj).toDouble } + } // cfor + + val b = Array.ofDim [Double] (m) + domain(colj) match + case 'S' | 'X' => val x = VectorS.map2Int (col(colj).map (_.toString))._1 + cfor (0, m) { i => b(i) = x(i).toDouble } + case 'T' => val x = VectorT.map2Long (col(colj).map (TimeNum.fromValueType (_)))._1 + cfor (0, m) { i => b(i) = x(i).toDouble } + case _ 
=> cfor (0, m) { i => b(i) = tuples(i)(colj).toDouble } + + (new MatrixD (m, n, a), new VectorD (m, b)) + end toMatrixV + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** CONVERT the colj column of this table into a vector of doubles, etc. + * @param colj the column position to use for the vector + */ + def toVectorD (colj: Int = 0): VectorD = + val b = Array.ofDim [Double] (rows) + cfor (0, rows) { i => + b(i) = tuples(i)(colj).toDouble + } // cfor + new VectorD (rows, b) + end toVectorD + + def toVectorI (colj: Int = 0): VectorI = + val b = Array.ofDim [Int] (rows) + cfor (0, rows) { i => + b(i) = tuples(i)(colj).toInt + } // cfor + new VectorI (rows, b) + end toVectorI + + def toVectorL (colj: Int = 0): VectorL = + val b = Array.ofDim [Long] (rows) + cfor (0, rows) { i => + b(i) = tuples(i)(colj).toLong + } // cfor + new VectorL (rows, b) + end toVectorL + + def toVectorS (colj: Int = 0): VectorS = + val b = Array.ofDim [String] (rows) + cfor (0, rows) { i => + b(i) = tuples(i)(colj).toString + } // cfor + new VectorS (rows, b) + end toVectorS + + def toVectorT (colj: Int = 0): VectorT = + val b = Array.ofDim [TimeNum] (rows) + cfor (0, rows) { i => + b(i) = tuples(i)(colj).asInstanceOf [TimeNum] + } // cfor + new VectorT (rows, b) + end toVectorT + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return a copy of this table limited to the first n tuples/rows. + * @param n the number of tuples/rows to keep + */ + def limit (n: Int): Table = + val s = new Table (name + "_$n", schema, domain, key) + s.tuples ++= tuples.slice (0, n) + s + end limit + + // O U T P U T + + private val width_ = 18 // default column width + private val width = Array.fill (domain.size) (width_) // width for each column + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Reset the width of column col to w. 
+ * @param col the column whose width is to be adjusted + * @param w the new width (# chars) for column col + */ + def resetWidth (col: Int, w: Int): Unit = width(col) = w + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** SHOW/print this table, one tuple per row. + * Formatting: regular column is 'width_' chars wide, 'X' is double that + * FIX - currently only works for width_, not width + * @param rng the range of tuples to show (e.g, 0 until 10), defaults to all + */ + def show (rng: Range = tuples.indices): Unit = + val len = width_ * (schema.size + countX) + println (s"\n>> Table $name with ${rng.size} rows, primary key = ${stringOf (key)}") + println ("|-" + "-" * len + "-|") + print ("| ") + cfor (0, schema.size) { j => + val wj = if domain(j) == 'X' then 2 * width_ else width_ + prt (schema(j), wj) + } // cfor + println (" |") + println ("|-" + "-" * len + "-|") + if rows > 0 then + for i <- rng do + print ("| ") + val tuple_i = tuples(i) + if tuple_i.size > domain.size then flaw ("show", s"tuple($i) has size ${tuple_i.size}") + cfor (0, tuple_i.size) { j => + val wj = if domain(j) == 'X' then 2 * width_ else width_ + prt (tuple_i(j), wj) + } // cfor + println (" |") + end for + println ("|-" + "-" * len + "-|") + end show + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** PRINT value-type v with a width of w. + * @param v the value to be printed + * @param w the width (# chars) for the column + */ + def prt (v: ValueType, w: Int): Unit = + val str = if v == null then "null" else v.toString + val w0 = str.size + val rem = w - w0 + val lft = max (rem / 2, 0) + val rht = max (rem - lft, 0) + print (" " * lft + str + " " * rht) + end prt + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** SHOW/print this table's primary index. 
+ */ + def show_index (): Unit = + println (s"\n>> Table $name has indexed primary key = ${stringOf (key)}") + for (k, v) <- index do println (s"index: ${stringOf (k)} -> ${stringOf (v)}") + end show_index + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** SHOW/print this table's foreign keys. + */ + def show_foreign_keys (): Unit = + println (s"\n>> Table $name has foreign keys:") + for (fk, fkt) <- linkTypes do println (s"\t foreign key link = $fk references ${fkt.name}") + end show_foreign_keys + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** SAVE this table in a file using serialization. + * @see load in `Table` object + */ + def save (): Unit = + val oos = new ObjectOutputStream (new FileOutputStream (STORE_DIR + name + SER)) + oos.writeObject (this) + oos.close () + end save + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** WRITE this table into a Comma-Separated-Value (CSV) file with each tuple + * written to a line. + * @param fileName the file name of the data file (defaults to "name.csv") + */ + def writeCSV (fileName: String = name + ".csv"): Unit = + val out = new PrintWriter (DATA_DIR + fileName) + out.println (stringOf (schema).drop (6).dropRight (1)) + cfor (0, tuples.size) { i => + val t_i = stringOf (tuples(i)) + out.println (t_i.drop (6).dropRight (1)) + } // cfor + out.close + end writeCSV + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** WRITE this table into a JavaScript Object Notation (JSON) file. + * @param fileName the file name of the data file + */ + def writeJSON (fileName: String = name + ".json"): Unit = ??? 
+ /* + val gson = new Gson () + val jsonStr = gson.toJson (this) + debug ("writeJSON", s"jsonStr = ${jsonStr.slice (0, min (jsonStr.size, 5000))}") + val out = new PrintWriter (DATA_DIR + fileName) + out.println (jsonStr) + out.close () + end writeJSON + */ + + // P R I V A T E / U T I L I T Y M E T H O D S + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the tuples in this table that satisfy the given simple (3 token) condition. + * @param a1 the left attribute + * @param op the comparison operator (==, !=, <, <=. >, >=) + * @param a2 the right attribute or value + * @param twoAtrs the whether a2 is an attribute or value + * @param tups the initial collection of tuples + */ + protected def selectTups (a1: String, op: String, a2: String, twoAtrs: Boolean, + tups: Bag [Tuple] = tuples): Bag [Tuple] = + if twoAtrs then // a1 and a2 are attributes + val a2_ = a2.toString + op match + case "==" => tups.filter (t => t(on(a1)) == t(on(a2_))) + case "!=" => tups.filter (t => t(on(a1)) != t(on(a2_))) + case "<" => tups.filter (t => t(on(a1)) < t(on(a2_))) + case "<=" => tups.filter (t => t(on(a1)) <= t(on(a2_))) + case ">" => tups.filter (t => t(on(a1)) > t(on(a2_))) + case ">=" => tups.filter (t => t(on(a1)) >= t(on(a2_))) + case _ => flaw ("select", s"$op is an unrecognized operator"); tups + else // a1 is attribute, a2 is value + val col = on(a1) + val a2_ : ValueType = string2Dom (a2, domain (col)) + op match + case "==" => tups.filter (t => t(col) == a2_) + case "!=" => tups.filter (t => t(col) != a2_) + case "<" => tups.filter (t => t(col) < a2_) + case "<=" => tups.filter (t => t(col) <= a2_) + case ">" => tups.filter (t => t(col) > a2_) + case ">=" => tups.filter (t => t(col) >= a2_) + case _ => flaw ("select", s"$op is an unrecognized operator"); tups + end selectTups + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert a `String` into a `ValueType` with the given domain. 
+ * @param str the given string + * @param dom the domain/data-type to convert it into + */ + def string2Dom (str: String, dom: Char): ValueType = + dom match + case 'D' => str.toDouble + case 'I' => str.toInt + case 'L' => str.toLong + case 'S' | 'X' => str + case 'T' => TimeNum (str) + end string2Dom + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the theta-join tuples for this table and r2 keeping concatenated tuples that + * satisfy the comparison (theta) operator on the specified attributes. + * @param a1 the attribute from the first/this table + * @param op the comparison operator (==, !=, <, <=. >, >=) + * @param a2 the attribute from the second table + * @param r2 the second table + */ + private def tJoinTups (a1: String, op: (ValueType, ValueType) => Boolean, + a2: String, r2: Table): Bag [Tuple] = + val tups = Bag [Tuple] () + + val ia1 = on(a1) // using integers is faster than strinfs + val ia2 = r2.on(a2) // a2 is attribute name, ia2 is its column index + + cfor (0, tuples.size) { i => + val t_i = tuples(i) + cfor (0, r2.tuples.size) { j => + val t_j = r2.tuples(j) + if op (t_i(ia1), t_j(ia2)) then tups += t_i ++ t_j + } // for + } // for + +// for t <- tuples; u <- r2.tuples do +// if op (t(on(a1)), u(r2.on(a2))) then tups += t ++ u +// end for + tups + end tJoinTups + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the rank order of the tuples in this table by performing indirect + * merge-sort on the given attributes. 
+ * @param x the attributes to indirectly sort on + */ + private def rankOrder (x: String*): Array [Int] = + var perm: Array [Int] = null // permutation giving rank order + + for j <- x.indices do + val col_j = col (on (x (j))) + perm = if j == 0 then (new MergeSortIndirect (col_j)()).isort () + else (new MergeSortIndirect (col_j)(perm)).isort () + end for + perm + end rankOrder + +end Table + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tableTest` main function tests the `Table` class with queries on the + * Bank database. + * > runMain scalation.database.table.tableTest + */ +@main def tableTest (): Unit = + + // Data Definition Language + + val customer = Table ("customer", "cname, street, ccity", "S, S, S", "cname") + val branch = Table ("branch", "bname, assets, bcity", "S, D, S", "bname") + val deposit = Table ("deposit", "accno, balance, cname, bname", "I, D, S, S", "accno") + val loan = Table ("loan", "loanno, amount, cname, bname", "I, D, S, S", "loanno") + + //-------------------------------------------------------------------------- + banner ("Populate Database") + + customer += ("Peter", "Oak St", "Bogart") + += ("Paul", "Elm St", "Watkinsville") + += ("Mary", "Maple St", "Athens") + customer.show () + + branch += ("Alps", 20000000.0, "Athens") + += ("Downtown", 30000000.0, "Athens") + += ("Lake", 10000000.0, "Bogart") + branch.show () + + deposit += (11, 2000.0, "Peter", "Lake") + += (12, 1500.0, "Paul", "Alps") + += (13, 2500.0, "Paul", "Downtown") + += (14, 2500.0, "Paul", "Lake") + += (15, 3000.0, "Mary", "Alps") + += (16, 1000.0, "Mary", "Downtown") + deposit.show () + + loan += (21, 2200.0, "Peter", "Alps") + += (22, 2100.0, "Peter", "Downtown") + += (23, 1500.0, "Paul", "Alps") + += (24, 2500.0, "Paul", "Downtown") + += (25, 3000.0, "Mary", "Alps") + += (26, 1000.0, "Mary", "Lake") + loan.show () + + //-------------------------------------------------------------------------- + banner ("Show Table 
Statistics") + + customer.stats.show () + branch.stats.show () + deposit.stats.show () + loan.stats.show () + + //-------------------------------------------------------------------------- + banner ("Verify Usage Queries") + + import Table._ + + var a, q: Table = null + + banner (""" customer rename "client" """) + q = customer.rename ("client") + q.show () + + banner (""" customer.project (Array ("street", "ccity")) """) + q = customer.project (Array ("street", "ccity")) + q.show () + + banner (""" customer.project (Array (1, 2)) """) + q = customer.project (Array (1, 2)) + q.show () + + banner (""" customer.selproject ("ccity", _ > "Athens") """) + q = customer.selproject ("ccity", _ > "Athens") + q.show () + + banner (""" customer.select ("ccity", _ == "Athens") """) + q = customer.select ("ccity", _ == "Athens") + q.show () + + banner (""" customer.select (t => t(customer.on("ccity")) == "Athens") """) + q = customer.select (t => t(customer.on("ccity")) == "Athens") + q.show () + + banner (""" customer.select ("ccity == 'Athens'") """) + q = customer.select ("ccity == 'Athens'") + q.show () + + banner (""" customer.select (new KeyType ("Mary")) """) + q = customer.select (new KeyType ("Mary")) + q.show () + + banner (""" deposit union loan """) + a = deposit union loan // save as a for account + a.show () + + banner (""" a minus loan """) + q = a minus loan + q.show () + + banner (""" a intersect loan """) + q = a intersect loan + q.show () + + banner (""" customer product deposit """) + q = customer product deposit + q.show () + + banner (""" customer.join ((t, u) => t(customer.on("cname")) == u(deposit.on("cname")), deposit) """) + q = customer.join ((t, u) => t(customer.on("cname")) == u(deposit.on("cname")), deposit) + q.show () + + banner (""" customer.join ("cname == cname", deposit) """) + q = customer.join ("cname == cname", deposit) + q.show () + + banner (""" customer.join (Array ("cname"), Array ("cname"), deposit) """) + q = customer.join (Array 
("cname"), Array ("cname"), deposit) + q.show () + + banner (""" deposit.join (("cname", customer)) """) + q = deposit.join (("cname", customer)) + q.show () + + banner (""" customer join deposit """) + q = customer join deposit + q.show () + + banner (""" customer.leftJoin (Array ("cname"), Array ("cname"), deposit) """) + q = customer.leftJoin (Array ("cname"), Array ("cname"), deposit) + q.show () + + banner (""" deposit.leftjoin (("cname", customer)) """) + deposit.create_index () + q = deposit.leftJoin (("cname", customer)) + q.show () + + banner (""" deposit.project ("cname, bname") divide branch.project ("bname") """) + q = deposit.project ("cname, bname") divide branch.project ("bname") + q.show () + + banner (""" deposit.groupBy ("bname") """) + q = deposit.groupBy ("bname") + q.show () + + banner (""" deposit.aggregate ("bname", (avg, "balance")) """) + q = deposit.aggregate ("bname", (avg, "balance")) + q.show () + + banner (""" deposit.orderBy ("bname") """) + q = deposit.orderBy ("bname") + q.show () + + banner (""" deposit.orderByDesc ("bname") """) + q = deposit.orderByDesc ("bname") + q.show () + + //-------------------------------------------------------------------------- + banner ("Example Queries") + + banner ("Names of customers who live in Athens") + val liveAthens = customer.σ ("ccity == 'Athens'").π ("cname") + liveAthens.show () + + banner ("Names of customers who bank (deposits) in Athens") + val bankAthens = (deposit ⋈ branch).σ ("bcity == 'Athens'").π ("cname") + bankAthens.show () + + banner ("Names of customers who live or bank in Athens") + val liveBank = customer.σ ("ccity == 'Athens'").π ("cname") ⋃ + (deposit ⋈ branch).σ ("bcity == 'Athens'").π ("cname") + liveBank.create_index () + liveBank.show () + + banner ("Names of customers who live and bank in the same city") + val sameCity = (customer ⋈ deposit ⋈ branch).σ ("ccity == bcity").π ("cname") + sameCity.create_index () + sameCity.show () + + banner ("Names and account numbers of 
customers with the largest balance") + val largest = deposit.π ("cname, accno") - (deposit ⋈ ("balance < balance", deposit)).π ("cname, accno") + largest.show () + + banner ("Names of customers who are silver club members") + val silver = (loan.π ("cname, bname") ⋂ deposit.π ("cname, bname")).π ("cname") + silver.create_index () + silver.show () + + banner ("Names of customers who are gold club members") + val gold = loan.π ("cname") - (loan.π ("cname, bname") - deposit.π ("cname, bname")).π ("cname") + gold.create_index () + gold.show () + + banner ("Names of branches located in Athens") + val inAthens = branch.σ ("bcity == 'Athens'").π ("bname") + inAthens.show () + + banner ("Names of customers who have deposits at all branches located in Athens") + val allAthens = deposit.π ("cname, bname") / inAthens + allAthens.create_index () + allAthens.show () + + banner ("Branch names and their average balances") + val avgBalance = deposit.γ ("bname").aggregate ("bname", (count, "accno"), (avg, "balance")) + avgBalance.show () + +end tableTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tableTest2` main function tests the `Table` class with queries on the + * Student-Course-Professor database. 
+ * > runMain scalation.database.table.tableTest2 + */ +@main def tableTest2 (): Unit = + + // Data Definition Language + + val student = Table ("student", "sid, sname, street, city, dept, level", + "I, S, S, S, S, I", "sid") + val professor = Table ("professor", "pid, pname, street, city, dept", + "I, S, S, S, S", "pid") + val course = Table ("course", "cid, cname, hours, dept, pid", + "I, X, I, S, I", "cid") + val takes = Table ("takes", "sid, cid", + "I, I", "sid, cid") + + //-------------------------------------------------------------------------- + banner ("Populate Database") + + student += (101, "Peter", "Oak St", "Bogart", "CS", 3) + += (102, "Paul", "Elm St", "Watkinsville", "CE", 4) + += (103, "Mary", "Maple St", "Athens", "CS", 4) + student.show () + + professor += (104, "DrBill", "Plum St", "Athens", "CS") + += (105, "DrJohn", "Pine St", "Watkinsville", "CE") + professor.show () + + course += (4370, "Database Management", 4, "CS", 104) + += (4720, "Comp. Architecture", 4, "CE", 104) + += (4760, "Computer Networks", 4, "CS", 105) + course.show () + + takes += (101, 4370) + += (101, 4720) + += (102, 4370) + += (102, 4760) + += (103, 4760) + takes.show () + + // Add links for foreign key contraints and efficient joins (will make any needed primary indices) + + takes.addLinkage ("sid", student) // takes sid references student sid + takes.addLinkage ("cid", course) // takes cid references course cid + course.addLinkage ("pid", professor) // course pid references professor pid + + //-------------------------------------------------------------------------- + banner ("Example Queries") + + banner ("locations of students") + val locs = student.project ("sname, city") + locs.show () + + banner ("living in Athens") + val inAthens = student.select ("city == 'Athens'") + inAthens.show () + + banner ("not living in Athens") + val notAthens = student minus inAthens + notAthens.show () + + banner ("student intersect inAthens") + val inters = student intersect 
inAthens + inters.show () + + banner ("in-Athens union not-in-Athens") + val unio = inAthens union notAthens + unio.show () + + banner ("course taken: course id") + val taken_id = takes.join (("sid", student)) + .project ("sname, cid") + taken_id.show () + + banner ("course taken: course name") + val taken_nm = takes.join (("sid", student)) + .join (("cid", course)) + .project ("sname, cname") + taken_nm.show () + + banner ("student taught by") + val taught_by = takes.join (("sid", student)) + .join (("cid", course)) + .join (("pid", professor)) + .project ("sname, pname") + taught_by.show () + +end tableTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tableTest3` main function tests the `Table` object and class by loading + * a dataset from a file. It loads the ScalaTion "covid_19" dataset/CSV file. + * - RELATIVE PATHS are from ScalaTion's DATA-DIR (@see Util.scala) + * - FULL PATHS are from the OS's root directory + * Defaults to RELATIVE PATHS; use `setFullPath` method to change. + * > runMain scalation.database.table.tableTest3 + */ +@main def tableTest3 (): Unit = + + //-------------------------------------------------------------------------- + // Verify access to file contents, comment out readFile once verified. + //-------------------------------------------------------------------------- + + val fileName = "covid_19.csv" + println (s"fileName = $fileName") +// readFile (fileName) // for RELATIVE PATHS +// readFile (fileName, fullPath = true) // for FULL PATHS + + //-------------------------------------------------------------------------- + // Use sample row/tuple in the middle of the file that has full information. 
+ //-------------------------------------------------------------------------- + + val data_str = """ +12/29/2020,19658043,205972,184889.714,342639,3611,2372.857,1.04,27782,122664,106708, +253765556,1887683,1484784,0.134,7.5,4387280,4282967,31140,722024,333594,325788 +""" + + //-------------------------------------------------------------------------- + // Use this to guess the data-types/domains. See last step for making corrections. + //-------------------------------------------------------------------------- + + val domain = Table.tuple2type (strim (data_str)) + println (s"domain = ${stringOf (domain)}") + + //-------------------------------------------------------------------------- + // Data stored relative to the "scalation_2.0/data" directory, if not use full path. + // Call the Table.load method: + // def load (fileName: String, name: String, domain: Domain, key: String, + // pos_ : Array [Int] = null, sep: String = ","): Table = + //-------------------------------------------------------------------------- + + val covid = Table.load (fileName, "covid", domain, "date") + covid.show (0 until 200) + + //-------------------------------------------------------------------------- + // If this fails due to incorrect domains, save the domain that was printed, + // correct the domains that are incorrect, and try again. 
+ //-------------------------------------------------------------------------- + + //-------------------------------------------------------------------------- + // for fullPath: Table.setFullPath () + // for limit: Table.setLimit (200) + //-------------------------------------------------------------------------- + + //-------------------------------------------------------------------------- + // Serialize and output the data into a JSON file (covid.json) in DATA_DIR + //-------------------------------------------------------------------------- + +// covid.writeJSON () + +end tableTest3 + diff --git a/src/main/scala/scalation/database/table/TableGen.scala b/src/main/scala/scalation/database/table/TableGen.scala index 044e9f99c..db2ffb7f0 100644 --- a/src/main/scala/scalation/database/table/TableGen.scala +++ b/src/main/scala/scalation/database/table/TableGen.scala @@ -58,12 +58,11 @@ object TableGen: if pkey contains atrj then // >> case PRIMARY KEY col(j) = if fkt == null then genUnique (j) // generate unique keys for attributes in pkey - else pullPkeys (atrj, fkt, j) // primary and foreign = copy pkey from fkt + else pullPkeys (fkt, j) // primary and foreign = copy pkey from fkt else if fkt != null then // >> case FOREIGN KEY - col(j) = pullPkeys (atrj, fkt, j) // foreign key = copy pkey from fkt + col(j) = pullPkeys (fkt, j) // foreign key = copy pkey from fkt else // >> case REGULAR ATTRIBUTE col(j) = genValue (j) // generate value for a regular attribute - end if j += 1 // increase attribute counter end while @@ -77,11 +76,10 @@ object TableGen: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Randomly pull m primary key values out of the foreign key table (fkt). * Caveat: Currently only works for non-composite foreign keys. 
- * @param fkey the foreign key attribute * @param fkt the foreign key table (fkt) this_table references fkt * @param strm the random number stream to use (reduce redundancy) */ - def pullPkeys (fkey: String, fkt: Table, strm: Int): Vectr = + def pullPkeys (fkt: Table, strm: Int): Vectr = val k = fkt.rows // number of rows in fkt val ranRw = RandomVecI (dim = m, max = k-1, min = 0, unique = false, stream = strm) val rows = ranRw.igen // randomly select m rows diff --git a/src/main/scala/scalation/database/table/TimeComparison.scala b/src/main/scala/scalation/database/table/TimeComparison.scala index f5ca17112..c473e155d 100644 --- a/src/main/scala/scalation/database/table/TimeComparison.scala +++ b/src/main/scala/scalation/database/table/TimeComparison.scala @@ -48,13 +48,9 @@ import scalation.mathstat.{Plot, VectorD} val limitedDeposit = deposit.limit (sz) limitedCustomer.create_index() - var tableSortMerge, tableEquiIndex: Table = null - var tableThetaJoin, tableEquiJoin: Table = null - if selectedJoins.contains ("SortMerge") then println ("SortMerge Join") - rTime_SortMergeJoin(i) = timed (5, true) { - tableSortMerge = limitedDeposit._join_ (("cname", limitedCustomer)) }._2 + rTime_SortMergeJoin(i) = timed (5, true) { limitedDeposit._join_ (("cname", limitedCustomer)) }._2 if selectedJoins.contains ("Natural") then println ("Natural Join") @@ -63,18 +59,15 @@ import scalation.mathstat.{Plot, VectorD} if selectedJoins.contains ("Theta") then println ("Theta Join") - rTime_ThetaJoin(i) = timed (5, true) { - tableThetaJoin = limitedCustomer.join (("cname == cname"), limitedDeposit) }._2 + rTime_ThetaJoin(i) = timed (5, true) { limitedCustomer.join (("cname == cname"), limitedDeposit) }._2 if selectedJoins.contains ("Index") then println ("Equi join with Index") - rTime_IndexJoin(i) = timed (5, true) { - tableEquiIndex = limitedDeposit.join (("cname", limitedCustomer)) }._2 + rTime_IndexJoin(i) = timed (5, true) { limitedDeposit.join (("cname", limitedCustomer)) 
}._2 if selectedJoins.contains ("Equi") then println ("Equi join") - rTime_EquiJoin(i) = timed (5, true) { - tableEquiJoin = limitedCustomer.join (Array("cname"), Array("cname"), limitedDeposit) }._2 + rTime_EquiJoin(i) = timed (5, true) { limitedCustomer.join (Array("cname"), Array("cname"), limitedDeposit) }._2 if selectedJoins.contains ("Predicate") then println ("Predicate Join") diff --git a/src/main/scala/scalation/database/table/TimeComparison2.scala b/src/main/scala/scalation/database/table/TimeComparison2.scala new file mode 100644 index 000000000..7db91656e --- /dev/null +++ b/src/main/scala/scalation/database/table/TimeComparison2.scala @@ -0,0 +1,225 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Sahil Varma + * @version 2.0 + * @date Sun Apr 27 21:04:18 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Comparing Various Join Algorithms including Link Joins + */ + +package scalation +package database +package table + +import java.io._ + +import scala.collection.mutable.ArrayBuffer +import scala.io.Source + +import scalation.mathstat.{Plot, VectorD} +//import scalation.scala2d.writeImage + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `my_timer_function` generates Tables and Ltables of specified size and + * performs various joins iteratively reducing the size of tables. 
+ * > runMain scalation.database.table.my_timer_function + */ +@main def my_timer_function (): Unit = + + val totalSize = 100000 + val stepSize = 10000 + val steps = totalSize / stepSize + val tSize = new VectorD (steps) + + // NLJs take too much time to run, so if running on large table size, comment the NLJs + val selectedJoins = Set ( +// "NatJoinNLJ", +// "NatJoinUI", +// "NatJoinNUI", +// "NatJoinLink", +// "EquiJoinNLJ", + "EquiJoinUI", +// "EquiJoinNUI", +// "EquiJoinLink", +// "PredJoinNLJ", + "SortMergeJoin") + + val t_NatJoinNLJ = new VectorD (steps) + val t_NatJoinUI = new VectorD (steps) + val t_NatJoinNUI = new VectorD (steps) + val t_NatJoinLink = new VectorD (steps) + + val t_EquiJoinNLJ = new VectorD (steps) + val t_EquiJoinUI = new VectorD (steps) + val t_EquiJoinNUI = new VectorD (steps) + val t_EquiJoinLink = new VectorD (steps) + + val t_PredJoinNLJ = new VectorD (steps) + val t_SortMergeJoin = new VectorD (steps) + + banner ("create Tables: customerT and depositT") + val customerT = Table ("customer", "cname, street, ccity", "S, S, S", "cname") + val depositT = Table ("deposit", "accno, balance, cname, bname", "I, D, S, S", "accno") + depositT.addLinkage ("cname", customerT) + + TableGen.popTable (customerT, totalSize) + TableGen.popTable (depositT, totalSize * 2) + + depositT.create_mindex ("cname") + + banner ("create LTables: customerLT and depositLT") + val customerLT = LTable ("customer", "cname, street, ccity", "S, S, S", "cname") + val depositLT = LTable ("deposit", "accno, balance, cname, bname", "I, D, S, S", "accno") + + customerLT.tuples ++= customerT.tuples + depositLT.tuples ++= depositT.tuples + + depositLT.addLinkage ("cname", customerLT) + depositLT.create_mindex ("cname") + + banner ("Perform Joins") + val rep = 10 + val a_cname = Array ("cname") + val pred = (t: Tuple, u: Tuple) => t(customerT.on("cname")) == u(depositT.on("cname")) + var joinedTab1: Table = null + var joinedTab2: Table = null + + for j <- 0 until steps do + 
val sz = totalSize - (j * stepSize) + + tSize(j) = sz + println (s"for j = $j: sz = $sz") + + // NATURAL JOINS + if selectedJoins contains "NatJoinlNLJ" then + t_NatJoinNLJ(j) = timedX (rep) { joinedTab1 = customerT join depositT }._2 + + if selectedJoins contains "NatJoinUI" then + t_NatJoinUI(j) = timedX (rep) { joinedTab1 = depositT join_ customerT }._2 + + if selectedJoins contains "NatJoinNUI" then + t_NatJoinNUI(j) = timedX (rep) { joinedTab2 = depositT _join customerT }._2 + + if selectedJoins contains "NaturalLink" then + t_NatJoinLink(j) = timedX (rep) { joinedTab1 = depositLT join customerLT }._2 + + // EQUI JOINS + if selectedJoins contains "EquiJoinNLJ" then + t_EquiJoinNLJ(j) = timedX (rep) { joinedTab1 = depositT.join (a_cname, a_cname, customerT) }._2 + + if selectedJoins contains "EquiJoinUI" then + t_EquiJoinUI(j) = timedX (rep) { joinedTab1 = depositT.join ("cname", customerT) }._2 + + if selectedJoins contains "EquiJoinNUI" then + t_EquiJoinNUI(j) = timedX (rep) { joinedTab2 = depositT._join ("cname", customerT) }._2 + + if selectedJoins contains "EquiJoinLink" then + t_EquiJoinLink(j) = timedX (rep) { joinedTab2 = depositLT.join ("cname", customerLT) }._2 + + // PREDICATE NLJ + if selectedJoins contains "PredJoinNLJ" then + t_PredJoinNLJ(j) = timedX (rep) { joinedTab1 = customerT.join (pred, depositT) }._2 + + // SORT MERGE JOIN + if selectedJoins contains "SortMergeJoin" then + t_SortMergeJoin(j) = timedX (rep) { joinedTab2 = depositT._join_ ("cname", customerT) }._2 + + customerT.deleteLast (stepSize) + customerLT.deleteLast (stepSize) + end for + +// joinedTab1.show () +// joinedTab2.show () + assert (checkTuples (joinedTab1.tuples, joinedTab2.tuples)) + // If assertion fails, generated join tables are correct but may be sorted and unsorted, due to nature of joins + + banner ("Show Timing Plots") + println ("SIZE VECTOR:\n" + tSize) + + def plotTimings (joinName: String, timeVec: VectorD): Unit = + println (s"joinName = $joinName \n 
$timeVec") + new Plot (tSize / stepSize, timeVec, null, "$joinName elapsedTime", lines = true) +// val plot = new Plot (tSize / stepSize, timeVec, null, "$joinName elapsedTime", lines = true) +// writeImage (joinName + ".png", plot) +// writeVectors2CSV (tSize, timeVec, joinName, "myResults.csv") + end plotTimings + + // NATURAL JOINS + if selectedJoins contains "NatJoinNLJ" then plotTimings ("NatJoinNLJ", t_NatJoinNLJ) + if selectedJoins contains "NatJoinUI" then plotTimings ("NatJoinUI", t_NatJoinUI) + if selectedJoins contains "NatJoinNUI" then plotTimings ("NatJoinNUI", t_NatJoinNUI) + if selectedJoins contains "NatJoinLink" then plotTimings ("NatJoinLink", t_NatJoinLink) + + // EQUI JOINS + if selectedJoins contains "EquiJoinNLJ" then plotTimings ("EquiJoinNLJ", t_EquiJoinNLJ) + if selectedJoins contains "EquiJoinUI" then plotTimings ("EquiJoinUI", t_EquiJoinUI) + if selectedJoins contains "EquiJoinNUI" then plotTimings ("EquiJoinNUI", t_EquiJoinNUI) + if selectedJoins contains "EquiJoinLink" then plotTimings ("EquiJoinLink", t_EquiJoinLink) + + // Predicate NLJ + if selectedJoins contains "PredJoinNLJ" then plotTimings ("PredJoinNLJ", t_PredJoinNLJ) + + // SORT MERGE JOIN + if selectedJoins contains "SortMergeJoin" then plotTimings ("SortMergeJoin", t_SortMergeJoin) + +end my_timer_function + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +def writeVectors2CSV (tSize: VectorD, valueVector: VectorD, valueRowHeading: String, fileName: String): Unit = + val file = new File (fileName) + var lines = List [String] () + var header = "" + + if file.exists () then + lines = Source.fromFile (fileName).getLines().toList + else + header = s"tSize,${tSize.mkString(",")}\n" + + val joinMethodExists = lines.exists (line => line.startsWith (s"$valueRowHeading,")) + + val clippedValues = valueVector.map (value => f"$value%.5f") + val newRow = s"$valueRowHeading,${clippedValues.mkString(",")}" + + if joinMethodExists then + // If the join 
method exists, overwrite it + val updatedLines = lines.map { + case line if line.startsWith (s"$valueRowHeading,") => newRow + "\n" // replace the old line with the new one + case line => line + "\n" // keep the other lines unchanged + } + // Write all the updated lines back to the file, including the header + val writer = new BufferedWriter (new FileWriter (fileName)) + updatedLines.foreach(writer.write) + writer.close() +// println(s"Data for $valueRowHeading updated in $fileName") + + else + // If the join method does not exist, append the new row + val writer = new BufferedWriter (new FileWriter (fileName, true)) + if lines.isEmpty then + writer.write (s"$header\n") + writer.write (s"$newRow\n") + writer.close () +// println (s"Data for $valueRowHeading appended to $fileName") +end writeVectors2CSV + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +def checkTuples (t1: ArrayBuffer [Tuple], t2: ArrayBuffer [Tuple]): Boolean = + + if t1.size != t2.size then + println("Table Size not Match") + return false + + var i = 0 + while i < t1.size do + if ! 
(t1(i) sameElements t2(i)) then + println ("TABLES NOT SAME AT: row index" + i) + return false + i += 1 + + println ("Both Tables have same elements, Join Successful") + true +end checkTuples + diff --git a/src/main/scala/scalation/database/table/VTable.scala b/src/main/scala/scalation/database/table/VTable.scala index 939fab502..c9da5d2e6 100644 --- a/src/main/scala/scalation/database/table/VTable.scala +++ b/src/main/scala/scalation/database/table/VTable.scala @@ -157,10 +157,8 @@ case class VTable (name_ : String, schema_ : Schema, domain_ : Domain, key_ : Sc flaw ("addE", "attempt to link to multiple targets vertices when edge type is unique") else vset += v - end if else flaw ("addE", s"elab = $elab not an edge type for $name") - end if debug (s"addE", s"$name: \t u.edge = ${u.edge}") this end addE @@ -194,10 +192,8 @@ case class VTable (name_ : String, schema_ : Schema, domain_ : Domain, key_ : Sc val vset = u.edge.getOrElse (elab, null) if vset == null then u.edge += elab -> vs else vset ++= vs - end if else flaw ("addEs", s"elab = $elab not an edge type for $name") - end if // debug ("addEs", s"edge = $edge") end addEs @@ -300,7 +296,7 @@ case class VTable (name_ : String, schema_ : Schema, domain_ : Domain, key_ : Sc * @param ref the foreign key reference (edge-label, referenced table) */ def expand (x: Schema, ref: (String, VTable)): VTable = - val (elab, refTab) = ref // edge-label, referenced table + val refTab = ref._2 // edge-label, referenced table // val x1 = schema intersect x // attributes from first table val x1 = meet (schema, x) // attributes from first table val x2 = meet (refTab.schema, x) // attributes from second table @@ -371,7 +367,6 @@ case class VTable (name_ : String, schema_ : Schema, domain_ : Domain, key_ : Sc if tab2 != null && tab2.isInstanceOf [VTable] then val vtab2 = tab2.asInstanceOf [VTable] for (k, vl) <- vtab2.edgeType if k != elab && k != elab2 do s.addEdgeType (k, vl._1, vl._2) - end if debug ("updateEdgeTypes", 
s"s.edgeType = ${s.edgeType}") end updateEdgeTypes @@ -400,7 +395,7 @@ case class VTable (name_ : String, schema_ : Schema, domain_ : Domain, key_ : Sc * @param ref the foreign key reference (edge-label, referenced table) */ def edgeTable (ref: (String, Table)): VTable = - val (elab, refTab) = ref // edge-label, referenced table + val refTab = ref._2 // edge-label, referenced table val newKey = key ++ refTab.key val newDom = pull (key) ++ refTab.pull (refTab.key) @@ -461,7 +456,6 @@ case class VTable (name_ : String, schema_ : Schema, domain_ : Domain, key_ : Sc // debug ("show", s"es.head = ${es.head}, elab = $elab") val x = es.getOrElse (elab, null) if x != null then prt (x.head.tuple(0), width_) - end if end for end if println (" |") diff --git a/src/main/scala/scalation/database/triplegraph/TripleGraph.scala b/src/main/scala/scalation/database/triplegraph/TripleGraph.scala index af7206d57..5ea6a8506 100644 --- a/src/main/scala/scalation/database/triplegraph/TripleGraph.scala +++ b/src/main/scala/scalation/database/triplegraph/TripleGraph.scala @@ -136,9 +136,8 @@ case class TripleGraph (label: Array [ValueType], //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Print this triple-graph in a deep sense with all the information. - * @param clip whether to clip out "Set(" and ")" */ - def printG (clip: Boolean = true): Unit = + def printG (): Unit = println (s"TripleGraph ($name, $size") println ("Triples: ") for e <- triples do println (s"\t $e") diff --git a/src/main/scala/scalation/database/triplegraph/TripleGraphMatcher.scala b/src/main/scala/scalation/database/triplegraph/TripleGraphMatcher.scala index 25c5cdf5b..6fb7d8122 100644 --- a/src/main/scala/scalation/database/triplegraph/TripleGraphMatcher.scala +++ b/src/main/scala/scalation/database/triplegraph/TripleGraphMatcher.scala @@ -129,7 +129,6 @@ trait TripleGraphMatcher (g: TripleGraph, q: TripleGraph): if ans != null then for i <- φ.indices do println (s"$i: ${φ(i)} == ? 
${ans(i)}") for i <- φ.indices do assert (φ(i) == ans(i)) - end if φ end test diff --git a/src/main/scala/scalation/dynamics/BallFlight.scala b/src/main/scala/scalation/dynamics/BallFlight.scala index db4c13299..c4ead6027 100644 --- a/src/main/scala/scalation/dynamics/BallFlight.scala +++ b/src/main/scala/scalation/dynamics/BallFlight.scala @@ -11,6 +11,7 @@ package scalation package dynamics +import scala.annotation.nowarn import scala.math.{cos, sin, Pi} import scala.util.control.Breaks.{breakable, break} @@ -31,25 +32,25 @@ import scalation.mathstat._ */ @main def ballFlight (): Unit = - val n = 200 // maximum number of time points - val tm = 5.0 // simulate for a maximum of tm seconds - val g = 9.80665 // gravitational force (meters/second^2) -// val m = 45.93 // mass of a golf ball in grams - val aa = 15.00 // launch angle in degrees - val ss = 100.00 // swing speed in miles/hour - val sf = 1.49 // smash factor - val s = ss * sf * 1609.344 / 3600 // initial ball speed in meters/second - val a = aa * Pi / 180.0 // launch angle in radians - val p0 = VectorD (0.0, 0.0) // initial position (x, y) at time t0=0 - val v0 = VectorD (s * cos(a), s * sin(a)) // initial velocity (v_x, v_y) at t0 + val n = 200 // maximum number of time points + val tm = 5.0 // simulate for a maximum of tm seconds + val g = 9.80665 // gravitational force (meters/second^2) +// val m = 45.93 // mass of a golf ball in grams + val aa = 15.00 // launch angle in degrees + val ss = 100.00 // swing speed in miles/hour + val sf = 1.49 // smash factor + val s = ss * sf * 1609.344 / 3600 // initial ball speed in meters/second + val a = aa * Pi / 180.0 // launch angle in radians + val p0 = VectorD (0.0, 0.0) // initial position (x, y) at time t0=0 + val v0 = VectorD (s * cos(a), s * sin(a)) // initial velocity (v_x, v_y) at t0 println ("ball speed s = " + s) println ("launch angle a = " + a) println ("ball velocity v0 = " + v0) // define the system of Ordinary Differential Equations (ODEs) - def dx_dt 
(t: Double, x: Double) = v0(0) // ODE 1 - def dy_dt (t: Double, y: Double) = v0(1) - g * t // ODE 2 + @nowarn def dx_dt (t: Double, x: Double) = v0(0) // ODE 1 + @nowarn def dy_dt (t: Double, y: Double) = v0(1) - g * t // ODE 2 val odes: Array [Derivative] = Array (dx_dt, dy_dt) def exactSolution (t: Double) = VectorD (v0(0) * t, v0(1) * t - .5 * g * t * t) diff --git a/src/main/scala/scalation/dynamics/DormandPrince.scala b/src/main/scala/scalation/dynamics/DormandPrince.scala index 32dd7aa93..b4bfa9928 100644 --- a/src/main/scala/scalation/dynamics/DormandPrince.scala +++ b/src/main/scala/scalation/dynamics/DormandPrince.scala @@ -11,6 +11,7 @@ package scalation package dynamics +import scala.annotation.nowarn import scala.math.{abs, E, pow} import scala.util.control.Breaks.{break, breakable} @@ -119,7 +120,6 @@ object DormandPrince if error < tol then y += h * (b1*k1 + b3*k3 + b4*k4 + b5*k5 + b6*k6) tn += h - end if debug ("integrate2", s"for step n = $n: error = $error, y = $y") // if n == 10 then System.exit (0) @@ -216,7 +216,6 @@ object DormandPrince if error < tol then cfor (0, y.dim) { j => y(j) += h * (b1*k1(j) + b3*k3(j) + b4*k4(j) + b5*k5(j) + b6*k6(j)) } tn += h - end if debug ("integrate2", s"for step n = $n: error = $error, y = $y") // if n == 4 then System.exit (0) @@ -254,25 +253,25 @@ import DormandPrince._ banner (s"Test ODE Solver Dormand-Prince compute y(2) where y0 = y(0) = 1") banner ("Test `integrate` on y' = f(t, u) = 2.0 * t") - def derv1 (t: Double, y: Double) = y // solution to differential equation is e^t - var y_ = (t: Double) => E~^t // symbolic solution - var y = integrate (derv1, y0, t_) // numeric solution + @nowarn def derv1 (t: Double, y: Double) = y // solution to differential equation is e^t + var y_ = (t: Double) => E~^t // symbolic solution + var y = integrate (derv1, y0, t_) // numeric solution println (s"\n==> at t = $t_: y = $y") println (s"\n==> correct t~^2 + 1 = ${y_(t_)}") println (s"\n==> error = ${y_(t_) - y}") 
banner ("Test `integrate` on y' = f(t, u) = y") - def derv2 (t: Double, y: Double) = 2.0 * t // f(t, y( for differential equation is t^2 + 1 - y_ = t => t~^2 + 1 // symbolic solution - y = integrate (derv2, y0, t_) // numeric solution + @nowarn def derv2 (t: Double, y: Double) = 2.0 * t // f(t, y) for differential equation is t^2 + 1 + y_ = t => t~^2 + 1 // symbolic solution + y = integrate (derv2, y0, t_) // numeric solution println (s"\n==> at t = $t_: y = $y") println (s"\n==> correct: E~^t = ${y_(t_)} ") println (s"\n==> error = ${y_(t_) - y}") banner ("Test `integrate` on y' = f(t, u) = t + y") - def derv3 (t: Double, y: Double) = t + y // f(t, y) for ordinary differential equation - y_ = t => 2*E~^t - t - 1 // symbolic solution - y = integrate (derv3, y0, t_) // numeric solution + def derv3 (t: Double, y: Double) = t + y // f(t, y) for ordinary differential equation + y_ = t => 2*E~^t - t - 1 // symbolic solution + y = integrate (derv3, y0, t_) // numeric solution println (s"\n==> at t = $t_: y = $y") println (s"\n==> correct: 2*E~^t - t - 1 = ${y_(t_)}") println (s"\n==> error = ${y_(t_) - y}") @@ -291,8 +290,8 @@ end dormandPrinceTest */ @main def dormandPrinceTest2 (): Unit = - def dy0_dt (t: Double, y: VectorD) = y(0) - def dy1_dt (t: Double, y: VectorD) = y(0) - y(1) + @nowarn def dy0_dt (t: Double, y: VectorD) = y(0) + @nowarn def dy1_dt (t: Double, y: VectorD) = y(0) - y(1) val odes = Array [DerivativeV] (dy0_dt, dy1_dt) def y_(t: Double): VectorD = VectorD (E~^t, 0.5 * E~^t + 1.5 * E~^(-t)) @@ -307,8 +306,8 @@ end dormandPrinceTest yy(0) = y_(0) for i <- 1 to 50 do t(i) = i * 0.2 - yy(i) = y_(t(i)) // symbolic solution - y(i) = integrateVV (odes, y(0), t(i)) // numeric solution + yy(i) = y_(t(i)) // symbolic solution + y(i) = integrateVV (odes, y(0), t(i)) // numeric solution end for println (s"t = $t") @@ -351,9 +350,9 @@ end dormandPrinceTest2 */ banner ("Test DormandPrince on system of ODEs with y0 = 1.24 at t_ = 1.0") - def dx_dt (t: Double, p: 
VectorD) = p(1) * p(2) - def dy_dt (t: Double, p: VectorD) = -p(0) * p(2) - def dz_dt (t: Double, p: VectorD) = -.51 * p(0) * p(1) + @nowarn def dx_dt (t: Double, p: VectorD) = p(1) * p(2) + @nowarn def dy_dt (t: Double, p: VectorD) = -p(0) * p(2) + @nowarn def dz_dt (t: Double, p: VectorD) = -.51 * p(0) * p(1) val odes = Array [DerivativeV] (dx_dt, dy_dt, dz_dt) val ti = 0.2 @@ -380,7 +379,7 @@ end dormandPrinceTest3 */ @main def dormandPrinceTest4 (): Unit = - println ("dormandPriceTest4 not yet implemented") // FIX - implement + println ("dormandPriceTest4 not yet implemented") // FIX - implement end dormandPrinceTest4 diff --git a/src/main/scala/scalation/dynamics/Integrator.scala b/src/main/scala/scalation/dynamics/Integrator.scala index a1e9d4d28..d1d8367e0 100644 --- a/src/main/scala/scalation/dynamics/Integrator.scala +++ b/src/main/scala/scalation/dynamics/Integrator.scala @@ -14,6 +14,7 @@ package dynamics import scalation.mathstat._ /** Function type for derivative functions: f (t, y) where y is a scalar + * In case one the arguments t or y is not used in f, may use @noward on def, or @unused on argument */ type Derivative = (Double, Double) => Double @@ -83,7 +84,6 @@ trait Integrator: val y = new VectorD (n) for i <- 0 until n do y(i) = integrate (f(i), y0(i), t, t0, step) y - end if end integrateV //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/dynamics/ModRosenbrock.scala b/src/main/scala/scalation/dynamics/ModRosenbrock.scala index 5c00b4c82..0ecc5bd4d 100644 --- a/src/main/scala/scalation/dynamics/ModRosenbrock.scala +++ b/src/main/scala/scalation/dynamics/ModRosenbrock.scala @@ -13,6 +13,7 @@ package scalation package dynamics +import scala.annotation.nowarn import scala.math.{abs, max, min, pow} import scalation.mathstat._ @@ -234,7 +235,6 @@ object ModRosenbrock val scale = max (safeScale * pow (err, - alphaDec), minScale) dt *= scale if dt < VSMALL then flaw 
("adaptiveSolver_solve", "FatalErrorInFunction stepsize underflow") - end if if err <= 1 then go = false end while @@ -274,9 +274,9 @@ import ModRosenbrock._ // @see http://www.mathworks.com/help/techdoc/ref/ode23.html (Example 1) - def dx_dt (t: Double, p: VectorD) = p(1) * p(2) - def dy_dt (t: Double, p: VectorD) = -p(0) * p(2) - def dz_dt (t: Double, p: VectorD) = -0.51 * p(0) * p(1) + @nowarn def dx_dt (t: Double, p: VectorD) = p(1) * p(2) + @nowarn def dy_dt (t: Double, p: VectorD) = -p(0) * p(2) + @nowarn def dz_dt (t: Double, p: VectorD) = -0.51 * p(0) * p(1) val odes = Array [DerivativeV] (dx_dt, dy_dt, dz_dt) val ti = 0.2 diff --git a/src/main/scala/scalation/dynamics/Radau.scala b/src/main/scala/scalation/dynamics/Radau.scala index 3a8abe21b..c517bb61b 100644 --- a/src/main/scala/scalation/dynamics/Radau.scala +++ b/src/main/scala/scalation/dynamics/Radau.scala @@ -93,7 +93,7 @@ object Radau val lu = ident - jacob * (root6 * h / 6.0) breakable { - for k <- 1 to MAX_ITER do + for _ <- 1 to MAX_ITER do val fg = new VectorD (f.length) for i <- fg.indices do fg(i) = f(i) (tn_1 + h1_3, gn) val fy = new VectorD (f.length) diff --git a/src/main/scala/scalation/dynamics/Reactions.scala b/src/main/scala/scalation/dynamics/Reactions.scala index 3b6f28325..f0a3f03b8 100644 --- a/src/main/scala/scalation/dynamics/Reactions.scala +++ b/src/main/scala/scalation/dynamics/Reactions.scala @@ -11,6 +11,7 @@ package scalation package dynamics +import scala.annotation.nowarn import scalation.mathstat._ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -39,23 +40,23 @@ import scalation.mathstat._ // define the system of Ordinary Differential Equations (ODEs) - // d[H2]/dt = - kf1 [H2] [O] + kb1 [H] [OH] - kf3 [H2] [OH] + kb3 [H2O] [H] - def dh2_dt (t: Double, c: VectorD) = -kf._1*c(0)*c(2) + kb._1*c(3)*c(4) - kf._3*c(0)*c(4) + kb._3*c(5)*c(3) + // d[H2]/dt = - kf1 [H2] [O] + kb1 [H] [OH] - kf3 [H2] [OH] + kb3 [H2O] [H] + @nowarn def dh2_dt (t: 
Double, c: VectorD) = -kf._1*c(0)*c(2) + kb._1*c(3)*c(4) - kf._3*c(0)*c(4) + kb._3*c(5)*c(3) - // d[O2]/dt = - kf2 [H] [O2] + kb2 [O] [OH] - def do2_dt (t: Double, c: VectorD) = -kf._2*c(3)*c(1) + kb._2*c(2)*c(4) + // d[O2]/dt = - kf2 [H] [O2] + kb2 [O] [OH] + @nowarn def do2_dt (t: Double, c: VectorD) = -kf._2*c(3)*c(1) + kb._2*c(2)*c(4) - // d[O]/dt = - kf1 [H2] [O] + kb1 [H] [OH] + kf2 [H] [O2] - kb2 [O] [OH] - def do_dt (t: Double, c: VectorD) = -kf._1*c(0)*c(2) + kb._1*c(3)*c(4) + kf._2*c(3)*c(1) - kb._2*c(2)*c(4) + // d[O]/dt = - kf1 [H2] [O] + kb1 [H] [OH] + kf2 [H] [O2] - kb2 [O] [OH] + @nowarn def do_dt (t: Double, c: VectorD) = -kf._1*c(0)*c(2) + kb._1*c(3)*c(4) + kf._2*c(3)*c(1) - kb._2*c(2)*c(4) - // d[H]/dt = + kf1 [H2] [O] - kb1 [H] [OH] - kf2 [H] [O2] + kb2 [O] [OH] + kf3 [H2] [OH] - kb3 [H2O] [H] - def dh_dt (t: Double, c: VectorD) = kf._1*c(0)*c(2) - kb._1*c(3)*c(4) - kf._2*c(3)*c(1) + kb._2*c(2)*c(4) + kf._3*c(0)*c(4) - kb._3*c(5)*c(3) + // d[H]/dt = + kf1 [H2] [O] - kb1 [H] [OH] - kf2 [H] [O2] + kb2 [O] [OH] + kf3 [H2] [OH] - kb3 [H2O] [H] + @nowarn def dh_dt (t: Double, c: VectorD) = kf._1*c(0)*c(2) - kb._1*c(3)*c(4) - kf._2*c(3)*c(1) + kb._2*c(2)*c(4) + kf._3*c(0)*c(4) - kb._3*c(5)*c(3) - // d[OH]/dt = + kf1 [H2] [O] - kb1 [H] [OH] + kf2 [H] [O2] - kb2 [O] [OH] - kf3 [H2] [OH] + kb3 [H2O] [H] - def doh_dt (t: Double, c: VectorD) = kf._1*c(0)*c(2) - kb._1*c(3)*c(4) + kf._2*c(3)*c(1) - kb._2*c(1)*c(4) - kf._3*c(0)*c(4) + kb._3*c(5)*c(3) + // d[OH]/dt = + kf1 [H2] [O] - kb1 [H] [OH] + kf2 [H] [O2] - kb2 [O] [OH] - kf3 [H2] [OH] + kb3 [H2O] [H] + @nowarn def doh_dt (t: Double, c: VectorD) = kf._1*c(0)*c(2) - kb._1*c(3)*c(4) + kf._2*c(3)*c(1) - kb._2*c(1)*c(4) - kf._3*c(0)*c(4) + kb._3*c(5)*c(3) - // d[H2O]/dt = + kf3 [H2] [OH] - kb3 [H2O] [H] - def dh2o_dt (t: Double, c: VectorD) = kf._3*c(0)*c(4) - kb._3*c(5)*c(3) + // d[H2O]/dt = + kf3 [H2] [OH] - kb3 [H2O] [H] + @nowarn def dh2o_dt (t: Double, c: VectorD) = kf._3*c(0)*c(4) - kb._3*c(5)*c(3) val 
odes: Array [DerivativeV] = Array (dh2_dt, do2_dt, do_dt, dh_dt, doh_dt, dh2o_dt) diff --git a/src/main/scala/scalation/dynamics/RungeKutta.scala b/src/main/scala/scalation/dynamics/RungeKutta.scala index 5d184c440..e89c89ab4 100644 --- a/src/main/scala/scalation/dynamics/RungeKutta.scala +++ b/src/main/scala/scalation/dynamics/RungeKutta.scala @@ -11,6 +11,7 @@ package scalation package dynamics +import scala.annotation.nowarn import scala.math.{abs, E, round} import scalation.mathstat._ @@ -28,7 +29,7 @@ import scalation.mathstat._ object RungeKutta extends Integrator: - private val debug = debugf ("RungeKutta", true) // debug function + private val debug = debugf ("RungeKutta", true) // debug function private val flaw = flawf ("RungeKutta") // flaw function private val ovf = Double.MaxValue / 10.0 // too big, may overflow @@ -131,25 +132,25 @@ import RungeKutta._ banner (s"Test ODE Solver Classic Runge-Kutta compute y(2) where y0 = y(0) = 1") banner ("Test `integrate` on y' = f(t, u) = 2.0 * t") - def derv1 (t: Double, y: Double) = 2.0 * t // solution to differential equation is t^2 - var y_ = (t: Double) => t~^2 + 1 // symbolic solution - var y = integrate (derv1, y0, t_) // numeric solution + @nowarn def derv1 (t: Double, y: Double) = 2.0 * t // solution to differential equation is t^2 + var y_ = (t: Double) => t~^2 + 1 // symbolic solution + var y = integrate (derv1, y0, t_) // numeric solution println (s"\n==> at t = $t_: y = $y") println (s"\n==> correct t~^2 + 1 = ${y_(t_)}") println (s"\n==> error = ${y_(t_) - y}") banner ("Test `integrate` on y' = f(t, u) = y") - def derv2 (t: Double, y: Double) = y // f(t, y( for differential equation is e^t - y_ = t => E~^t // symbolic solution - y = integrate (derv2, y0, t_) // numeric solution + @nowarn def derv2 (t: Double, y: Double) = y // f(t, y) for differential equation is e^t + y_ = t => E~^t // symbolic solution + y = integrate (derv2, y0, t_) // numeric solution println (s"\n==> at t = $t_: y = $y") println 
(s"\n==> correct: E~^t = ${y_(t_)} ") println (s"\n==> error = ${y_(t_) - y}") banner ("Test `integrate` on y' = f(t, u) = t + y") - def derv3 (t: Double, y: Double) = t + y // f(t, y) for ordinary differential equation - y_ = t => 2*E~^t - t - 1 // symbolic solution - y = integrate (derv3, y0, t_) // numeric solution + def derv3 (t: Double, y: Double) = t + y // f(t, y) for ordinary differential equation + y_ = t => 2*E~^t - t - 1 // symbolic solution + y = integrate (derv3, y0, t_) // numeric solution println (s"\n==> at t = $t_: y = $y") println (s"\n==> correct: 2*E~^t - t - 1 = ${y_(t_)}") println (s"\n==> error = ${y_(t_) - y}") @@ -176,9 +177,9 @@ end rungeKuttaTest println (s"\n==> at t = $t_: y = ${integrate (derv1, y0, t_)}") banner ("Test RungeKutta on System of ODEs with y0 = 1.24 at t_ = 1.0") - def dx_dt (t: Double, p: VectorD) = p(1) * p(2) - def dy_dt (t: Double, p: VectorD) = -p(0) * p(2) - def dz_dt (t: Double, p: VectorD) = -.51 * p(0) * p(1) + @nowarn def dx_dt (t: Double, p: VectorD) = p(1) * p(2) + @nowarn def dy_dt (t: Double, p: VectorD) = -p(0) * p(2) + @nowarn def dz_dt (t: Double, p: VectorD) = -.51 * p(0) * p(1) val odes = Array [DerivativeV] (dx_dt, dy_dt, dz_dt) val ti = 0.2 diff --git a/src/main/scala/scalation/dynamics/RungeKutta2.scala b/src/main/scala/scalation/dynamics/RungeKutta2.scala index 32898958f..a5aae3d48 100644 --- a/src/main/scala/scalation/dynamics/RungeKutta2.scala +++ b/src/main/scala/scalation/dynamics/RungeKutta2.scala @@ -15,6 +15,7 @@ package scalation package dynamics +import scala.annotation.nowarn import scala.math.{abs, E, round} import scalation.mathstat._ @@ -189,25 +190,25 @@ import RungeKutta2._ banner (s"Test ODE Solver ${solver.name} compute y(2) where y0 = y(0) = 1") banner ("Test `integrate` on y' = f(t, u) = 2.0 * t") - def derv1 (t: Double, y: Double) = 2.0 * t // solution to differential equation is t^2 - var y_ = (t: Double) => t~^2 + 1 // symbolic solution - var y = solver.integrate (derv1, y0, 
t_) // numeric solution + @nowarn def derv1 (t: Double, y: Double) = 2.0 * t // solution to differential equation is t^2 + var y_ = (t: Double) => t~^2 + 1 // symbolic solution + var y = solver.integrate (derv1, y0, t_) // numeric solution println (s"\n==> at t = $t_: y = $y") println (s"\n==> correct t~^2 + 1 = ${y_(t_)}") println (s"\n==> error = ${y_(t_) - y}") banner ("Test `integrate` on y' = f(t, u) = y") - def derv2 (t: Double, y: Double) = y // f(t, y( for differential equation is e^t - y_ = t => E~^t // symbolic solution - y = solver.integrate (derv2, y0, t_) // numeric solution + @nowarn def derv2 (t: Double, y: Double) = y // f(t, y) for differential equation is e^t + y_ = t => E~^t // symbolic solution + y = solver.integrate (derv2, y0, t_) // numeric solution println (s"\n==> at t = $t_: y = $y") println (s"\n==> correct: E~^t = ${y_(t_)} ") println (s"\n==> error = ${y_(t_) - y}") banner ("Test `integrate` on y' = f(t, u) = t + y") - def derv3 (t: Double, y: Double) = t + y // f(t, y) for ordinary differential equation - y_ = t => 2*E~^t - t - 1 // symbolic solution - y = solver.integrate (derv3, y0, t_) // numeric solution + def derv3 (t: Double, y: Double) = t + y // f(t, y) for ordinary differential equation + y_ = t => 2*E~^t - t - 1 // symbolic solution + y = solver.integrate (derv3, y0, t_) // numeric solution println (s"\n==> at t = $t_: y = $y") println (s"\n==> correct: 2*E~^t - t - 1 = ${y_(t_)}") println (s"\n==> error = ${y_(t_) - y}") @@ -238,9 +239,9 @@ end rungeKutta2Test banner ("Test RungeKutta on System of ODEs with y0 = 1.24 at t_ = 1.0") - def dx_dt (t: Double, p: VectorD) = p(1) * p(2) - def dy_dt (t: Double, p: VectorD) = -p(0) * p(2) - def dz_dt (t: Double, p: VectorD) = -.51 * p(0) * p(1) + @nowarn def dx_dt (t: Double, p: VectorD) = p(1) * p(2) + @nowarn def dy_dt (t: Double, p: VectorD) = -p(0) * p(2) + @nowarn def dz_dt (t: Double, p: VectorD) = -.51 * p(0) * p(1) val odes = Array [DerivativeV] (dx_dt, dy_dt, dz_dt) val ti = 
0.2 diff --git a/src/main/scala/scalation/dynamics/RungeKutta3.scala b/src/main/scala/scalation/dynamics/RungeKutta3.scala index 4af71569c..079f61485 100644 --- a/src/main/scala/scalation/dynamics/RungeKutta3.scala +++ b/src/main/scala/scalation/dynamics/RungeKutta3.scala @@ -14,6 +14,7 @@ package scalation package dynamics +import scala.annotation.nowarn import scala.math.{abs, E, pow} import scala.util.control.Breaks.{break, breakable} @@ -93,7 +94,6 @@ class RungeKutta3 (val name: String, a: MatrixD, b: VectorD, b_ : VectorD, c: Ve if error < tol then y += h * (b dot k) // update scalar y tn += h // move ahead in time - end if debug ("integrate2", s"for step n = $n: error = $error, y = $y") // if n == 10 then System.exit (0) // for debugging @@ -157,7 +157,6 @@ class RungeKutta3 (val name: String, a: MatrixD, b: VectorD, b_ : VectorD, c: Ve if error < tol then cfor (0, y.dim) { j => y(j) += h * (b dot k(j)) } // update scalar y tn += h // move ahead in time - end if debug ("integrateVV", s"for step n = $n: error = $error, y = $y") // if n == 4 then System.exit (0) @@ -236,25 +235,25 @@ import RungeKutta3._ banner (s"Test ODE Solver ${solver.name} compute y(2) where y0 = y(0) = 1") banner ("Test `integrate` on y' = f(t, y) = y") - def derv1 (t: Double, y: Double) = y // f(t, y( for differential equation is e^t - var y_ = (t: Double) => E~^t // symbolic solution - var y = solver.integrate (derv1, y0, t_) // numeric solution + @nowarn def derv1 (t: Double, y: Double) = y // f(t, y) for differential equation is e^t + var y_ = (t: Double) => E~^t // symbolic solution + var y = solver.integrate (derv1, y0, t_) // numeric solution println (s"\n==> at t = $t_: y = $y") println (s"\n==> correct: E~^t = ${y_(t_)} ") println (s"\n==> error = ${y_(t_) - y}") banner ("Test `integrate` on y' = f(t, y) = 2.0 * t") - def derv2 (t: Double, y: Double) = 2.0 * t // solution to differential equation is t^2 + 1 - y_ = t => t~^2 + 1 // symbolic solution - y = solver.integrate (derv2, 
y0, t_) // numeric solution + @nowarn def derv2 (t: Double, y: Double) = 2.0 * t // solution to differential equation is t^2 + 1 + y_ = t => t~^2 + 1 // symbolic solution + y = solver.integrate (derv2, y0, t_) // numeric solution println (s"\n==> at t = $t_: y = $y") println (s"\n==> correct t~^2 + 1 = ${y_(t_)}") println (s"\n==> error = ${y_(t_) - y}") banner ("Test `integrate` on y' = f(t, y) = t + y") - def derv3 (t: Double, y: Double) = t + y // f(t, y) for ordinary differential equation - y_ = t => 2*E~^t - t - 1 // symbolic solution - y = solver.integrate (derv3, y0, t_) // numeric solution + def derv3 (t: Double, y: Double) = t + y // f(t, y) for ordinary differential equation + y_ = t => 2*E~^t - t - 1 // symbolic solution + y = solver.integrate (derv3, y0, t_) // numeric solution println (s"\n==> at t = $t_: y = $y") println (s"\n==> correct: 2*E~^t - t - 1 = ${y_(t_)}") println (s"\n==> error = ${y_(t_) - y}") @@ -278,8 +277,8 @@ end rungeKutta3Test banner (s"Test ODE Solver ${solver.name} compute y(1) where y0 = y(0) = 1,24") - def dy0_dt (t: Double, y: VectorD) = y(0) - def dy1_dt (t: Double, y: VectorD) = y(0) - y(1) + @nowarn def dy0_dt (t: Double, y: VectorD) = y(0) + @nowarn def dy1_dt (t: Double, y: VectorD) = y(0) - y(1) val odes = Array [DerivativeV] (dy0_dt, dy1_dt) def y_(t: Double): VectorD = VectorD (E~^t, 0.5 * E~^t + 1.5 * E~^(-t)) @@ -336,9 +335,9 @@ end rungeKutta3Test2 banner ("Test RungeKutta on System of ODEs with y0 = 1.24 at t_ = 1.0") - def dx_dt (t: Double, p: VectorD) = p(1) * p(2) - def dy_dt (t: Double, p: VectorD) = -p(0) * p(2) - def dz_dt (t: Double, p: VectorD) = -0.51 * p(0) * p(1) + @nowarn def dx_dt (t: Double, p: VectorD) = p(1) * p(2) + @nowarn def dy_dt (t: Double, p: VectorD) = -p(0) * p(2) + @nowarn def dz_dt (t: Double, p: VectorD) = -0.51 * p(0) * p(1) val odes = Array [DerivativeV] (dx_dt, dy_dt, dz_dt) val ti = 0.2 diff --git a/src/main/scala/scalation/mathstat/Bidiagonal.scala 
b/src/main/scala/scalation/mathstat/Bidiagonal.scala index d5956e420..4f1accbbf 100644 --- a/src/main/scala/scalation/mathstat/Bidiagonal.scala +++ b/src/main/scala/scalation/mathstat/Bidiagonal.scala @@ -94,7 +94,6 @@ class Bidiagonal (a: MatrixD): s = sdot (u(?, i), u(?, j), i) f = s / h for k <- i until m do u(k, j) += f * u(k, i) - end for end if q(i) = g // assign ith main diagonal element @@ -109,7 +108,6 @@ class Bidiagonal (a: MatrixD): for j <- l until m do s = sdot (u(i), u(j), l) for k <- l until n do u(j, k) += s * e(k) - end for end if val y = abs (q(i)) + abs (e(i)); if y > bm then bm = y @@ -127,7 +125,6 @@ class Bidiagonal (a: MatrixD): for j <- l until n do val s = sdot (u(i), v(?, j), l) for k <- l until n do v(k, j) += s * v(k, i) - end for end if for j <- l until n do { v(i, j) = 0.0; v(j, i) = 0.0 } v(i, i) = 1.0 @@ -154,7 +151,6 @@ class Bidiagonal (a: MatrixD): for j <- i until m do u(j, i) /= g else for j <- i until m do u(j, i) = 0.0 - end if u(i, i) += 1.0 end for end transformLHS diff --git a/src/main/scala/scalation/mathstat/Combinatorics.scala b/src/main/scala/scalation/mathstat/Combinatorics.scala index 49f736def..4e9927041 100644 --- a/src/main/scala/scalation/mathstat/Combinatorics.scala +++ b/src/main/scala/scalation/mathstat/Combinatorics.scala @@ -283,7 +283,6 @@ object Combinatorics: // prod *= SQRT_PI / 2~^ia // else // flaw ("gammaF", "only handle positive integer and halves cases") -// end if // prod // } // gammaF @@ -422,7 +421,7 @@ import Combinatorics._ println ("\nBuild Pascal's Triangle using choose (n, k)") val max = 16 for n <- 0 to max do - for i <- 1 to (max - n) / 2 do print ("\t") + cfor (0, (max - n) / 2) { _ => print ("\t") } for k <- 0 to n do val c = choose (n, k) if n % 2 == 1 then if c < 1000 then print (" ") else print (" ") diff --git a/src/main/scala/scalation/mathstat/Convert.scala b/src/main/scala/scalation/mathstat/Convert.scala index ba73edacc..64cac0a67 100644 --- 
a/src/main/scala/scalation/mathstat/Convert.scala +++ b/src/main/scala/scalation/mathstat/Convert.scala @@ -22,6 +22,6 @@ package mathstat def func2vector (f: FunctionS2S, ab: Interval, n: Int = 100): VectorD = val step = (ab._2 - ab._1) / n var x = ab._1 - step - VectorD (for i <- 0 to n yield { x += step; f(x) }) + VectorD (for _ <- 0 to n yield { x += step; f(x) }) end func2vector diff --git a/src/main/scala/scalation/mathstat/Correlogram.scala b/src/main/scala/scalation/mathstat/Correlogram.scala index 3bdae7f0c..060ab8cd2 100644 --- a/src/main/scala/scalation/mathstat/Correlogram.scala +++ b/src/main/scala/scalation/mathstat/Correlogram.scala @@ -109,9 +109,8 @@ trait Correlogram (y: VectorD, adjusted: Boolean = true): //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Plot both the Auto-Correlation Function (ACF) and the Partial Auto-Correlation * Function (PACF) with confidence bound. - * @param show whether to show the ACF, PACF values */ - def plotCorrelogram (show: Boolean = true): Unit = + def plotCorrelogram (): Unit = plotFunc (acF, "ACF") plotFunc (pacF, "PACF") end plotCorrelogram diff --git a/src/main/scala/scalation/mathstat/Eigen.scala b/src/main/scala/scalation/mathstat/Eigen.scala index debc58e7a..351bbac15 100644 --- a/src/main/scala/scalation/mathstat/Eigen.scala +++ b/src/main/scala/scalation/mathstat/Eigen.scala @@ -113,12 +113,12 @@ class Eigenvalue (a: MatrixD): for k <- 0 until ITERATIONS if converging do // major iterations converging = true - for l <- 0 until ITERATIONS do // minor iterations + cfor (0, ITERATIONS) { _ => // minor iterations val s = g(n - 1, n - 1) // the shift parameter val eye_g = eye (g.dim, g.dim) val (qq, rr) = (new Fac_QR (g - eye_g * s)).factor12 () g = rr.asInstanceOf [MatrixD] * qq.asInstanceOf [MatrixD] + eye_g * s // FIX - end for + } // cfor for i <- 0 until n do e(i) = g(i, i) // extract eigenvalues from diagonal val e0 = e(0) // consider one eigenvalue @@ -126,7 +126,6 @@ 
class Eigenvalue (a: MatrixD): converging = false // end major iterations else lastE = e0 // save this eigenvalue - end if println ("-" * 60) println (s"Eigenvalue: on iteration $k: g = $g") diff --git a/src/main/scala/scalation/mathstat/Fac_Cholesky.scala b/src/main/scala/scalation/mathstat/Fac_Cholesky.scala index c576ce54d..47193003f 100644 --- a/src/main/scala/scalation/mathstat/Fac_Cholesky.scala +++ b/src/main/scala/scalation/mathstat/Fac_Cholesky.scala @@ -58,7 +58,6 @@ class Fac_Cholesky (a: MatrixD) end for else l(j, j) = sqrt (EPS) - end if end for factored = true this @@ -91,7 +90,6 @@ class Fac_Cholesky (a: MatrixD) else flaw ("factor", s"sqrt of negative diff = $diff, setting l(j, j) to zero") l(j, j) = 0.0 - end if end for factored = true this @@ -118,7 +116,6 @@ class Fac_Cholesky (a: MatrixD) val l_jj = l(j, j) if l_jj == 0.0 then flaw ("factor", s"divide by zero l($j, $j) = $l_jj") l(i, j) = diff / l_jj - end if end for factored = true this diff --git a/src/main/scala/scalation/mathstat/Fac_LU.scala b/src/main/scala/scalation/mathstat/Fac_LU.scala index 0e38eec02..02fca5c1a 100644 --- a/src/main/scala/scalation/mathstat/Fac_LU.scala +++ b/src/main/scala/scalation/mathstat/Fac_LU.scala @@ -433,6 +433,8 @@ object Fac_LU: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the inverse of matrix a by calling the inverse method in `Fac_LU`. * Note: `Fac_LU.inverse` is generally faster and more robust than `Fac_Inv.inverse`. + * Usage: inverse (a) + * Usage: inverse (a)(lu) // shortcut when lu is already computed * @param a the matrix whose inverse is sought * @param lu an LU factorization (use existing or make a new one) */ @@ -445,6 +447,8 @@ object Fac_LU: /** Compute the determinant of matrix a. The value of the determinant * indicates, among other things, whether there is a unique solution to a * system of linear equations (a nonzero determinant). 
+ * Usage: det (a) + * Usage: det (a)(lu) // shortcut when lu is already computed * @param a the matrix whose determinant is sought * @param lu an LU factorization (use existing or make a new one) */ diff --git a/src/main/scala/scalation/mathstat/Fac_QR.scala b/src/main/scala/scalation/mathstat/Fac_QR.scala index d4c526f23..b83b2ea03 100644 --- a/src/main/scala/scalation/mathstat/Fac_QR.scala +++ b/src/main/scala/scalation/mathstat/Fac_QR.scala @@ -83,7 +83,6 @@ class Fac_QR (aa: MatrixD, needQ: Boolean = false) if at_k(k) < 0.0 then _norm = -_norm // make k-th Householder vector cfor (k, m) { i => at_k(i) /= _norm } at_k(k) += 1.0 - end if r(k, k) = -_norm // set the diagonal of r matrix cfor (k+1, p) { j => // transform all the rest of aa matrix diff --git a/src/main/scala/scalation/mathstat/Fac_QR_RR.scala b/src/main/scala/scalation/mathstat/Fac_QR_RR.scala index b82d4998e..87787a37f 100644 --- a/src/main/scala/scalation/mathstat/Fac_QR_RR.scala +++ b/src/main/scala/scalation/mathstat/Fac_QR_RR.scala @@ -6,6 +6,7 @@ * @see LICENSE (MIT style license file). * * @note Rank Revealing QR Matrix Factorization + * Simple way to determine matrix a's rank: Fac_QR_RR.rank (a) */ package scalation @@ -61,7 +62,6 @@ class Fac_QR_RR (aa: MatrixD, needQ: Boolean = true) if k_m != _rank then at.swap (k_m, _rank) // swap rows in at (columns in a) c.swap (k_m, _rank) // swap column lengths - end if colHouse (_rank) // perform kth factoring step for j <- _rank+1 until n do c(j) -= at(j, _rank) ~^ 2 _rank += 1 @@ -80,12 +80,26 @@ class Fac_QR_RR (aa: MatrixD, needQ: Boolean = true) end factor //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the rank (number of independent columns) in matrix 'aa'. + /** Return the rank (number of independent columns) of matrix 'aa'. 
*/ def rank: Int = _rank end Fac_QR_RR + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Fac_QR_RR` object provides a convenient way to determine the rank of matrix. + */ +object Fac_QR_RR: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the rank (number of independent columns) of matrix 'a'. + * @param a the matrix whose rank is to be determined + */ + def rank (a: MatrixD): Int = new Fac_QR_RR (a, false).factor ().rank + +end Fac_QR_RR + import Fac_QR._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/mathstat/Fac_SVD.scala b/src/main/scala/scalation/mathstat/Fac_SVD.scala index 4dc6cd100..ef139f227 100644 --- a/src/main/scala/scalation/mathstat/Fac_SVD.scala +++ b/src/main/scala/scalation/mathstat/Fac_SVD.scala @@ -6,6 +6,8 @@ * @see LICENSE (MIT style license file). * * @note Singular Value Decomposition (SVD) Matrix Factorization + * produces Compact SVD; for Full SVD see `fullSVD` method + * @see eecs16b.org/notes/sp24/note15.pdf * * @see Matrix Computations: Algorithm 8.6.1 Golub-Kahan SVD Step Algorithm * @see Matrix Computations: Algorithm 8.6.2 SVD Algorithm @@ -47,23 +49,23 @@ type FactorTypeFull = (MatrixD, MatrixD, MatrixD) class Fac_SVD (a: MatrixD) extends Factorization: - private val flaw = flawf ("Fac_SVD") // flaw function - private val debug = debugf ("Fac_SVD", false) // debug function + private val flaw = flawf ("Fac_SVD") // flaw function + private val debug = debugf ("Fac_SVD", false) // debug function - private val MAX_ITER = 100 // maximum number of iterations - private val m = a.dim // number of rows - private val n = a.dim2 // number of columns + private val MAX_ITER = 100 // maximum number of iterations + private val m = a.dim // number of rows + private val n = a.dim2 // number of columns if n > m then flaw ("init", s"Fac_SVD implementation requires m = $m >= n = $n") - 
private var l = 0 // lower index vs. k for zeroing out super-diagonal elements - private var f, g = 0.0 // typcally [ f g ] - private var h = 0.0 // [ 0 h ] - private var bmx = 0.0 // maximum column magnitude in the bidiagonal matrix - private var test_fconverge = true // whether singular values have reached converging magnitude - private var eps = EPSILON // adjustable small value + private var l = 0 // lower index vs. k for zeroing out super-diagonal elements + private var f, g = 0.0 // typically [ f g ] + private var h = 0.0 // [ 0 h ] + private var bmx = 0.0 // maximum column magnitude in the bidiagonal matrix + private var test_fconverge = true // whether singular values have reached converging magnitude + private var eps = EPSILON // adjustable small value - private var mat3: FactorType = null // result of factorization: 3 matrices + private var mat3: FactorType = null // result of factorization: 3 matrices // FIX: make naming of matrices consistent @@ -86,18 +88,18 @@ class Fac_SVD (a: MatrixD) * such that a = u *~ q * v.t. 
*/ def factor123 (): FactorType = - if mat3 != null then return mat3 // matrix a has already been factored + if mat3 != null then return mat3 // matrix a has already been factored - val bid = new Bidiagonal (a) // class for making bidiagonal matrices - val (u, b, v) = bid.bidiagonalize () // factor a into a bidiagonal matrix b - val (e, q) = bid.e_q // get b's super-diagonal e and main diagonal q - bmx = bid.bmax // largest column magnitude in b - eps *= bmx // adjust eps based on bmx - var c, s = 0.0 // cosine, sine for rotations - var y = 0.0 // holds values from q and u - var z = 0.0 // holds values from q, v and normalization of (f, h) + val bid = new Bidiagonal (a) // class for making bidiagonal matrices + val (u, _, v) = bid.bidiagonalize () // factor a into a bidiagonal matrix b in (u, b, v) + val (e, q) = bid.e_q // get b's super-diagonal e and main diagonal q + bmx = bid.bmax // largest column magnitude in b + eps *= bmx // adjust eps based on bmx + var c, s = 0.0 // cosine, sine for rotations + var y = 0.0 // holds values from q and u + var z = 0.0 // holds values from q, v and normalization of (f, h) - for k <- n-1 to 0 by -1 do iterate (k) // diagonalization of the bidiagonal form + for k <- n-1 to 0 by -1 do iterate (k) // diagonalization of the bidiagonal form //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /* Iterate until the super-diagonal element e(k) is (near) zero. 
@@ -119,7 +121,6 @@ class Fac_SVD (a: MatrixD) else shiftFromBottom (k, e, q) qrTransform (l, k) - end if } // cfor end iterate @@ -132,11 +133,10 @@ class Fac_SVD (a: MatrixD) debug ("testFConvergence", s"(l, k) = ($l, $k)") z = q(k) - if l == k then // convergence indicated by l equaling k - if z < 0.0 then // make sure singular value is non-negative + if l == k then // convergence indicated by l equaling k + if z < 0.0 then // make sure singular value is non-negative q(k) = -z for j <- 0 until n do v(j, k) = -v(j, k) - end if true else false end testFConvergence @@ -148,20 +148,20 @@ class Fac_SVD (a: MatrixD) * @param k the upper index */ def cancellation (l : Int, k : Int): Unit = - c = 0.0; s = 1.0 // set cosine, sine + c = 0.0; s = 1.0 // set cosine, sine var converged = false var j = l - cfor (! converged && j <= k, j += 1) { // each column l to k - f = s * e(j) // sine * e(j) - e(j) *= c // cosine * e(j) + cfor (! converged && j <= k, j += 1) { // each column l to k + f = s * e(j) // sine * e(j) + e(j) *= c // cosine * e(j) - if abs (f) <= eps then // f near zero => return & test f convergence + if abs (f) <= eps then // f near zero => return & test f convergence converged = true else - g = q(j); h = hypot (f, g) // hypotenuse/norm + g = q(j); h = hypot (f, g) // hypotenuse/norm q(j) = h - c = g / h; s = -f / h // reset cosine, sine - rotateU (l-1, j) // rotation for columns l-1 and j of u + c = g / h; s = -f / h // reset cosine, sine + rotateU (l-1, j) // rotation for columns l-1 and j of u end if } // cfor end cancellation @@ -172,25 +172,25 @@ class Fac_SVD (a: MatrixD) * @param k the upper index */ def qrTransform (l : Int, k : Int): Unit = - c = 1.0; s = 1.0 // set cosine, sine - for j <- l+1 to k do // each column l+1 to k - g = e(j); h = s * g; g *= c // compute g, h - y = q(j); z = hypot (f, h) // hypotenuse/norm - e(j-1) = z // update e - - c = f / z; s = h / z // reset cosine, sine - f = bmx * c + g * s // compute f - g = -bmx * s + g * c; h = y 
* s // compute g, h + c = 1.0; s = 1.0 // set cosine, sine + for j <- l+1 to k do // each column l+1 to k + g = e(j); h = s * g; g *= c // compute g, h + y = q(j); z = hypot (f, h) // hypotenuse/norm + e(j-1) = z // update e + + c = f / z; s = h / z // reset cosine, sine + f = bmx * c + g * s // compute f + g = -bmx * s + g * c; h = y * s // compute g, h y *= c - rotateV (j-1, j) // update v + rotateV (j-1, j) // update v - z = hypot (f, h) // hypotenuse/norm - q(j-1) = z // update q + z = hypot (f, h) // hypotenuse/norm + q(j-1) = z // update q - c = f / z; s = h / z // reset cosine, sine - f = c * g + s * y // compute f + c = f / z; s = h / z // reset cosine, sine + f = c * g + s * y // compute f bmx = -s * g + c * y - rotateU (j-1, j) // update u + rotateU (j-1, j) // update u end for e(l) = 0.0 e(k) = f @@ -203,8 +203,8 @@ class Fac_SVD (a: MatrixD) * @param j2 the second column involved */ def rotateU (j1: Int, j2: Int): Unit = - for i <- 0 until m do // each row of u - y = u(i, j1) // changes to y and z affect outer scope + for i <- 0 until m do // each row of u + y = u(i, j1) // changes to y and z affect outer scope z = u(i, j2) u(i, j1) = y * c + z * s u(i, j2) = -y * s + z * c @@ -217,8 +217,8 @@ class Fac_SVD (a: MatrixD) * @param j2 the second column involved */ def rotateV (j1: Int, j2: Int): Unit = - for i <- 0 until n do // each row of v - bmx = v(i, j1) // changes to y and z affect outer scope + for i <- 0 until n do // each row of v + bmx = v(i, j1) // changes to y and z affect outer scope z = v(i, j2) v(i, j1) = bmx * c + z * s v(i, j2) = -bmx * s + z * c @@ -244,15 +244,15 @@ class Fac_SVD (a: MatrixD) end shiftFromBottom // final part for factor method - flip (u, q) // convert singluar values to positive, if any, adjusting u accordingly - reorder ((u, q, v)) // reorder so largest singular values come first - (u, q, v) // return left matrix, singular vector and right matrix + flip (u, q) // convert singular values to positive, if any, adjusting u 
accordingly + reorder ((u, q, v)) // reorder so largest singular values come first + (u, q, v) // return left matrix, singular vector and right matrix end factor123 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Flip negative singular values to positive and set singular values close * to zero to zero. - * @param u the left orthongonal matrix + * @param u the left orthogonal matrix * @param s the vector of singular values */ def flip (u: MatrixD, s: VectorD): Unit = @@ -264,8 +264,8 @@ class Fac_SVD (a: MatrixD) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Flip negative main diagonal elements in the singular vectors to positive. - * @param u the left orthongonal matrix - * @param v the right orthongonal matrix + * @param u the left orthogonal matrix + * @param v the right orthogonal matrix */ def flip (u: MatrixD, v: MatrixD): Unit = for j <- u.indices2 if u(j, j) < 0.0 do u(?, j) = u(?, j) * -1.0 @@ -281,12 +281,11 @@ class Fac_SVD (a: MatrixD) def reorder (ft: FactorType): Unit = val n = ft._2.dim for i <- 0 until n do - val j = ft._2.argmax (i, n) // index of largest element in s(i:n) + val j = ft._2.argmax (i , n) // index of largest element in s(i:n) if i != j then - ft._1.swapCol (i, j) // u left orthogonal matrix - ft._2.swap (i, j) // s diagonal matrix - ft._3.swapCol (i, j) // v right orthogonal matrix - end if + ft._1.swapCol (i, j) // u left orthogonal matrix + ft._2.swap (i, j) // s diagonal matrix + ft._3.swapCol (i, j) // v right orthogonal matrix end for end reorder @@ -300,13 +299,12 @@ class Fac_SVD (a: MatrixD) def testFSplitting (k : Int, e: VectorD, q: VectorD): Unit = breakable { for ll <- k to 0 by -1 do - l = ll // make global index l track loop variable ll + l = ll // make global index l track loop variable ll test_fconverge = false if abs (e(ll)) <= eps then debug ("Fac_SVD", s"e(ll) = ${e(ll)}") test_fconverge = true break () - end if if abs (q(ll-1)) <= eps then break () 
end for } // breakable @@ -317,10 +315,10 @@ class Fac_SVD (a: MatrixD) * @param b the constant vector */ def solve (b: VectorD): VectorD = - val (u, d, vt) = factor123 () // factor using SVD - val alpha = u.transpose * b // principle component regression -// vt *~ d.recip * alpha // estimate coefficients - vt *~ recip (d) * alpha // estimate coefficients - handles 0's + val (u, d, vt) = factor123 () // factor using SVD + val alpha = u.transpose * b // principle component regression +// vt *~ d.recip * alpha // estimate coefficients + vt *~ recip (d) * alpha // estimate coefficients - handles 0's end solve //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -328,7 +326,7 @@ class Fac_SVD (a: MatrixD) * a * a^-1 = I */ def inverse: MatrixD = - val (u, d, vt) = factor123 () // factor using SVD + val (u, d, vt) = factor123 () // factor using SVD vt *~ d.recip * u.transpose end inverse @@ -363,52 +361,91 @@ end Fac_SVD */ object Fac_SVD: - val a1 = MatrixD ((2, 2), 1.00, 2.00, // original matrix - 0.00, 2.00) // 2 by 2, bidiagonal + val a1 = MatrixD ((2, 2), 1.00, 2.00, // original matrix + 0.00, 2.00) // 2 by 2, bidiagonal - val a2 = MatrixD ((3, 2), 3.0, -1.0, // original matrix - 1.0, 3.0, // 3 by 2 + val a2 = MatrixD ((3, 2), 3.0, -1.0, // original matrix + 1.0, 3.0, // 3 by 2 1.0, 1.0) - val a3 = MatrixD ((3, 3), 1.0, 1.0, 0.0, // original matrix - 0.0, 2.0, 2.0, // 3 by 3, bidiagonal + val a3 = MatrixD ((3, 3), 1.0, 1.0, 0.0, // original matrix + 0.0, 2.0, 2.0, // 3 by 3, bidiagonal 0.0, 0.0, 3.0) - val a4 = MatrixD ((3, 3), 0.0, 1.0, 1.0, // original matrix - sqrt(2), 2.0, 0.0, // 3 by 3 + val a4 = MatrixD ((3, 3), 0.0, 1.0, 1.0, // original matrix + sqrt(2), 2.0, 0.0, // 3 by 3 0.0, 1.0, 1.0) - val a5 = MatrixD ((4, 4), 0.9501, 0.8913, 0.8214, 0.9218, // original matrix - 0.2311, 0.7621, 0.4447, 0.7382, // 4 by 4 + val a5 = MatrixD ((4, 4), 0.9501, 0.8913, 0.8214, 0.9218, // original matrix + 0.2311, 0.7621, 0.4447, 0.7382, // 4 by 4 
0.6068, 0.4565, 0.6154, 0.1763, 0.4860, 0.0185, 0.7919, 0.4057) - val a6 = MatrixD ((3, 2), 4, 5, // original matrix - 6, 7, // 3 by 2 + val a6 = MatrixD ((3, 2), 4, 5, // original matrix + 6, 7, // 3 by 2 9, 8) - val a7 = MatrixD ((4, 4), 1.0, 2.0, 3.0, 4.0, // original matrix - 4.0, 3.0, 2.0, 1.0, // 4 by 4 + val a7 = MatrixD ((4, 4), 1.0, 2.0, 3.0, 4.0, // original matrix + 4.0, 3.0, 2.0, 1.0, // 4 by 4 5.0, 6.0, 7.0, 8.0, 8.0, 7.0, 6.0, 5.0) - val a8 = MatrixD ((5, 3), 0.44444444, 0.3333333, -1.3333333, // original matrix - 0.41111111, -0.3166667, -0.3333333, // 5 by 3 + val a8 = MatrixD ((5, 3), 0.44444444, 0.3333333, -1.3333333, // original matrix + 0.41111111, -0.3166667, -0.3333333, // 5 by 3 -0.18888889, 0.4833333, -0.3333333, -0.03333333, -0.6500000, 1.0000000, -0.63333333, 0.1500000, 1.0000000) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert an SVD factoring to its full representation, returning the result as - * three matrices. + /** Convert an SVD factoring to a full matrix representation, returning the result as + * three matrices having the following dimensions: U (m-by-n), S (n-by-n) and V (n-by-n). + * This is the compact SVD representation, see the next methods of a full SVD representation. * @param u_s_v the 3-way factorization */ def factorFull (u_s_v: FactorType): FactorTypeFull = - val s = u_s_v._2.dim - val ss = new MatrixD (s, s); ss(?, ?) = s // turn vector into diagonal matrix + val s = u_s_v._2 // vector of singular values + val ss = new MatrixD (s.dim, s.dim); ss(?, ?) = s // turn vector into diagonal matrix (u_s_v._1, ss, u_s_v._3) end factorFull + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a full SVD factorization (rather than a compact one) where the three + * matrices have the following dimensions: U (m-by-m), S (m-by-n) and V (n-by-n). 
+ * @see https://eecs16b.org/notes/sp24/note15.pdf + * @param a the m-by-n matrix to factor/decompose (requires m >= n) + */ + def fullSVD (a: MatrixD): FactorTypeFull = + val (m, n) = a.dims + val svd = new Fac_SVD (a) + val (u, s, v) = factorFull (svd.factor123 ()) // factor matrix a + if m - n > 0 then + var uu = u + cfor (0, m - n) { _ => uu = uu :^+ gsOrtho (uu) } // append an orthogonal column + (uu, + s ++ (new MatrixD (m - n, n)), // append rows of zeroes + v) // use original v + else + (u, s, v) + end fullSVD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return a new normalized m-dimensional vector that is orthogonal to all + * columns in an orthonormal matrix u of dimension m-by-n (m > n), + * Use the Gram-Schmidt (SG) Orthogonalization Algorithm. + * v = a - Σ (a ⋅ uᵢ) * uᵢ + * @see https://www.cis.upenn.edu/~cis6100/Gram-Schmidt-Bjorck.pdf + * @param u the given m-by-n orthonormal matrix + */ + def gsOrtho (u: MatrixD): VectorD = + val m = u.dim + val ut = u.transpose + val a = VectorD.one (m) + val vs = VectorD (m) + cfor (0, m) { i => vs += ut(i) * (a dot ut(i)) } + val v = a - vs + v / v.norm + end gsOrtho + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Reduce the dimensionality of the u, s and v matrices from n to k. 
* If k = rank, there is no loss of information; when k < rank, multiplying @@ -418,9 +455,9 @@ object Fac_SVD: * @param k the desired dimensionality */ def reduce (u_s_v: FactorType, k: Int): FactorType = - (u_s_v._1(?, 0 until k), // slice columns from matrtix u - u_s_v._2(0 until k), // slice elements from vector s - u_s_v._3(?, 0 until k)) // slice columns from matrtix v + (u_s_v._1(?, 0 until k), // slice columns from matrix u + u_s_v._2(0 until k), // slice elements from vector s + u_s_v._3(?, 0 until k)) // slice columns from matrix v end reduce //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -438,18 +475,18 @@ object Fac_SVD: /** Test the SVD Factorization algorithm on matrix a by factoring the matrix * into a left matrix u, a vector s, and a right matrix v. Then multiply back * to recover the original matrix u *~ s * v.t. - * @param a the orginal matrix + * @param a the original matrix * @param u_s_v the given matrix a factored into three components * @param name the name of the test case */ def test (a: MatrixD, svd: Fac_SVD, name: String): Unit = banner (name) println (s"factor matrix a = $a") - val (u, s, v) = svd.factor123 () // factor matrix a + val (u, s, v) = svd.factor123 () // factor matrix a println (sline () + s"into (u, s, v) = ${(u, s, v)}") - val prod = u *~ s * v.transpose // compute the product - println (sline () + s"check: u *~ s * v.t = $prod") // should equal the original a matrix - println (s"prod - a = ${prod - a}") // difference should be close to 0 + val prod = u *~ s * v.transpose // compute the product + println (sline () + s"check: u *~ s * v.t = $prod") // should equal the original a matrix + println (s"prod - a = ${prod - a}") // difference should be close to 0 println (sline ()) assert (prod == a) println (sline ()) diff --git a/src/main/scala/scalation/mathstat/HeatMap.scala b/src/main/scala/scalation/mathstat/HeatMap.scala new file mode 100644 index 000000000..ea48b82a7 --- /dev/null +++ 
b/src/main/scala/scalation/mathstat/HeatMap.scala @@ -0,0 +1,295 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Mon Oct 27 17:08:17 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note HeatMap to Display the Values in a Matrix with Color-Coding + * e.g., Correlation Matrix, Lagged Cross Correlation Matrix + */ + +package scalation +package mathstat + +import scala.math.Pi + +import scalation.scala2d.{BasicStroke, Graphics, Graphics2D, Line, Rectangle, VizFrame, ZoomablePanel} +import scalation.scala2d.Colors._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `HeatMap` class takes a matrix of values and displays them in color-coded + * cells where the color is based on the cell value (e.g., correlation). The + * value for the cell 'heat(i, j)' is also displayed. + *------------------------------------------------------------------------------ + * Zoom functionality has two options: + * (1) mouse wheel controls the amount of zooming (in/out); + * (2) mouse dragging repositions the objects in the panel (drawing canvas). 
+ * @see ZoomablePanel + *------------------------------------------------------------------------------ + * @param heat the matrix of values (to be color-coded for display) + * @param name the column names for the matrix (defaults to null) + * @param _title the title of the heat-map (defaults to "HeatMap") + */ +class HeatMap (heat: MatrixD, name: Array [String] = null, _title: String = "HeatMap") + extends VizFrame (_title, null): + + private val flaw = flawf ("HeapMap") // flaw function + + /** Create a drawing canvas + */ + private val canvas = new HmCanvas (getW, getH, heat, name) + + if heat.dim2 != name.size then flaw ("init", s"requires heat.dim2 (${heat.dim2}) == name.size (${name.size}") + + getContentPane ().add (canvas) + setVisible (true) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert a HeatMap to a string. + */ + override def toString: String = canvas.toString + +end HeatMap + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `FramelessHeatMap` class should be used in embedded applications. + * @param frameW the width + * @param frameH the height + * @param heat the matrix of values (to be color-coded for display) + * @param name the column names for the matrix + */ +class FramelessHeatMap (frameW: Int, frameH: Int, heat: MatrixD, name: Array [String]): + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Dynamically create and return a drawing canvas. + */ + def canvas: HmCanvas = new HmCanvas (frameW, frameH, heat, name) + +end FramelessHeatMap + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Create a canvas on which to draw the heat-map. 
+ * @param frameW the frame width + * @param frameH the frame height + * @param heat the matrix of values (to be color-coded to display) + * @param name the column names for the matrix + */ +class HmCanvas (frameW: Int, frameH: Int, heat: MatrixD, name: Array [String]) +// extends Panel: + extends ZoomablePanel: + + private val offset = 80 // offset heat-map with frame + private val baseX = offset + private val baseY = frameH - offset + private val frameWO = frameW - 2 * offset // subtract left and right offsets + private val frameHO = frameH - 2 * offset // subtract top and bottom offsets + private val cell = Rectangle () // each matrix cell has color assigned + private val axis = Line (0, 0, 0, 0) // use lines for axes + private val h_min = heat.mmin // minimum value in heat matrix + private val h_max = heat.mmax // maximum value in heat matrix + + private val mc = 255 * 6 // max color value times 6 + private val c = (VectorD.range (0 until 7) / 6.0).reverse // fractional value cutoffs + + setBackground (white) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Paint the canvas by drawing the rectangles (cells) making up the heat-map. 
+ * @param gr low-resolution graphics environment + */ + override def paintComponent (gr: Graphics): Unit = + super.paintComponent (gr) + val g2d = gr.asInstanceOf [Graphics2D] // use hi-res + + g2d.setTransform (at) // used for zooming (at @see `ZoomablePanel`) + + val (nx, ny) = heat.dims + val (cellW, cellH) = (frameWO / nx, frameHO / ny) + var x_pos = 0 + var y_pos = 0 + + //:: Draw the axes + + g2d.setPaint (black) + g2d.setStroke (new BasicStroke (2.0f)) + axis.setLine (baseX - 1, baseY + 1, baseX + 10 + frameWO, baseY + 1) + g2d.draw (axis) + axis.setLine (baseX - 1, offset - 10, baseX - 1, baseY + 1) + g2d.draw (axis) + + //:: Draw the labels on the axes + + y_pos = baseY + 15 + for i <- heat.indices do + val x_val = i.toString + x_pos = offset - 8 + i * (frameWO) / heat.dim + g2d.drawString (x_val, x_pos, y_pos) + if name != null then + val (nam, back) = clip (name(i), 20) + drawRotatedString (g2d, nam, x_pos + cellW - 10 + back/2, y_pos + 10 + back/2) +// g2d.drawString (nam, x_pos + cellW / 2 - back, y_pos) + end for + + x_pos = baseX - 30 + for j <- heat.indices2 do + val y_val = j.toString + y_pos = offset + 20 + j * (frameHO) / heat.dim2 + g2d.drawString (y_val, x_pos, y_pos) + end for + + //:: Draw the cells making up the heat-map + + x_pos = baseX - 30 + y_pos = baseY + 15 + val shiftW = cellW / 2 - 8 + val shiftH = cellH / 2 + for i <- heat.indices do + x_pos = offset - 2 + i * (frameWO) / heat.dim + for j <- heat.indices2 do + val h_ij = heat(i, j) + y_pos = offset + 2 + j * (frameHO) / heat.dim2 + cell.setFrame (x_pos, y_pos, cellW, cellH) // x, y, w, h + val (col1, col2) = computeColor (h_ij) + g2d.setPaint (col1) + g2d.fill (cell) + g2d.setPaint (col2) + val (num, _) = clip (h_ij.toString, 4) + g2d.drawString (num, x_pos + shiftW, y_pos + shiftH) + end for + end for + end paintComponent + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Draw a text string rotated by the given angle. 
+ * @param g2d hi-res graphics + * @param text the text string to be displayed + * @param cx center for x-coordinate + * @param cy center for y-coordinate + * @param angle the rotation angle in radians (defaults to Pi/4 radians or 45 degrees) + */ + def drawRotatedString (g2d: Graphics2D, text: String, + cx: Int, cy: Int, angle: Double = Pi/4): Unit = + val oldTx = g2d.getTransform // save current transform + val fm = g2d.getFontMetrics + val textW = fm.stringWidth (text) + + // Rotate around center (cx, cy), then draw text centered on cx + g2d.rotate (angle, cx.toDouble, cy.toDouble) + g2d.drawString (text, (cx - textW / 2), cy) + + g2d.setTransform (oldTx) // restore + end drawRotatedString + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the color for cell (i, j) in the heat-map based on h_ij, returning + * the cell color and its complementary color (for writing a value). + * @param h_ij the value of cell (i, j) in the heat matrix + */ + def computeColor (h_ij: Double): (Color, Color) = + val frac = (h_ij - h_min) / (h_max - h_min).toDouble + + // high to low: M, B, C, G, Y, R, W + + val rgb = if frac > c(1) then + val z = (mc * (frac - c(1))).toInt; (z, 0, z) // magenta + else if frac > c(2) then + (0, 0, (mc * (frac - c(2))).toInt) // blue + else if frac > c(3) then + val z = (mc * (frac - c(3))).toInt; (0, z, z) // cyan + else if frac > c(4) then + (0, (mc * (frac - c(4))).toInt, 0) // green + else if frac > c(5) then + val z = (mc * (frac - c(5))).toInt; (z, z, 0) // yellow + else + ((mc * (frac - c(6))).toInt, 0, 0) // red + + val comple = 255 - (rgb._1 + rgb._2 + rgb._3) / 2 // complementary color + println (s"h_ij = $h_ij, frac = $frac, rgb = $rgb") + (new Color (rgb._1, rgb._2, rgb._3), + new Color (comple, comple, comple)) + end computeColor + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Clip/cut string s to mx characters and return half its pixel length. 
+ * @param s the value to clip/cut + * @param mx the maximum number of characters + */ + def clip (s: String, mx: Int = 12): (String, Int) = + val len = math.min (s.length, mx) + (s.substring (0, len), 4 * (len - 1)) + end clip + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert a heat matrix to a string. + */ + override def toString: String = heat.toString + +end HmCanvas + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `heatMapTest` main function is used to test the `HeatMap` class. + * It tests creating a heat-map for a matrix + * > runMain scalation.mathstat.heatMapTest + */ +@main def heatMapTest (): Unit = + + import MatrixDOps.⊗ + + val x = VectorD (1, 2, 3, 4, 5, 6) + val heat = x ⊗ x + val name = Array ("v0", "v1", "v2", "v3", "v4", "v5") + + val hm = new HeatMap (heat, name, "HeatMap for matrix heat") + println (s"heatMap = $hm") + +end heatMapTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `heatMapTest2` main function is used to test the `HeatMap` class. + * It tests creating a heat-map for a CORRELATION matrix using a COVID-19 dataset. + * > runMain scalation.mathstat.heatMapTest2 + */ +@main def heatMapTest2 (): Unit = + + val fileName = "covid_19_weekly.csv" + + val (xy, name) = MatrixD.loadH (fileName, 1) // trim first column + val heat = xy.corr // correlation x_i vs. x_j + + val hm = new HeatMap (heat, name, "HeatMap for correlation matrix") + println (s"heatMap = $hm") + +end heatMapTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `heatMapTest3` main function is used to test the `HeatMap` class. + * It tests creating a heat-map for a LAGGED CROSS CORRELATION matrix for name(0) + * using a COVID-19 dataset. 
+ * > runMain scalation.mathstat.heatMapTest3 + */ +@main def heatMapTest3 (): Unit = + + val fileName = "covid_19_weekly.csv" + + val (xy, name) = MatrixD.loadH (fileName, 1) // trim first column + val (x, y) = (xy.not(?, 1), xy(?, 1)) // "new_deaths" is column 1 + swap (name, 0, 1) // swap to make name(0) = "new_deaths" + + val lags = 20 // check 20 lags 0, ..., 19 + val yx = y +^: x // prepend y "new_deaths" + val heat = new MatrixD (lags, yx.dim2) + for j <- yx.indices2; l <- 0 until lags do + heat(l, j) = y.ccorr (yx(?, j), l) // cross correlation y vs. x_j at lag l + + val hm = new HeatMap (heat.transpose, name, + s"HeatMap for lagged cross correlation matrix for ${name(0)}") + println (s"heatMap = $hm") + +end heatMapTest3 + diff --git a/src/main/scala/scalation/mathstat/Histogram.scala b/src/main/scala/scalation/mathstat/Histogram.scala index 19a2b6a1b..c47712039 100644 --- a/src/main/scala/scalation/mathstat/Histogram.scala +++ b/src/main/scala/scalation/mathstat/Histogram.scala @@ -14,13 +14,18 @@ package mathstat import scala.math.{ceil, floor, min} import scalation.random.{Normal, Uniform} -import scalation.scala2d.{BasicStroke, Graphics, Graphics2D, Line, Panel, Rectangle, VizFrame} +import scalation.scala2d.{BasicStroke, Graphics, Graphics2D, Line, Rectangle, VizFrame, ZoomablePanel} import scalation.scala2d.Colors._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `Histogram` class takes a vector of values, counts the number of values - * in each of several intervals and displays the counts vertically in a - * histogram. + * in each of several intervals and displays the counts vertically in a histogram. + *------------------------------------------------------------------------------ + * Zoom functionality has two options: + * (1) mouse wheel controls the amount of zooming (in/out); + * (2) mouse dragging repositions the objects in the panel (drawing canvas). 
+ * @see ZoomablePanel + *------------------------------------------------------------------------------ * @param value the vector of values (want several per interval) * @param numIntervals the number of intervals (typically 5 to 100) * @param _title title of the histogram @@ -31,7 +36,7 @@ class Histogram (value: VectorD, numIntervals: Int = 40, _title: String = "Histo /** Create a drawing canvas */ - val canvas = new HCanvas (getW, getH, value, numIntervals, counts) + private val canvas = new HCanvas (getW, getH, value, numIntervals, counts) getContentPane ().add (canvas) setVisible (true) @@ -70,7 +75,8 @@ end FramelessHistogram * @param counts the counts per interval, if available */ class HCanvas (frameW: Int, frameH: Int, value: VectorD, numIntervals: Int, counts: VectorD = null) - extends Panel: +// extends Panel: + extends ZoomablePanel: private val EPSILON = 1E-9 private val offset = 50 @@ -94,7 +100,9 @@ class HCanvas (frameW: Int, frameH: Int, value: VectorD, numIntervals: Int, coun */ override def paintComponent (gr: Graphics): Unit = super.paintComponent (gr) - val g2d = gr.asInstanceOf [Graphics2D] // use hi-res + val g2d = gr.asInstanceOf [Graphics2D] // use hi-res + + g2d.setTransform (at) // used for zooming (at @see `ZoomablePanel`) var x_pos = 0 var y_pos = 0 @@ -156,7 +164,6 @@ class HCanvas (frameW: Int, frameH: Int, value: VectorD, numIntervals: Int, coun h else counts - end if end computeHistogram //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -170,7 +177,6 @@ class HCanvas (frameW: Int, frameH: Int, value: VectorD, numIntervals: Int, coun for i <- 0 until numIntervals do val h = histogram(i) * scale c(i) = VectorD (baseX + i * w, baseY.toDouble - h, w, h) - end for c end computeCoordinates @@ -197,23 +203,23 @@ end HCanvas val uniformDist = new VectorD (samples) for i <- 0 until samples do var sum = 0.0 - for j <- 0 until k do sum += uniformRV.gen + cfor (0, k) { _ => sum += uniformRV.gen } uniformDist(i) = 
sum end for val h1 = new Histogram (uniformDist, intervals, "Histogram for Sum of Uniform") - println ("histogram = " + h1) + println (s"histogram = $h1") val normalRV = Normal (0, 1) val normalDist = new VectorD (samples) - for (i <- 0 until samples) normalDist(i) = normalRV.gen + for i <- 0 until samples do normalDist(i) = normalRV.gen val h2 = new Histogram (normalDist, intervals, "Histogram for Normal") - println ("histogram = " + h2) + println (s"histogram = $h2") // val h3 = new Histogram (VectorD (0.0, 2.0, 3.0, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 8.0, 9.0), // 5, "Simple Histogram") -// println ("histogram = " + h3) +// println (s"histogram = $h3") end histogramTest diff --git a/src/main/scala/scalation/mathstat/MatrixCalc.scala b/src/main/scala/scalation/mathstat/MatrixCalc.scala index 8dba67304..c524454d4 100644 --- a/src/main/scala/scalation/mathstat/MatrixCalc.scala +++ b/src/main/scala/scalation/mathstat/MatrixCalc.scala @@ -101,7 +101,7 @@ end matrixCalc0 */ @main def matrixCalc2 (): Unit = - val csvFile = "scores.csv" + val csvFile = ".../...csv" val xx = MatrixD.load (csvFile, 1, 3, fullPath = true) // skip 1 row and 3 columns val n = xx.dim2 - 3 // last column should be empty @@ -109,9 +109,11 @@ end matrixCalc0 println (s"x = $x") - val w = x(0)(0 until n) // weights for grades +// val w = x(0)(0 until n) // weights for grades +// val w = VectorD (2,2,2.5,0.25,0.25,0.6,0.6,0.6,1.2) // 4 projects + val w = VectorD (2,2,2.5,0.25,0.25,0.75,0.75,1.5) // 3 projects println (s"total weight = ${w.sum}") // total weight - for i <- 1 until x.dim do x(i, n) = round (w dot x(i)(0 until n)).toDouble // weighted total + for i <- 0 until x.dim do x(i, n) = round (w dot x(i)(0 until n)).toDouble // weighted total println (s"new x = $x") // updated matrix end matrixCalc2 @@ -123,37 +125,14 @@ end matrixCalc2 */ @main def matrixCalc3 (): Unit = - val x1 = VectorD (81,93,80,100,100,95,77,94) // original +1 on Exam I - val x2 = VectorD (80,93,80,120,100,95,77,94) // 
corrected +20 on Homework - val x3 = VectorD (98,96,88,100,100,100,100,100,100) // make-up + val x1 = VectorD (80,90,80,100,100,90,80,90) + val x2 = VectorD (90,90,80,100,100,100,100,100,100) val w = VectorD (2,2,2.5,0.25,0.25,0.75,0.75,1.5) val w2 = VectorD (2,2,2.5,0.25,0.25,0.6,0.6,0.6,1.2) - println (s"w * x1 = ${w dot x1}") // 868 - println (s"w * x2 = ${w dot x2}") // 871 - println (s"w2 * x3 = ${w2 dot x3}") // 958 + println (s"w * x1 = ${w dot x1}") + println (s"w2 * x2 = ${w2 dot x2}") end matrixCalc3 - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `matrixCals4` main function allows for Custom calculations. - * > runMain scalation.mathstat.matrixCalc4 - */ -@main def matrixCalc4 (): Unit = - - val x = MatrixD ((7, 13), - 4.67647, 4.85294, 4.70588, 4.82353, 4.73529, 4.55882, 4.67647, 4.58824, 4.97059, 4.76471, 4.55882, 4.50000, 4.64706, // 6370 sp24 - 4.12500, 4.12500, 4.37500, 4.37500, 4.25000, 4.12500, 4.37500, 4.12500, 4.75000, 4.50000, 4.62500, 4.00000, 4.62500, // 4370 sum24 - 5.00000, 5.00000, 5.00000, 5.00000, 5.00000, 5.00000, 5.00000, 5.00000, 5.00000, 5.00000, 5.00000, 5.00000, 5.00000, // 6370 sum24 - 3.33333, 3.50000, 3.33333, 4.08333, 3.50000, 2.91667, 3.72727, 3.33333, 4.41667, 3.75000, 4.08333, 3.16667, 3.50000, // 4360 fa24 - 4.77778, 4.88889, 4.66667, 4.88889, 4.77778, 4.55556, 4.77778, 4.66667, 4.88889, 4.88889, 4.88889, 4.66667, 4.77778, // 6360 fa24 - 3.54545, 3.63636, 4.09091, 4.18182, 3.63636, 3.00000, 4.09091, 3.72727, 4.72727, 3.63636, 4.58333, 3.45455, 3.91667, // 4370 fa24 - 3.83333, 4.08333, 4.58333, 4.83333, 4.16667, 3.91667, 4.08333, 4.00000, 4.83333, 4.75000, 4.58333, 4.00000, 4.50000) // 6370 fa24 - - for i <- x.indices do println (x(i).mean) - println (x.sum / (7 * 13)) - -end matrixCalc4 - diff --git a/src/main/scala/scalation/mathstat/MatrixD.scala b/src/main/scala/scalation/mathstat/MatrixD.scala index ee3c6d0b1..ae11d6c1a 100644 --- 
a/src/main/scala/scalation/mathstat/MatrixD.scala +++ b/src/main/scala/scalation/mathstat/MatrixD.scala @@ -14,16 +14,24 @@ package mathstat import java.util.Arrays.copyOf import java.io.PrintWriter +import scala.annotation.unused import scala.collection.immutable.{IndexedSeq => IIndexedSeq, Set => ISet} import scala.collection.mutable.{ArrayBuffer, IndexedSeq, Set} -import scala.math.round +import scala.math.{min, round} import scala.util.control.Breaks.{break, breakable} -/** Top-level type definition for functions mapping: +/** Top-level type definition for functions mapping from `MatrixD`: */ type FunctionM2V = MatrixD => VectorD // matrix `MatrixD` to vector `VectorD` type FunctionM2M = MatrixD => MatrixD // matrix `MatrixD` to matrix `MatrixD` +/** Top-level type definition for functions mapping from `VectorD`: + */ +type FunctionV2M = VectorD => MatrixD // matrix `MatrixD` to vector `VectorD` +type FunctionV2MV = VectorD => (MatrixD, VectorD) // vector `VectorD` to (matrix, vector) (`MatrixD`, `VectorD`) +type FunctionVV2M = (VectorD, VectorD) => MatrixD // (vector, vector) (`VectorD`, VectorD`) to `MatrixD` + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Matricize a vector function (V2V) by applying it to each row of a matrix. * MatrixD (for i <- x.indices yield f(x(i))) @@ -86,11 +94,6 @@ class MatrixD (val dim: Int, if dim != v_dim || dim2 != v_dim2 then flaw ("init", s"dimensions are wrong: dims = ($dim, $dim2) vs. ($v_dim, $v_dim2)") // throw new Exception () - end if -// if dim == 0 || dim2 == 0 then -// flaw ("init", s"warning, a matrix dimension is zero: dims = ($dim, $dim2)") -// throw new Exception () -// end if end if /** The row index range @@ -187,6 +190,20 @@ class MatrixD (val dim: Int, new VectorD (ir.size, a) end apply + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the COLUMNS in range jr of this matrix for row i as a vector. 
+ * usage: x(2, 3 until 6) + * @param jr the index range of columns to return + * @param i the row index + */ + def apply (i: Int, jr: Range): VectorD = + val v_i = v(i) + val j1 = jr.start + val a = Array.ofDim [Double] (jr.size) + cfor (jr) { j => a(j-j1) = v_i(j) } + new VectorD (jr.size, a) + end apply + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the ROWS in index set iset of this matrix as a new independent matrix. * usage: x(Set (3, 5, 7)) @@ -231,7 +248,7 @@ class MatrixD (val dim: Int, * @param all use the all rows indicator ? * @param j the column index */ - inline def apply (all: Char, j: Int): VectorD = + inline def apply (@unused all: Char, j: Int): VectorD = val a = Array.ofDim [Double] (dim) cfor (0, dim) { i => a(i) = v(i)(j) } new VectorD (dim, a) @@ -243,7 +260,7 @@ class MatrixD (val dim: Int, * @param all use the all rows indicator ? * @param jr the index range of columns to return */ - def apply (all: Char, jr: Range): MatrixD = + def apply (@unused all: Char, jr: Range): MatrixD = val j1 = jr.start val a = Array.ofDim [Double] (dim, jr.size) cfor (0, dim) { i => @@ -259,7 +276,7 @@ class MatrixD (val dim: Int, * @param all use the all rows indicator ? * @param jset the index set of columns to return */ - def apply (all: Char, jset: Set [Int]): MatrixD = + def apply (@unused all: Char, jset: Set [Int]): MatrixD = val a = Array.ofDim [Double] (dim, jset.size) cfor (0, dim) { i => val v_i = v(i); val a_i = a(i) @@ -275,7 +292,7 @@ class MatrixD (val dim: Int, * @param all use the all rows indicator ? * @param jdx the index set of columns to return */ - def apply (all: Char, jdx: IndexedSeq [Int]): MatrixD = + def apply (@unused all: Char, jdx: IndexedSeq [Int]): MatrixD = val a = Array.ofDim [Double] (dim, jdx.size) cfor (0, dim) { i => val v_i = v(i); val a_i = a(i) @@ -290,7 +307,7 @@ class MatrixD (val dim: Int, * usage: x(?) * @param diag use the all diagonal elements indicator ? 
*/ - inline def apply (diag: Char): VectorD = + inline def apply (@unused diag: Char): VectorD = val a = Array.ofDim [Double] (minDim) cfor (0, minDim) { i => a(i) = v(i)(i) } new VectorD (minDim, a) @@ -334,7 +351,7 @@ class MatrixD (val dim: Int, * @param all use the all rows indicator ? * @param j the column index to exclude */ - def not (all: Char, j: Int): MatrixD = + def not (@unused all: Char, j: Int): MatrixD = if j == 0 then apply(?, j+1 until dim2) else if j == dim2-1 then apply(?, 0 until j) else apply(?, 0 until j) ++^ apply(?, j+1 until dim2) @@ -449,7 +466,7 @@ class MatrixD (val dim: Int, * @param j the column index * @param u the vector to assign */ - def update (all: Char, j: Int, u: VectorD): Unit = + def update (@unused all: Char, j: Int, u: VectorD): Unit = cfor (0, dim) { i => v(i)(j) = u(i) } end update @@ -472,7 +489,7 @@ class MatrixD (val dim: Int, * @param d2 use the all diagonal elements indicator ? * @param s the scalar value to assign */ - def update (d1: Char, d2: Char, s: Double): Unit = + def update (@unused d1: Char, @unused d2: Char, s: Double): Unit = cfor (0, minDim) { i => v(i)(i) = s } end update @@ -483,7 +500,7 @@ class MatrixD (val dim: Int, * @param d2 use the all diagonal elements indicator ? 
* @param u the vector to assign */ - def update (d1: Char, d2: Char, u: VectorD): Unit = + def update (@unused d1: Char, @unused d2: Char, u: VectorD): Unit = cfor (0, minDim) { i => v(i)(i) = u(i) } end update @@ -584,8 +601,8 @@ class MatrixD (val dim: Int, new MatrixD (dim2, dim, a) end transpose - inline def 𝐓: MatrixD = transpose // unicode (𝐓) mathematical bold capital T -// inline def Ƭ: MatrixD = transpose // unicode (Ƭ) + inline def ᵀ: MatrixD = transpose // Unicode (ᵀ) symbol for transpose +// inline def 𝐓: MatrixD = transpose // Unicode (𝐓) mathematical bold capital T //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Concatenate (row-wise) this matrix and matrix y (requires y to have the @@ -886,7 +903,7 @@ class MatrixD (val dim: Int, new MatrixD (dim, dim2, a) end *~ - inline def ⊙ (y: MatrixD): MatrixD = *~ (y) // unicode XNOR gate + inline def ⊙ (y: MatrixD): MatrixD = *~ (y) // Unicode XNOR gate //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Multiply this matrix by vector u to produce another matrix v_ij * u_j. @@ -903,7 +920,7 @@ class MatrixD (val dim: Int, new MatrixD (dim, dm, a) end *~ - inline def ⊙ (y: VectorD): MatrixD = *~ (y) // unicode XNOR gate + inline def ⊙ (y: VectorD): MatrixD = *~ (y) // Unicode XNOR gate //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Multiply vector u by this matrix to produce another matrix u_i * v_ij. @@ -1106,7 +1123,7 @@ class MatrixD (val dim: Int, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Raise the elements in this matrix to the p-th power (e.g., x~^2 = x *~ x) * Being element-wise, x~^2 is not x * x. 
- * @param p the scalar power + * @param p the scalar power (double) */ def ~^ (p: Double): MatrixD = val a = Array.ofDim [Double] (dim, dim2) @@ -1117,6 +1134,20 @@ class MatrixD (val dim: Int, new MatrixD (dim, dim2, a) end ~^ + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Raise the elements in this matrix to the p-th power (e.g., x↑2 = x *~ x) + * Extended to handle a negative base. + * @param p the scalar power (rational number) + */ + def ↑ (p: Rat): MatrixD = + val a = Array.ofDim [Double] (dim, dim2) + cfor (0, dim) { i => + val v_i = v(i); val a_i = a(i) + cfor (0, dim2) { j => a_i(j) = v_i(j) ↑ p } + } // cfor + new MatrixD (dim, dim2, a) + end ↑ + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Raise this matrix to the p-th power (for some integer p >= 1) using * a divide and conquer algorithm and matrix multiplication (x~^^2 = x * x). @@ -1199,7 +1230,7 @@ class MatrixD (val dim: Int, new MatrixD (dim, y.dim, a) end dot - inline def ∙ (y: MatrixD): MatrixD = dot (y) // unicode bullet point + inline def ∙ (y: MatrixD): MatrixD = dot (y) // Unicode bullet point //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the dot product of this matrix and vector y. @@ -1219,7 +1250,7 @@ class MatrixD (val dim: Int, new VectorD (dim2, a) end dot - inline def ∙ (y: VectorD): VectorD = dot (y) // unicode bullet point + inline def ∙ (y: VectorD): VectorD = dot (y) // Unicode bullet point //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the 'valid' (no padding) convolution of cofilter matrix c and input matrix x. 
@@ -1246,7 +1277,7 @@ class MatrixD (val dim: Int, * Usage: c conv_ x * @param x the input/data matrix */ - inline infix def conv_ (x: MatrixD): MatrixD = reverse.conv (x) // FIX - may neeed another reverse method + inline infix def conv_ (x: MatrixD): MatrixD = reverse.conv (x) // FIX - may need another reverse method //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the 'same' (with padding) convolution of cofilter matrix c and input matrix x. @@ -1355,6 +1386,12 @@ class MatrixD (val dim: Int, } // breakable end showDiff + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Iterate over this matrix row by row applying the given function. + * @param f the function to apply + */ + def foreach [U] (f: VectorD => U): Unit = { var i = 0; while i < dim do { f (this(i)); i += 1 } } + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Map each row of this matrix by applying function f to each row vector and * returning the collected result as a vector. @@ -1397,7 +1434,7 @@ class MatrixD (val dim: Int, end map_ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Log transform this matrix by using math.sqrt. + /** Square root transform this matrix by using math.sqrt. */ def sqrt: MatrixD = map_ (math.sqrt (_)) @@ -1421,6 +1458,26 @@ class MatrixD (val dim: Int, */ def expm1: MatrixD = map_ (math.expm1 (_)) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Cos transform this matrix by using math.cos (the inverse of acos). + */ + def cos: MatrixD = map_ (math.cos (_)) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Inverse cos transform this matrix by using math.acos (the inverse of cos). 
+ */ + def acos: MatrixD = map_ (math.acos (_)) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Sin transform this matrix by using math.sin (the inverse of asin). + */ + def sin: MatrixD = map_ (math.sin (_)) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Inverse sin transform this matrix by using math.asin (the inverse of sin). + */ + def asin: MatrixD = map_ (math.asin (_)) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the sum of this matrix, i.e., the sum of all its elements. * Σ (indices) { i => Σ (indices2) { j => v(i)(j) }} @@ -1488,7 +1545,7 @@ class MatrixD (val dim: Int, end min //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the minimum and maxinum value for each column in the matrix. + /** Return the minimum and maximum value for each column in the matrix. */ def min_max: MatrixD = MatrixD (min, max) @@ -1519,18 +1576,25 @@ class MatrixD (val dim: Int, end norm1 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the square of the Frobenius-norm of this matrix, i.e., + /** Compute the Euclidean norm (2-norm) (or its square) of each column vector + * in this matrix. + */ + def normSq: VectorD = VectorD (indices2.map (apply(?, _).normSq)) + def norm: VectorD = VectorD (indices2.map (apply(?, _).norm)) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the square of the Frobenius norm of this matrix, i.e., * the sum of the squared values over all the elements (sse). 
* Σ (indices) { i => apply(i).normSq } */ def normFSq: Double = var sum = 0.0 - cfor (0, dim) {i => sum += apply(i).normSq } + cfor (0, dim) { i => sum += apply(i).normSq } sum end normFSq //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the Frobenius-norm of 'this' matrix, i.e., the square root of + /** Compute the Frobenius norm of this matrix, i.e., the square root of * the sum of the squared values over all the elements (sqrt (sse)). * @see en.wikipedia.org/wiki/Matrix_norm#Frobenius_norm */ @@ -1547,9 +1611,14 @@ class MatrixD (val dim: Int, end mean //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the matrix/grand mean of this matrix. + /** Compute the row means of this matrix. + * VectorD (for i <- indices yield apply(i).mean) */ - def mmean: Double = sum / (dim * dim2) + def meanRow: VectorD = + val a = Array.ofDim [Double] (dim) + cfor (0, dim) { i => a(i) = apply(i).mean } + new VectorD (a.size, a) + end meanRow //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the column variances of this matrix. @@ -1560,21 +1629,64 @@ class MatrixD (val dim: Int, new VectorD (a.size, a) end variance + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the row variances of this matrix. + */ + def varianceRow: VectorD = + val a = Array.ofDim [Double] (dim) + cfor (0, dim) { i => a(i) = apply(i).variance } + new VectorD (a.size, a) + end varianceRow + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the column standard deviations of this matrix. */ def stdev: VectorD = variance.sqrt //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the column maean and standard deviations of this matrix. + /** Compute the row standard deviations of this matrix. 
+ */ + def stdevRow: VectorD = varianceRow.sqrt + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the column mean and standard deviations of this matrix. */ def mu_sig: MatrixD = MatrixD (mean, stdev) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the matrix/grand mean of this matrix. + */ + def mmean: Double = sum / (dim * dim2) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the matrix/overall average variance of this matrix. + */ + def mvariance: Double = variance.mean + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the matrix/overall average standard deviation of this matrix. + */ + def mstdev: Double = math.sqrt (mvariance) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the column first Q1 (1/4) and third Q3 (3/4) quartiles of this matrix. + */ + def q1_q3: MatrixD = + val a = new MatrixD (2, dim2) + cfor (0, dim2) { j => a(?, j) = apply(?, j).q1_q3 } + a + end q1_q3 + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return a matrix that is in the reverse row order of this matrix. */ def reverse: MatrixD = new MatrixD (dim, dim2, v.reverse) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return whether this matrix is square (# row = # columns) + */ + inline def isSquare: Boolean = dim == dim2 + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return whether this matrix is symmetric (i.e, equals its transpose). 
*/ @@ -1693,7 +1805,7 @@ class MatrixD (val dim: Int, * @see stats.stackexchange.com/questions/97051/] * building-the-connection-between-cosine-similarity-and-correlation-in-r */ - def cos: MatrixD = + def cosSim: MatrixD = val cs = MatrixD.eye (dim2, dim2) // cosine matrix cfor (0, cs.dim) { i => @@ -1706,13 +1818,15 @@ class MatrixD (val dim: Int, cs(j, i) = cs (i, j) }} // cfor cs - end cos + end cosSim //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Convert this matrix to the same matrix, i.e., return this matrix. */ def toMatrixD: MatrixD = this + private [mathstat] def toArray: Array [Array [Double]] = v + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Convert this matrix to a matrix where all the elements have integer values. */ @@ -1737,10 +1851,25 @@ class MatrixD (val dim: Int, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Write this matrix to a CSV-formatted text file with name fileName. + * Each row in the matrix will be stored a line in the file, with values comma separated. 
* @param fileName the name of file to hold the data + * @param header the optional header (first line) for the file, giving the column headings + * @param fullPath flag indicating whether the user wants full or relative file paths + * defaults to false (relative paths) */ - def write (fileName: String): Unit = - val out = new PrintWriter (fileName) + def write (fileName: String, header: Array [String] = null, + fullPath: Boolean = false): Unit = + val path = if fullPath then fileName + else DATA_DIR + fileName // relative to DATA_DIR + val out = new PrintWriter (path) + if header != null then + cfor (0, dim2) { j => + out.print (header(j)) + if j < dim2-1 then out.print (",") + } // cfor + out.println () + end if + cfor (0, dim) { i => cfor (0, dim2) { j => out.print (v(i)(j)) @@ -1748,9 +1877,35 @@ class MatrixD (val dim: Int, } // cfor out.println () } // cfor + println (s"MatrixD.write: matrix with dims = $dims written to $path") out.close end write + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + // Helper methods for Autograd + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the dimensions of the matrix as a list. + * The first element is the number of rows and the second is the number of columns. + * @return a List [Int] containing the matrix dimensions. + */ + def shape: List [Int] = List (dim, dim2) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Creates a new matrix with the same dimensions as this matrix where every element + * is set to zero. + * @return a MatrixD filled with zeros. + */ + def zerosLike: MatrixD = MatrixD.fill (dim, dim2, 0) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Creates a new matrix with the same dimensions as this matrix where every element + * is set to one. 
+ * @return a MatrixD filled with ones. + */ + def onesLike: MatrixD = MatrixD.fill (dim, dim2, 1) + end MatrixD @@ -1837,27 +1992,37 @@ object MatrixD: x end fromVector + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Extract the i-th vectors from a pair of matrices. + * @param x_y the matrix pair + * @param i the extraction index + */ + inline def at (x_y: (MatrixD, MatrixD), i: Int): (VectorD, VectorD) = (x_y._1(i), x_y._2(i)) + private val DEF_SEP = ',' // default character separating the values private val PROGRESS = 1000 // give feedback at progress count //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a matrix by reading from a text file, e.g., a CSV file. + * @see the `write` method in the class to write a matrix to a CSV file. * @param fileName the name of file holding the data * @param skip the initial number of lines/rows to skip * @param skipCol the initial number of columns to skip * @param sp the character used to separate values (',', '\t', ...) 
* @param fullPath flag indicating whether to use full-path or path relative to 'DATA_DIR' * defaults to false (relative paths) + * @param stop the line/row number to stop before reaching the EOF (exclusive) */ def load (fileName: String, skip: Int = 0, skipCol: Int = 0, - sp: Char = DEF_SEP, fullPath: Boolean = false): MatrixD = + sp: Char = DEF_SEP, fullPath: Boolean = false, stop: Int = MAX_INTEGER): MatrixD = val lines = readFileIntoArray (fileName, fullPath) // array of strings/lines val m = lines.length // number lines in the file - val mm = m - skip // number of lines with data + val ms = min (m, stop) // line number to stop before + val mm = ms - skip // number of lines with data val a = Array.ofDim [Array [Double]] (mm) // array buffer to hold data values - var n = -1 // number of values in a row (TBD) + var n = -1 // number of values in a row (assigned below) - cfor (skip, m) { i => + cfor (skip, ms) { i => val j = i - skip a(j) = for str <- lines(i).split (sp).drop (skipCol) yield str.mkDouble if (j+1) % PROGRESS == 0 then println (s"load: read $j data rows so far ...") @@ -1868,6 +2033,38 @@ object MatrixD: new MatrixD (mm, n, a) end load + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a matrix and a header by reading from a text file, e.g., a CSV file. + * Assumes the column names (header) is in the first line of the file. + * @see the `write` method in the class to write a matrix to a CSV file. + * @param fileName the name of file holding the data + * @param skipCol the initial number of columns to skip + * @param sp the character used to separate values (',', '\t', ...) 
+ * @param fullPath flag indicating whether to use full-path or path relative to 'DATA_DIR' + * defaults to false (relative paths) + * @param stop the line/row number to stop before reaching the EOF (exclusive) + */ + def loadH (fileName: String, skipCol: Int = 0, sp: Char = DEF_SEP, + fullPath: Boolean = false, stop: Int = MAX_INTEGER): (MatrixD, Array [String]) = + val lines = readFileIntoArray (fileName, fullPath) // array of strings/lines + val m = lines.length // number lines in the file + val ms = min (m, stop) // line number to stop before + val hd = lines(0).split (sp).drop (skipCol) // get the column header from line 0 + val mm = ms - 1 // number of lines with data + val a = Array.ofDim [Array [Double]] (mm) // array buffer to hold data values + var n = -1 // number of values in a row (assigned below) + + cfor (1, ms) { i => + val j = i - 1 + a(j) = for str <- lines(i).split (sp).drop (skipCol) yield str.mkDouble + if (j+1) % PROGRESS == 0 then println (s"load: read $j data rows so far ...") + if n < 0 then n = a(j).length + else if a(j).length != n then flaw ("load", s"row $j has the wrong length ${a(j).length} != $n") + } // cfor + println (s"load: read in an $mm-by-$n matrix from $fileName") + (new MatrixD (mm, n, a), hd) + end loadH + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a matrix by reading from a text file, e.g., a CSV file. * Convert string columns into ordinal/integer columns. @@ -1913,8 +2110,7 @@ object MatrixD: * VectorS ("low", "medium", "high") for 0, 1, 2 */ def mkOrdinal (str: String, ordStr: VectorS): Int = - val (xe, map) = ordStr.map2Int // @see `VectorS` - map (str) // return the str mapped to an integer + ordStr.map2Int._2 (str) // @see `VectorS`, return str mapped to an integer end mkOrdinal //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -1922,7 +2118,7 @@ object MatrixD: * Use readFileIter to only read the necessary columns from the file. 
* @param fileName the name of file holding the data * @param xCols the columns that are to make up the x-matrix - * @param yCol the column that is to make up the y-vector (use the defualt -1 to skip this) + * @param yCol the column that is to make up the y-vector (use the default -1 to skip this) * @param skip the initial number of lines to skip * @param sp the character used to separate values (',', '\t', ...) * @param fullPath flag indivating whether to use full-path or path relative to 'DATA_DIR' @@ -1965,7 +2161,8 @@ object MatrixD: end one //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a matrix of dimensions dim by dim2 where all elements equal zero. + /** Create an identity matrix of dimensions dim by dim2 where all elements equal zero, + * except the main diagonal where elements are set to one. * @param dim the row dimension * @param dim2 the column dimension */ @@ -1975,6 +2172,21 @@ object MatrixD: x end eye + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a diagonal matrix of dimensions dim by dim2 where all elements equal zero, + * except the main diagonal where elements are set to y_i. + * @param dim the row dimension + * @param dim2 the column dimension + * @param y the vector to set the main diagonal to + */ + def diag (dim: Int, dim2: Int, y: VectorD): MatrixD = + val x = new MatrixD (dim, dim2) + val n = x.minDim + if y.dim < n then flaw ("diag", s"the dimension of vector y = ${y.dim} is too small to fill diagonal") + cfor (0, x.minDim) { i => x(i, i) = y(i) } // set diagonal to y_i + x + end diag + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a matrix of dimensions dim by dim2 where all elements equal to the given value. * @param dim the row dimension @@ -1994,7 +2206,7 @@ object MatrixD: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the outer product of vector x and vector y. 
The result of the * outer product is a matrix where element (i, j) is the product of i-th element - * of x with the j-th element of y. + * of x with the j-th element of y, an x.dim by y.dim matrix. * @param x the first vector * @param y the second vector */ @@ -2004,7 +2216,7 @@ object MatrixD: new MatrixD (x.dim, y.dim, a) end outer - inline def ⊗ (x: VectorD, y: VectorD): MatrixD = outer (x, y) // unicode tensor product + inline def ⊗ (x: VectorD, y: VectorD): MatrixD = outer (x, y) // Unicode tensor product end MatrixD @@ -2014,19 +2226,27 @@ end MatrixD * operations, so that one can write 2.0 + x as well as x + 2.0. */ object MatrixDOps: - extension (a: Double) - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the element-wise sum (or difference, product, quotient) of - * scalar a and matrix x. - * @param a the scalar first operand - * @param x the vector second operand - */ + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the element-wise sum (or difference, product, quotient) of scalar a + * and matrix x, e.g., 2.0 * x. + * @param a the scalar first operand + * @param x the vector second operand + */ + extension (a: Double) def + (x: MatrixD): MatrixD = x + a def - (x: MatrixD): MatrixD = -x + a def * (x: MatrixD): MatrixD = x * a def / (x: MatrixD): MatrixD = x.recip * a + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the outer product of vector x and vector y. 
+ * @param x the vector first operand + * @param y the vector second operand + */ + extension (x: VectorD) + inline def ⊗ (y: VectorD): MatrixD = MatrixD.outer (x, y) + end MatrixDOps diff --git a/src/main/scala/scalation/mathstat/MatrixD2.scala b/src/main/scala/scalation/mathstat/MatrixD2.scala index 843fdc8dd..5757885ae 100644 --- a/src/main/scala/scalation/mathstat/MatrixD2.scala +++ b/src/main/scala/scalation/mathstat/MatrixD2.scala @@ -21,7 +21,6 @@ class MatrixD2 (val dim: Int, v = Array.ofDim [Double] (dim, dim2) else if dim != v.length || dim2 != v(0).length then flaw ("init", "dimensions are wrong") - end if private val flaw = flawf ("MatrixD2") // partial invocation of flaw function diff --git a/src/main/scala/scalation/mathstat/MatrixI.scala b/src/main/scala/scalation/mathstat/MatrixI.scala index 3f54a6216..20199357d 100644 --- a/src/main/scala/scalation/mathstat/MatrixI.scala +++ b/src/main/scala/scalation/mathstat/MatrixI.scala @@ -58,7 +58,6 @@ object MatrixI: flaw ("isIntegral", s"x($i, $j) = $x_ij is not integer-valued") allint = false break () - end if end for } // breakable allint diff --git a/src/main/scala/scalation/mathstat/PartiallyOrdered.scala b/src/main/scala/scalation/mathstat/PartiallyOrdered.scala new file mode 100644 index 000000000..4330ad448 --- /dev/null +++ b/src/main/scala/scalation/mathstat/PartiallyOrdered.scala @@ -0,0 +1,71 @@ + +package scalation +package mathstat + +// https://docs.scala-lang.org/contribute/bug-reporting-guide.html +// trait copied from Scala 3.7.0 API +// https://www.scala-lang.org/api/3.7.0/scala/math/PartiallyOrdered.html +// code for testing "unused implicit parameter" warning when using "-Wunused:all", + +trait PartiallyOrdered[+A] extends Any: + + type AsPartiallyOrdered[B] = B => PartiallyOrdered[B] + + /** Result of comparing `'''this'''` with operand `that`. + * Returns `None` if operands are not comparable. 
+ * If operands are comparable, returns `Some(x)` where + * - `x < 0` iff `'''this''' < that` + * - `x == 0` iff `'''this''' == that` + * - `x > 0` iff `'''this''' > that` + */ + infix def tryCompareTo [B >: A: AsPartiallyOrdered](that: B): Option[Int] + + def < [B >: A: AsPartiallyOrdered](that: B): Boolean = + (this tryCompareTo that) match + case Some(x) if x < 0 => true + case _ => false + + def > [B >: A: AsPartiallyOrdered](that: B): Boolean = + (this tryCompareTo that) match + case Some(x) if x > 0 => true + case _ => false + + def <= [B >: A: AsPartiallyOrdered](that: B): Boolean = + (this tryCompareTo that) match + case Some(x) if x <= 0 => true + case _ => false + + def >= [B >: A: AsPartiallyOrdered](that: B): Boolean = + (this tryCompareTo that) match + case Some(x) if x >= 0 => true + case _ => false + + +case class MyPartialOrder (x: Int, y: Int) extends PartiallyOrdered [MyPartialOrder]: + + infix def tryCompareTo [B >: MyPartialOrder: AsPartiallyOrdered](that: B): Option[Int] = + + that match + case that: MyPartialOrder if x == that.x && y == that.y => Some (0) + case that: MyPartialOrder if x <= that.x && y <= that.y => Some (-1) + case that: MyPartialOrder if x >= that.x && y >= that.y => Some (1) + case _ => None + +/* + if ! that.isInstanceOf [MyPartialOrder] then return None + val b = that.asInstanceOf [MyPartialOrder] + if x == b.x && y == b.y then Some (0) + else if x <= b.x && y <= b.y then Some (-1) + else if x >= b.x && y >= b.y then Some (1) + else None + +[warn] -- [E198] Unused Symbol Warning: scalation_2.0/src/main/scala/scalation/mathstat/PartiallyOrdered.scala:41:65 +[warn] 41 | infix def tryCompareTo [B >: MyPartialOrder: AsPartiallyOrdered](that: B): Option[Int] = +[warn] | ^ +[warn] | unused implicit parameter + +This there another solution besides using @nowarn (@unused did not work)? +For version 3.7.0, adding an explicit return, makes the warning go away. 
This should be fixed in 3.7.1 +Fixed by 3.7.1-RC2 +*/ + diff --git a/src/main/scala/scalation/mathstat/Pivoting.scala b/src/main/scala/scalation/mathstat/Pivoting.scala index 470333a0f..45048ddc3 100644 --- a/src/main/scala/scalation/mathstat/Pivoting.scala +++ b/src/main/scala/scalation/mathstat/Pivoting.scala @@ -46,10 +46,13 @@ end Pivoting //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `PivotingTest` object is used to test the `Pivoting` trait. - * > runMain scalation.mathstat.PivotingTest +/** The `pivotingTest` main function is used to test the `Pivoting` trait. + * > runMain scalation.mathstat.pivotingTest */ -object PivotingTest extends App with Pivoting: +@main def pivotingTest (): Unit = + + object Pivoting_ extends Pivoting + import Pivoting_._ val a = MatrixD ((3, 3), 1, 2, 3, 4, 5, 6, @@ -61,5 +64,5 @@ object PivotingTest extends App with Pivoting: println (s"a1 = ${reorderRows (a, piv)}") println (s"a2 = ${reorderCols (a, piv)}") -end PivotingTest +end pivotingTest diff --git a/src/main/scala/scalation/mathstat/Plot.scala b/src/main/scala/scalation/mathstat/Plot.scala index d45e7e9dd..0f178772a 100644 --- a/src/main/scala/scalation/mathstat/Plot.scala +++ b/src/main/scala/scalation/mathstat/Plot.scala @@ -30,14 +30,15 @@ import scalation.scala2d.Colors._ * @param x the x vector of data values (horizontal), use null to use y's index * @param y the y vector of data values (primary vertical, black) * @param z the z vector of data values (secondary vertical, red) to compare with y - * @param _title the title of the plot + * @param title the title of the plot * @param lines flag for generating a line plot */ -class Plot (x: VectorD, y: VectorD, z: VectorD = null, _title: String = "Plot y vs. x", lines: Boolean = false) - extends VizFrame (_title, null): +class Plot (x: VectorD, y: VectorD, z: VectorD = null, title: String = "Plot y vs. 
x", lines: Boolean = false) + extends VizFrame (title, null): + + private val xx: VectorD = if x == null then VectorD.range (0, y.dim) else x + private val canvas = new Canvas (xx, y, z, getW, getH, lines) - val xx: VectorD = if x == null then VectorD.range (0, y.dim) else x - val canvas = new Canvas (xx, y, z, getW, getH, lines) getContentPane.add (canvas, BorderLayout.CENTER) setVisible (true) @@ -51,14 +52,14 @@ object Plot: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a plot object from integer vectors. - * @param x the x vector of data values (horizontal) - * @param y the y vector of data values (primary vertical) - * @param z the z vector of data values (secondary vertical) to compare with y - * @param _title the title of the plot - * @param lines flag for generating a line plot + * @param x the x vector of data values (horizontal) + * @param y the y vector of data values (primary vertical) + * @param z the z vector of data values (secondary vertical) to compare with y + * @param title the title of the plot + * @param lines flag for generating a line plot */ - def apply (x: VectorI, y: VectorI, z: VectorI = null, _title: String, lines: Boolean = false): Plot = - new Plot (x.toDouble, y.toDouble, if z == null then null else z.toDouble, _title, lines) + def apply (x: VectorI, y: VectorI, z: VectorI = null, title: String, lines: Boolean = false): Plot = + new Plot (x.toDouble, y.toDouble, if z == null then null else z.toDouble, title, lines) end apply //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -72,8 +73,9 @@ object Plot: */ def drawAxes (g2d: Graphics2D, baseX: Int, baseY: Int, frameW: Int, frameH: Int, offset: Int, minX: Double, maxY: Double, deltaX: Double, deltaY: Double): Unit = - val stepsX = 10 // number of x-steps for axis - val stepsY = 10 // number of y-steps for axis + + val stepsX = 10 // number of x-steps for axis + val stepsY = 10 // number of y-steps for axis val axis = 
Line (0, 0, 0, 0) g2d.setPaint (black) @@ -90,7 +92,7 @@ object Plot: var x_pos = 0 var y_pos = baseY + 15 - var step = deltaX / stepsX // for x-axis + var step = deltaX / stepsX // for x-axis for j <- 0 to stepsX do val x_val = clip (minX + j * step) x_pos = offset - 8 + j * (frameW - 2 * offset) / stepsX @@ -99,8 +101,8 @@ object Plot: // Draw the labels on the y-axis - x_pos = baseX - 30 - step = deltaY / stepsY // for y-axis + x_pos = baseX - 40 + step = deltaY / stepsY // for y-axis for j <- 0 to stepsY do val y_val = clip (maxY - j * step) y_pos = offset + 2 + j * (frameH - 2 * offset) / stepsY @@ -109,12 +111,13 @@ object Plot: end drawAxes //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert value to string and cut out the first four characters. - * @param x the value to convert and cut + /** Convert value to string and cut out the first five characters. + * Caveat: only work for numbers < million, may rescale data otherwise + * @param x the value to convert and clip/cut */ def clip (x: Double): String = val s = x.toString - s.substring (0, min (s.length, 4)) + s.substring (0, min (s.length, 5)) end clip end Plot @@ -151,13 +154,13 @@ class Canvas (x: VectorD, y: VectorD, z: VectorD, width: Int, height: Int, lines // extends Panel: extends ZoomablePanel: - private val EPSILON = 1E-9 // number close to zero - private val SCALE = 10 // FIX - pass as a parameter - private val offset = 80 // offset frame to axis - private val frameW = width // frame width - private val frameH = height // frame height - private val baseX = offset // base for x-axis - private val baseY = frameH - offset // base for y-axis + private val EPSILON = 1E-9 // number close to zero + private val SCALE = 10 // FIX - pass as a parameter + private val offset = 80 // offset frame to axis + private val frameW = width // frame width + private val frameH = height // frame height + private val baseX = offset // base for x-axis + private val baseY = frameH - 
offset // base for y-axis private val minX = floor (SCALE * x.min) / SCALE.toDouble private val maxX = ceil (x.max + EPSILON) @@ -178,25 +181,25 @@ class Canvas (x: VectorD, y: VectorD, z: VectorD, width: Int, height: Int, lines */ override def paintComponent (gr: Graphics): Unit = super.paintComponent (gr) - val g2d = gr.asInstanceOf [Graphics2D] // use hi-res graphics + val g2d = gr.asInstanceOf [Graphics2D] // use hi-res graphics - g2d.setTransform (at) // used for zooming (at @see `ZoomablePanel`) + g2d.setTransform (at) // used for zooming (at @see `ZoomablePanel`) Plot.drawAxes (g2d, baseX, baseY, frameW, frameH, offset, minX, maxY, deltaX, deltaY) //:: Draw the dots for the data points being plotted - var x_pos = 0 // current x position - var y_pos = 0 // current y position - var px_pos = 0 // previous x position - var py_pos = 0 // previous y position + var x_pos = 0 // current x position + var y_pos = 0 // current y position + var px_pos = 0 // previous x position + var py_pos = 0 // previous y position for i <- 0 until y.dim do val xx = round ((x(i) - minX) * (frameW - 2 * offset)) - x_pos = (xx / deltaX).asInstanceOf [Int] + offset + x_pos = (xx / deltaX).toInt + offset val yy = round ((maxY - y(i)) * (frameH - 2 * offset)) - y_pos = (yy / deltaY).asInstanceOf [Int] + offset - dot.setFrame (x_pos, y_pos, diameter, diameter) // x, y, w, h + y_pos = (yy / deltaY).toInt + offset + dot.setFrame (x_pos, y_pos, diameter, diameter) // x, y, w, h g2d.setPaint (black) g2d.fill (dot) @@ -205,10 +208,9 @@ class Canvas (x: VectorD, y: VectorD, z: VectorD, width: Int, height: Int, lines if i != 0 && lines then g2d.setStroke (new BasicStroke (1.0f)) g2d.drawLine (px_pos+1, py_pos+1, x_pos+1, y_pos+1) - end if - px_pos = x_pos // update previous x - py_pos = y_pos // update previous y + px_pos = x_pos // update previous x + py_pos = y_pos // update previous y end for g2d.setStroke (new BasicStroke (2.0f)) @@ -216,10 +218,10 @@ class Canvas (x: VectorD, y: VectorD, z: 
VectorD, width: Int, height: Int, lines if z != null then for i <- 0 until min (y.dim, z.dim) do val xx = round ((x(i) - minX) * (frameW - 2 * offset)) - x_pos = (xx / deltaX).asInstanceOf [Int] + offset + x_pos = (xx / deltaX).toInt + offset val yy = round ((maxY - z(i)) * (frameH - 2 * offset)) - y_pos = (yy / deltaY).asInstanceOf [Int] + offset - dot.setFrame (x_pos, y_pos, diameter, diameter) // x, z, w, h + y_pos = (yy / deltaY).toInt + offset + dot.setFrame (x_pos, y_pos, diameter, diameter) // x, z, w, h g2d.setPaint (red) g2d.fill (dot) @@ -227,10 +229,9 @@ class Canvas (x: VectorD, y: VectorD, z: VectorD, width: Int, height: Int, lines if i != 0 && lines then g2d.setStroke (new BasicStroke (1.0f)) g2d.drawLine (px_pos+1, py_pos+1, x_pos+1, y_pos+1) - end if - px_pos = x_pos // update previous x - py_pos = y_pos // update previous y + px_pos = x_pos // update previous x + py_pos = y_pos // update previous y end for end if diff --git a/src/main/scala/scalation/mathstat/PlotC.scala b/src/main/scala/scalation/mathstat/PlotC.scala index 15b9947d0..ea854494c 100644 --- a/src/main/scala/scalation/mathstat/PlotC.scala +++ b/src/main/scala/scalation/mathstat/PlotC.scala @@ -6,6 +6,9 @@ * @see LICENSE (MIT style license file). * * @note Contour Plots for z = f(x, y) using color-coding for z + * Shows relative elevation by color and implicitly contour curves + * + * see www.math.uri.edu/~bkaskosz/fall21/sec-12-3.pdf */ package scalation @@ -13,9 +16,12 @@ package mathstat import scala.collection.mutable.ArrayBuffer import scala.math.{ceil, floor, round} + import scalation.scala2d._ import scalation.scala2d.Colors._ +import VectorDOps._ + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `PlotC` class takes a function f and displays color-coded values for * z = f(x, y) over a two dimensional grid defined the lower lb and upper ub bounds. 
@@ -31,22 +37,23 @@ import scalation.scala2d.Colors._ * @param lb the lower bounds on the plotting domain * @param ub the upper bounds on the plotting domain * @param path the points on a path (e.g., a search path) + * @param opt the optimal value if known (defaults to null) * @param deltaF estimate of the range of possible functional values (if < 0, will be computed) * @param lbF the lower bound on the functional value - * @param _title the title of the plot + * @param title the title of the plot */ class PlotC (f: FunctionV2S, lb: VectorD, ub: VectorD, path: ArrayBuffer [VectorD] = null, opt: VectorD = VectorD.nullv, private var deltaF: Double = -1.0, private var lbF: Double = 0.0, - _title: String = "Contour Plot of f(x, y)") - extends VizFrame (_title, null): + title: String = "Contour Plot of f(x, y)") + extends VizFrame (title, null): - private val _1_3 = 1.0 / 3.0 // one third - private val _2_3 = 2.0 / 3.0 // two thirds - private val offset = 50 // offset frame to axis - private val frameW = getW // frame width - private val frameH = getH // frame height - private val baseX = offset // base for x-axis - private val baseY = frameH - offset // base for y-axis + private val debug = debugf ("PlotC", false) // debug function + + private val offset = 50 // offset frame to axis + private val frameW = getW // frame width + private val frameH = getH // frame height + private val baseX = offset // base for x-axis + private val baseY = frameH - offset // base for y-axis private val minX = floor (lb(0)) private val maxX = ceil (ub(0)) @@ -58,7 +65,7 @@ class PlotC (f: FunctionV2S, lb: VectorD, ub: VectorD, path: ArrayBuffer [Vector private val width_ = 9 private val diameter = 6 private val square = Rectangle () - private val nsquares = 80.0 // number of squares per direction x, y + private val nsquares = 200.0 // number of squares per direction x, y private val dot = Ellipse () //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -76,9 
+83,12 @@ class PlotC (f: FunctionV2S, lb: VectorD, ub: VectorD, path: ArrayBuffer [Vector */ override def paintComponent (gr: Graphics): Unit = super.paintComponent (gr) - val g2d = gr.asInstanceOf [Graphics2D] // use hi-res graphics + val g2d = gr.asInstanceOf [Graphics2D] // use hi-res graphics + val mc = 255 // max color value + val c = VectorD (1.0, 0.8, 0.6, 0.4, 0.2, 0.1, 0.001) // fractional value cutoffs + val d = 1.0 / c // use reciprocal for multiplier - g2d.setTransform (at) // used for zooming (at @see `ZoomablePanel`) + g2d.setTransform (at) // used for zooming (at @see `ZoomablePanel`) Plot.drawAxes (g2d, baseX, baseY, frameW, frameH, offset, minX, maxY, deltaX, deltaY) @@ -92,21 +102,41 @@ class PlotC (f: FunctionV2S, lb: VectorD, ub: VectorD, path: ArrayBuffer [Vector var y = lb(1) while y <= ub(1) do val vec = VectorD (x, y) - val frac = (f(vec) - lbF) / deltaF // fractional way from lower to upper bound - - val rgb = - if frac > _2_3 then ( ((frac-_2_3) * 765).toInt, ((1-frac) * 765).toInt, 0 ) - else if frac > _1_3 then ( 0, ((frac-_1_3) * 765).toInt, ((_2_3-frac) * 765).toInt ) - else ( ((_1_3-frac) * 400).toInt, 0, ((frac) * 765).toInt ) - - println (s"(x, y) = $vec, lbF = $lbF, frac = $frac, rgb = $rgb") + val fv = f(vec) + val frac = (fv - lbF) / deltaF // fractional way from lower to upper bound + + // high to low: M, B, C, G, Y, R, W + + val rgb = if frac > c(1) then + val z = (mc * (c(0) - frac)).toInt; (z, 0, z) // magenta + else if frac > c(2) then + (0, 0, (mc * d(1) * (c(1) - frac)).toInt) // blue + else if frac > c(3) then + val z = (mc * d(2) * (c(2) - frac)).toInt; (0, z, z) // cyan + else if frac > c(4) then + (0, (mc * d(3) * (c(3) - frac)).toInt, 0) // green + else if frac > c(5) then + val z = (mc * d(4) * (c(4) - frac)).toInt; (z, z, 0) // yellow + else if frac > c(6) then + ((mc * d(5) * (c(5) - frac)).toInt, 0, 0) // red + else + val z = (mc * d(6) * (c(6) - frac)).toInt; (z, z, z) // gray +/* + val rgb = if frac > _2_3 then 
+ ( ((frac-_2_3) * 765).toInt, ((1-frac) * 765).toInt, 0 ) + else if frac > _1_3 then + ( 0, ((frac-_1_3) * 765).toInt, ((_2_3-frac) * 765).toInt ) + else + ( ((_1_3-frac) * 400).toInt, 0, ((frac) * 765).toInt ) +*/ + debug ("paintComponent", s"(x, y) = $vec, fv = $fv, lbF = $lbF, frac = $frac, rgb = $rgb") val color = new Color (rgb._1, rgb._2, rgb._3) val xx = round ((x - lb(0)) * (frameW - 2 * offset)) - x_pos = (xx / deltaX).asInstanceOf [Int] + offset + x_pos = (xx / deltaX).toInt + offset val yy = round ((ub(1) - y) * (frameH - 2 * offset)) - y_pos = (yy / deltaY).asInstanceOf [Int] + offset - diameter - square.setFrame (x_pos, y_pos, width_, width_) // x, y, w, h + y_pos = (yy / deltaY).toInt + offset - diameter + square.setFrame (x_pos, y_pos, width_, width_) // x, y, w, h g2d.setPaint (color) g2d.fill (square) y += deltaY / nsquares @@ -117,29 +147,31 @@ class PlotC (f: FunctionV2S, lb: VectorD, ub: VectorD, path: ArrayBuffer [Vector //:: Draw the dots for the points on a search path, if given if path != null then - val basicStroke = g2d.getStroke.asInstanceOf[BasicStroke] - val dashedLine = Line (0, 0, 0, 0) - val dashedStroke = new BasicStroke(0.5, basicStroke.getEndCap, basicStroke.getLineJoin, 1.0, Array[Float](2), 0); + val basicStroke = g2d.getStroke.asInstanceOf [BasicStroke] + val dashedLine = Line (0, 0, 0, 0) + val dashedStroke = new BasicStroke (0.5, basicStroke.getEndCap, basicStroke.getLineJoin, + 1.0, Array [Float](2), 0); var xPosPrev: Int = Int.MinValue var yPosPrev: Int = Int.MinValue for p <- path do // Draw point in path. 
val xx = round ((p(0) - lb(0)) * (frameW - 2 * offset)) - x_pos = (xx / deltaX).asInstanceOf [Int] + offset + x_pos = (xx / deltaX).toInt + offset val yy = round ((ub(1) - p(1)) * (frameH - 2 * offset)) - y_pos = (yy / deltaY).asInstanceOf [Int] + offset - diameter - dot.setFrame (x_pos, y_pos, diameter, diameter) // x, y, w, h + y_pos = (yy / deltaY).toInt + offset - diameter + dot.setFrame (x_pos, y_pos, diameter, diameter) // x, y, w, h g2d.setPaint (darkyellow) g2d.fill (dot) - // Draw line connecting previous point to this point. + // Draw line connecting previous point to this point if xPosPrev != Int.MinValue && yPosPrev != Int.MinValue then - dashedLine.setLine(xPosPrev + (diameter/2.0), yPosPrev + (diameter/2.0), x_pos + (diameter/2.0), y_pos + (diameter/2.0)) - g2d.setStroke(dashedStroke) + dashedLine.setLine (xPosPrev + (diameter/2.0), yPosPrev + (diameter/2.0), + x_pos + (diameter/2.0), y_pos + (diameter/2.0)) + g2d.setStroke (dashedStroke) g2d.setPaint (black) - g2d.draw(dashedLine) - g2d.setStroke(basicStroke) + g2d.draw (dashedLine) + g2d.setStroke (basicStroke) // Update previous positions. 
xPosPrev = x_pos @@ -147,13 +179,14 @@ class PlotC (f: FunctionV2S, lb: VectorD, ub: VectorD, path: ArrayBuffer [Vector end if if opt != VectorD.nullv then - val xx = round((opt(0) - lb(0)) * (frameW - 2 * offset)) - x_pos = (xx / deltaX).asInstanceOf[Int] + offset + val xx = round ((opt(0) - lb(0)) * (frameW - 2 * offset)) + x_pos = (xx / deltaX).toInt + offset val yy = round((ub(1) - opt(1)) * (frameH - 2 * offset)) - y_pos = (yy / deltaY).asInstanceOf[Int] + offset - diameter - dot.setFrame(x_pos + (diameter/4.0), y_pos + (diameter/4.0), diameter/2.0, diameter/2.0) // x, y, w, h - g2d.setPaint(black) - g2d.fill(dot) + y_pos = (yy / deltaY).toInt + offset - diameter + dot.setFrame (x_pos + (diameter/4.0), y_pos + (diameter/4.0), + diameter/2.0, diameter/2.0) // x, y, w, h + g2d.setPaint (black) + g2d.fill (dot) end if end paintComponent @@ -184,8 +217,9 @@ class PlotC (f: FunctionV2S, lb: VectorD, ub: VectorD, path: ArrayBuffer [Vector x += deltaX / nsquares end while - lbF = minF // lower bounds on functional values for f - deltaF = maxF - minF // range of functional values for f + lbF = minF // lower bounds on functional values for f + deltaF = maxF - minF // range of functional values for f + debug ("resetBounds", s"lbF = $lbF, deltaF = $deltaF") end resetBounds //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -197,7 +231,8 @@ end PlotC //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `plotCTest` main function is used to test the `PlotC` class. +/** The `plotCTest` main function is used to test the `PlotC` class. For additional examples, + * @see `RidgeRegression`, `LassoRegression`, `BridgeRegression` in `modeling`. 
* @see scalation.scala2d.writeImage * > runMain scalation.mathstat.plotCTest */ diff --git a/src/main/scala/scalation/mathstat/PlotM.scala b/src/main/scala/scalation/mathstat/PlotM.scala index b4d1b8c74..0c435fed4 100644 --- a/src/main/scala/scalation/mathstat/PlotM.scala +++ b/src/main/scala/scalation/mathstat/PlotM.scala @@ -25,24 +25,24 @@ import scalation.scala2d.Colors._ * (2) mouse dragging repositions the objects in the panel (drawing canvas). * @see ZoomablePanel *------------------------------------------------------------------------------ - * @param x_ the x vector of data values (horizontal) - * @param y_ the y matrix of data values where y(i) is the i-th vector (vertical) - * @param label the label/legend/key for each curve in the plot - * @param _title the title of the plot - * @param lines flag for generating a line plot + * @param x_ the x vector of data values (horizontal) + * @param y_ the y matrix of data values where y(i) is the i-th vector (vertical) + * @param label the label/legend/key for each curve in the plot + * @param title the title of the plot + * @param lines flag for generating a line plot */ class PlotM (x_ : VectorD, y_ : MatrixD, var label: Array [String] = null, - _title: String = "PlotM y_i vs. x for each i", lines: Boolean = false) - extends VizFrame (_title, null): + title: String = "PlotM y_i vs. 
x for each i", lines: Boolean = false) + extends VizFrame (title, null): - val xa: VectorD = if x_ == null then VectorD.range (0, y_.dim2) else x_ + private val xa: VectorD = if x_ == null then VectorD.range (0, y_.dim2) else x_ - private val EPSILON = 1E-9 // number close to zero - private val offset = 70 // offset frame to axis - private val frameW = getW // frame width - private val frameH = getH // frame height - private val baseX = offset // base for x-axis - private val baseY = frameH - offset // base for y-axis + private val EPSILON = 1E-9 // number close to zero + private val offset = 70 // offset frame to axis + private val frameW = getW // frame width + private val frameH = getH // frame height + private val baseX = offset // base for x-axis + private val baseY = frameH - offset // base for y-axis private val minX = floor (xa.min) private val maxX = ceil (xa.max + EPSILON) @@ -77,7 +77,7 @@ class PlotM (x_ : VectorD, y_ : MatrixD, var label: Array [String] = null, extends ZoomablePanel: setBackground (white) - val colors = Array (red, green, blue, black, yellow, cyan, magenta) + private val colors = Array (red, green, blue, black, yellow, cyan, magenta) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Paint the canvas by plotting the data points. 
@@ -85,9 +85,9 @@ class PlotM (x_ : VectorD, y_ : MatrixD, var label: Array [String] = null, */ override def paintComponent (gr: Graphics): Unit = super.paintComponent (gr) - val g2d = gr.asInstanceOf [Graphics2D] // use hi-res graphics + val g2d = gr.asInstanceOf [Graphics2D] // use hi-res graphics - g2d.setTransform (at) // used for zooming (at @see `ZoomablePanel`) + g2d.setTransform (at) // used for zooming (at @see `ZoomablePanel`) Plot.drawAxes (g2d, baseX, baseY, frameW, frameH, offset, minX, maxY, deltaX, deltaY) @@ -97,8 +97,8 @@ class PlotM (x_ : VectorD, y_ : MatrixD, var label: Array [String] = null, //:: Draw the dots for the data points being plotted - var x_pos = 0 // current x position - var y_pos = 0 // current y position + var x_pos = 0 // current x position + var y_pos = 0 // current y position for i <- 0 until y_.dim do val y_i = y_(i) @@ -107,25 +107,24 @@ class PlotM (x_ : VectorD, y_ : MatrixD, var label: Array [String] = null, g2d.setPaint (color) if i < label.length then g2d.drawString (label(i), offset * (i + 2), frameH - 30) - var px_pos = 0 // previous x position - var py_pos = 0 // previous y position + var px_pos = 0 // previous x position + var py_pos = 0 // previous y position for j <- xa.indices do val xx = round ((xa(j) - minX) * (frameW - 2 * offset)) - x_pos = (xx / deltaX).asInstanceOf [Int] + offset + x_pos = (xx / deltaX).toInt + offset val yy = round ((maxY - y_i(j)) * (frameH - 2 * offset)) - y_pos = (yy / deltaY).asInstanceOf [Int] + offset - dot.setFrame (x_pos, y_pos, diameter, diameter) // x, y, w, h + y_pos = (yy / deltaY).toInt + offset + dot.setFrame (x_pos, y_pos, diameter, diameter) // x, y, w, h g2d.fill (dot) // connect with lines if j != 0 && lines then g2d.setStroke (new BasicStroke (1.0f)) g2d.drawLine (px_pos+1, py_pos+1, x_pos+1, y_pos+1) - end if - px_pos = x_pos // update previous x - py_pos = y_pos // update previous y + px_pos = x_pos // update previous x + py_pos = y_pos // update previous y end for end 
for diff --git a/src/main/scala/scalation/mathstat/Probability.scala b/src/main/scala/scalation/mathstat/Probability.scala index 47ce66eae..74e75ead9 100644 --- a/src/main/scala/scalation/mathstat/Probability.scala +++ b/src/main/scala/scalation/mathstat/Probability.scala @@ -129,10 +129,8 @@ object Probability: for i <- idx if x(i) <= thres do { nu(y(i)) += 1; cnt += 1 } else for i <- idx if x(i) > thres do { nu(y(i)) += 1; cnt += 1 } - end if else for i <- idx if x(i) == vl do { nu(y(i)) += 1; cnt += 1 } - end if debug ("freq", s"k = $k, vl = $vl, cont = $cont, thres = $thres, nu = $nu") (cnt.toDouble / x.dim, nu) // return fraction and frequency vector end freq @@ -155,10 +153,8 @@ object Probability: for i <- x.indices if x(i) <= thres do cnt += 1 else for i <- x.indices if x(i) > thres do cnt += 1 - end if else for i <- x.indices if x(i) == vl do cnt += 1 - end if cnt end count diff --git a/src/main/scala/scalation/mathstat/RTensor4D.scala b/src/main/scala/scalation/mathstat/RTensor4D.scala index 157f095c6..e73d969cd 100644 --- a/src/main/scala/scalation/mathstat/RTensor4D.scala +++ b/src/main/scala/scalation/mathstat/RTensor4D.scala @@ -256,14 +256,14 @@ class RTensor4D (val dim: Int, val dim2: VectorI, val dim3: VectorI, val dim4: I * @param j 2nd dimension (column) index of the tensor * @param x the matrix to be updated at the above position in the tensor */ - def update (i: Int, j: Int, x: MatrixD): Unit = v(i) = null // FIX x.toArray + def update (i: Int, j: Int, x: MatrixD): Unit = v(i)(j) = x.toArray //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Update a single matrix of the tensor to the given matrix. * @param i 1st dimension (row) index of the tensor * @param x the matrix to be updated at the above position in the tensor */ - def update (i: Int, x: RTensorD): Unit = v(i) = null // FIX x.toArray + def update (i: Int, x: RTensorD): Unit = ??? 
// FIX v(i) = x.toArray //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set all the tensor element values to x. diff --git a/src/main/scala/scalation/mathstat/RTensorD.scala b/src/main/scala/scalation/mathstat/RTensorD.scala index 995661898..ce50fae78 100644 --- a/src/main/scala/scalation/mathstat/RTensorD.scala +++ b/src/main/scala/scalation/mathstat/RTensorD.scala @@ -31,7 +31,7 @@ end sizes2 * @param d fixed size of the 2nd/3rd level/dimension (column/sheet) of the tensor */ def dupDim (dim: Int, d: Int): VectorI = - VectorI (for i <- 0 until dim yield d) + VectorI (for _ <- 0 until dim yield d) end dupDim @@ -65,7 +65,6 @@ class RTensorD (val dim: Int, val dim2: VectorI, val dim3: Int, for i <- indices do v(i) = Array.ofDim (dim2(i), dim3) else if dim != v.length || dim3 != v(0)(0).length then flaw ("init", "dimensions are wrong") - end if /** Format string used for printing vector values (change using setFormat) */ @@ -162,7 +161,6 @@ class RTensorD (val dim: Int, val dim2: VectorI, val dim3: Int, val u = Array.ofDim [Double] (is.size, js.size, dim3) for i <- is.indices; j <- js.indices; k <- indices3 do u(i)(j)(k) = v(is(i))(js(j))(k) new RTensorD (u) - end if end apply //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -183,7 +181,6 @@ class RTensorD (val dim: Int, val dim2: VectorI, val dim3: Int, val u = Array.ofDim [Double] (is.size, js.size, ks.size) for i <- is.indices; j <- js.indices; k <- ks.indices do u(i)(j)(k) = v(is(i))(js(j))(ks(k)) new RTensorD (u) - end if end apply //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -233,7 +230,7 @@ class RTensorD (val dim: Int, val dim2: VectorI, val dim3: Int, * @param i 1st dimension (row) index of the tensor * @param x the matrix to be updated at the above position in the tensor */ - def update (i: Int, x: MatrixD): Unit = v(i) = null // FIX x.toArray + def update (i: Int, x: MatrixD): Unit = v(i) = x.toArray 
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set all the tensor element values to x. @@ -457,11 +454,9 @@ object RTensorD: if z.dim < x.dim then flaw ("freq", "z is required to have at the number elements in x") return null - end if if dim2.dim != 2 then flaw ("freq", "dim2 must have dimension 2: one for x and one for z") return null - end if val t = RTensorD (dim2(0), dim2(1), dim3) for i <- x.indices do t(x(i), z(i), y(i)) += 1 t diff --git a/src/main/scala/scalation/mathstat/Scala2LaTeX.scala b/src/main/scala/scalation/mathstat/Scala2LaTeX.scala new file mode 100644 index 000000000..27b795938 --- /dev/null +++ b/src/main/scala/scalation/mathstat/Scala2LaTeX.scala @@ -0,0 +1,343 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Yousef Fekri Dabanloo, John Miller + * @version 2.0 + * @date Sat Sep 20 17:13:38 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Support for Converting ScalaTion Equations and Matrices to LaTeX + */ + +package scalation +package mathstat + +import scala.collection.mutable.{LinkedHashSet => LSET} +import scala.util.matching.Regex + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Scala2LaTeX object provides methods for converting ScalaTion Equations + * into LaTeX. + */ +object Scala2LaTeX: + + /** Customize LaTeX operator rendering per transform *name* here (no `case` needed) + */ + private val TRANSFORM_LATEX: Map [String, String] = Map ("pow" -> "\\text{pow}", + "root" -> "\\text{root}", + "log" -> "\\log", + "sin" -> "\\sin", + "cos" -> "\\cos") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Render a TransformT to a LaTeX operator using its .name. 
+ * @param t the transform type + */ + private def latexOpFromTransform (t: TransformT): String = + TRANSFORM_LATEX.getOrElse (t.name, s"\\text{${t.name}}") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert a single feature token into a LaTeX term with coefficient. + * Supported tokens: + * + * const + * ylK + * fI(ylK) + * xeJlK + * gJ,T(xeJlK) (J = exo index, T = transform index for exo J) + * + * @param feat raw feature token (trimmed) + * @param fEndo_enabled enabled transforms for y (order matters) + * @param fExo_enabled enabled transforms per exo variable (order matters) + * @param coeffSym e.g., "\\beta" + * @param termIdx coefficient subscript index + */ + private def feature2LatexTerm (feat: String, fEndo_enabled: LSET [TransformT], + fExo_enabled: Array [LSET [TransformT]], + coeffSym: String, termIdx: Int): String = + // Regex patterns + val R_CONST: Regex = raw"""^const$$""".r + val R_YL: Regex = raw"""^yl(\d+)$$""".r + val R_FY: Regex = raw"""^f\d+\(yl(\d+)\)$$""".r + + val R_XEL: Regex = raw"""^xe(\d+)l(\d+)$$""".r + val R_GX: Regex = raw"""^g\d+,\d+\(xe(\d+)l(\d+)\)$$""".r + + def withCoeff (body: String): String = s"${coeffSym}_${termIdx}\\, $body" + + feat match + case R_CONST() => + s"${coeffSym}_${termIdx}" + + case R_YL(k) => + val lag = k.toInt + withCoeff (s"y_{t-$lag}") + + case R_FY(i, k) => + val idx = i.toInt + val lag = k.toInt + val endoArr = fEndo_enabled.toArray + val op = if 0 <= idx && idx < endoArr.length + then latexOpFromTransform(endoArr(idx)) + else s"\\text{f$idx}" + withCoeff (s"$op\\big(y_{t-$lag}\\big)") + + case R_XEL(j, k) => + val exo = j.toInt + val lag = k.toInt + withCoeff (s"x^{(${exo})}_{t-$lag}") + + case R_GX(tIdx, j, k) => + // Trust the inner xel indices. 
+ val exo = j.toInt + val lag = k.toInt + val trIx = tIdx.toInt + val op = + if 0 <= exo && exo < fExo_enabled.length then + val exoArr = fExo_enabled(exo).toArray + if 0 <= trIx && trIx < exoArr.length then latexOpFromTransform (exoArr(trIx)) + else s"\\text{g$trIx}" + else s"\\text{g$trIx}" + withCoeff (s"$op\\big(x^{(${exo})}_{t-$lag}\\big)") + + case _ => + // Fallback: still has coefficient, render raw token safely + withCoeff (s"\\text{${feat.replace ("\\", "\\\\")}}") + end feature2LatexTerm + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a LaTeX equation from feature tokens. + * @param features the feature names/tokens (any order/subset) + * @param fEndo_enabled enabled endo transforms (order matters) + * @param fExo_enabled enabled exo transforms per variable (order matters) + * @param includeHat whether to render \hat{y}_t on LHS + * @param coeffSymbol coefficient symbol, e.g. "\\beta" + * @param errorSymbol error term, e.g. "\\varepsilon_t" + */ + def features2LatexEquation (features: Array [String], fEndo_enabled: LSET [TransformT], + fExo_enabled: Array [LSET [TransformT]], includeHat: Boolean = true, + coeffSymbol: String = "\\beta", + errorSymbol: String = "\\varepsilon_t"): String = + + val lhs = if includeHat then "\\hat{y}_t" else "y_t" + val terms = features.zipWithIndex.map { case (f, i) => + feature2LatexTerm (f.trim, fEndo_enabled, fExo_enabled, coeffSymbol, i) } + val eq = s"$lhs \\,=\\, ${terms.mkString (" \\,+\\, ")} \\,+\\, $errorSymbol" + eq.replace (raw"\\_", raw"\_") + end features2LatexEquation + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Make a LaTeX equation with the begin and end tags. 
+ * @param equation the LaTex equation proper + */ + def make_equation (equation: String): String = + s"\\begin{equation*}\n$equation\n\\end{equation*}" + + private val preamble: String = """ +\documentclass{article} +\usepackage{amsmath} % For advanced math features like \text, \frac, etc. +\usepackage[utf8]{inputenc} % To handle various characters +\begin{document} +""" + + private val table_hdr: String = """\begin{table}[h] +\centering +""" + + private val table_tlr: String = """\end{tabular} +\end{table} +""" + + private val endln = " \\\\ \\hline \n" + private val endl2 = " \\\\ \\hline \\hline \n" + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Make a LaTeX table from a matrix. + * @param caption the caption for the table + * @param name the table name + * @param x the matrix of data + * @param colName the column names + * @param rowName the row names + */ + def make_table (caption: String, name: String, x: MatrixD, + colName: String = null, rowName: Array [String] = null): String = + var n = x.dim2 + if rowName != null then n += 1 + val cs = "|c"*n + "|" + val sb = new StringBuilder () + sb ++= s"\\caption{$caption}\n" + sb ++= s"\\label{tab:$name}\n" + sb ++= s"\\begin{tabular}{$cs} \\hline \n" + if colName != null then + sb ++= colName.replace (",", " &") + endl2 + for i <- x.indices do + if rowName != null then + sb ++= rowName(i) + " &" + sb ++= x(i).toString.replace ("VectorD(", "").replace (")", "").replace (",", " &") + endln + s"$table_hdr${sb.mkString}$table_tlr" + end make_table + + def make_body (): String = + + """ +section {Auto MPG} +(398 x 7) +\url{https://archive.ics.uci.edu/dataset/9/auto+mpg} + +section {House Price Regression Dataset} +(1000 x 8) +\url{https://www.kaggle.com/datasets/prokshitha/home-value-insights } + +section {Each Group Picks a Dataset} +(more rows and columns) + """ + + end make_body + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Make 
a LaTeX document. + * @param body the body of the LaTex document + */ + def make_doc (body: String): String = + s"$preamble\n$body\n\\end{document}" + end make_doc + +end Scala2LaTeX + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `scala2LaTeXTest` main method tests the `Scala2LaTeX` object by making a LaTeX + * equation from a ScalaTion model specification. + * > runMain scalation.mathstat.scala2LaTeXTest + */ +@main def scala2LaTeXTest (): Unit = + + import Scala2LaTeX._ + + val features = Array ("new\\_deaths", "icu\\_patients") + val fEndo = LSET [TransformT] () + val fExo = Array [LSET [TransformT]] () + val latex = make_doc (make_equation (features2LatexEquation (features, fEndo, fExo))) + println (latex) + +end scala2LaTeXTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `scala2LaTeXTest2` main method tests the `Scala2LaTeX` object by making a LaTeX + * table from a matrix. + * @see `scalation.modeling.Regression` + * > runMain scalation.mathstat.scala2LaTeXTest2 + */ +@main def scala2LaTeXTest2 (): Unit = + + import Scala2LaTeX._ + + // 16 data points: one x1 x2 x3 y + // Const Lat Elev Long Temp County + val xy = MatrixD ((16, 5), 1.0, 29.767, 41.0, 95.367, 56.0, // Harris + 1.0, 32.850, 440.0, 96.850, 48.0, // Dallas + 1.0, 26.933, 25.0, 97.800, 60.0, // Kennedy + 1.0, 31.950, 2851.0, 102.183, 46.0, // Midland + 1.0, 34.800, 3840.0, 102.467, 38.0, // Deaf Smith + 1.0, 33.450, 1461.0, 99.633, 46.0, // Knox + 1.0, 28.700, 815.0, 100.483, 53.0, // Maverick + 1.0, 32.450, 2380.0, 100.533, 46.0, // Nolan + 1.0, 31.800, 3918.0, 106.400, 44.0, // El Paso + 1.0, 34.850, 2040.0, 100.217, 41.0, // Collington + 1.0, 30.867, 3000.0, 102.900, 47.0, // Pecos + 1.0, 36.350, 3693.0, 102.083, 36.0, // Sherman + 1.0, 30.300, 597.0, 97.700, 52.0, // Travis + 1.0, 26.900, 315.0, 99.283, 60.0, // Zapata + 1.0, 28.450, 459.0, 99.217, 56.0, // Lasalle + 1.0, 25.900, 19.0, 97.433, 
62.0) // Cameron + + val colName = "Const, Lat, Elev, Long, Temp" + val caption = "Texas Temperatures Regression" + val name = "Texas-Temps" + val latex = make_doc (make_table (caption, name, xy, colName)) + println (latex) + +end scala2LaTeXTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `scala2LaTeXTest3` main method tests the `Scala2LaTeX` object by making a LaTeX + * table from matrix of QoF metric vs. model. This test does In-Sample testing. + * @see `scalation.modeling.Regression` + * > runMain scalation.mathstat.scala2LaTeXTest3 + */ +@main def scala2LaTeXTest3 (): Unit = + + import Scala2LaTeX._ + + // 16 data points: one x1 x2 x3 y + // Const Lat Elev Long Temp County + val xy = MatrixD ((16, 5), 1.0, 29.767, 41.0, 95.367, 56.0, // Harris + 1.0, 32.850, 440.0, 96.850, 48.0, // Dallas + 1.0, 26.933, 25.0, 97.800, 60.0, // Kennedy + 1.0, 31.950, 2851.0, 102.183, 46.0, // Midland + 1.0, 34.800, 3840.0, 102.467, 38.0, // Deaf Smith + 1.0, 33.450, 1461.0, 99.633, 46.0, // Knox + 1.0, 28.700, 815.0, 100.483, 53.0, // Maverick + 1.0, 32.450, 2380.0, 100.533, 46.0, // Nolan + 1.0, 31.800, 3918.0, 106.400, 44.0, // El Paso + 1.0, 34.850, 2040.0, 100.217, 41.0, // Collington + 1.0, 30.867, 3000.0, 102.900, 47.0, // Pecos + 1.0, 36.350, 3693.0, 102.083, 36.0, // Sherman + 1.0, 30.300, 597.0, 97.700, 52.0, // Travis + 1.0, 26.900, 315.0, 99.283, 60.0, // Zapata + 1.0, 28.450, 459.0, 99.217, 56.0, // Lasalle + 1.0, 25.900, 19.0, 97.433, 62.0) // Cameron + + val x = xy.not (?, xy.dim2-1) // matrix of predictor variable values + val y = xy(?, xy.dim2-1) // original scale response vector + val xy_ = xy(?, 1 until xy.dim2) // drop the column of all ones from xy + val x_ = x(?, 1 until x.dim2) // drop the column of all ones from x + + val n_q = 15 // number of core metrics + val r_q = 0 until 15 // range of core metrics + + val colName = "Metric, Regression, Ridge, Lasso, Transformed, Symbolic" // column names -- which 
models + val rowName = modeling.qoF_names.take(n_q) // row names -- which QoF metric + + banner ("Regression Model") + val mod1 = modeling.Regression (xy)() // Regression model + val qof1 = mod1.trainNtest ()()._2(r_q) // train and test on full dataset + println (mod1.summary ()) + + banner ("Ridge Regression Model") + val mod2 = modeling.RidgeRegression (xy_)() // Ridge Regression model + val qof_ = mod2.trainNtest ()()._2(r_q) // train and test on full dataset + val qof2 = modeling.RidgeRegression.fix_smape (mod2, y, qof_) + println (mod2.summary ()) + + banner ("Lasso Regression Model") + modeling.RidgeRegression.hp("lambda") = 0.02 // adjust shrinkage hyper-parameter + val mod3 = modeling.LassoRegression (xy)() // Lasso Regression model + val qof3 = mod3.trainNtest ()()._2(r_q) // train and test on full dataset + println (mod3.summary ()) + + banner ("Transfomed Regression Model") + val mod4 = new modeling.TranRegression (x, y) // Transformed Regression model + // defaults to log1p, try other transformations + val qof4 = mod4.trainNtest ()()._2(r_q) // train and test on full dataset + println (mod4.summary ()) + + banner ("Symbolic Regression Model") + val mod5 = modeling.SymRidgeRegression.quadratic (x_, y) // simple Symbolic Regression model + // try other forms for Symbolic Regression + val qof5 = mod5.trainNtest ()()._2(r_q) // train and test on full dataset + println (mod5.summary ()) + + banner ("Copy-paste the LateX table below into a .tex file") + + val qofs = MatrixD (qof1, qof2, qof3, qof4, qof5).transpose // put QoFs in a matrix + + val caption = "Texas Temperatures: Regression, Ridge, Lasso, Transformed" // LaTex figure caption + val name = "Texas-Temps" + + val latex = make_doc (make_table (caption, name, qofs, colName, rowName)) + println (latex) + +end scala2LaTeXTest3 + diff --git a/src/main/scala/scalation/mathstat/StatBootstrap.scala b/src/main/scala/scalation/mathstat/StatBootstrap.scala new file mode 100644 index 000000000..1f3222d39 --- 
/dev/null +++ b/src/main/scala/scalation/mathstat/StatBootstrap.scala @@ -0,0 +1,140 @@ + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Sun Nov 2 13:27:50 EST 2025 + * @see LICENSE (MIT style license file). + * + * @note Bootstrap Resampling (with Replacement) + * + * @see en.wikipedia.org/wiki/Bootstrapping_(statistics) + */ + +package scalation +package mathstat + +import scala.math.sqrt + +import scalation.random.{RandomVecD, RandomVecI, RandomVecIR} + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `StatBootstrap` class is used to create bootstrap samples and compute + * statistics based on these pseudo-samples. Given a sample y from a population + * (typically unknown), create several pseudo-samples (bootstrap samples). + * This allows confidence intervals to be created with requiring distribution + * assumptions, such as the data are Normally distributed. + * @param y the original sample of data + * @param unbiased whether the estimators are restricted to be unbiased + * @param stream the random number stream to use + */ +class StatBootstrap (y: VectorD, unbiased: Boolean = true, stream: Int = 0) + extends Statistic ("bootstrap", unbiased): + + private val len = y.dim // size of the vector + private val r_idx = RandomVecIR (len, len-1, 0, stream) // random index generator + + private var samples: MatrixD = null // will be a n by len matrix + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Make/generate n bootstrap samples (resampling with replacement). + * @param n the number of sample to make + */ + def makeSamples (n: Int): Unit = + samples = MatrixD (for _ <- 0 until n yield y(r_idx.igen)) + end makeSamples + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute/estimate the bootstrap sample mean. 
+ */ + inline def bmean: Double = samples.mmean + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute/estimate the bootstrap sample variance. + */ + def bvariance: Double = samples.mvariance + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute/estimate the bootstrap sample standard deviation. + */ + def bstdev: Double = samples.mstdev + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the bootstrap confidence interval (lo, hi) for the given confidence level + * using the bootstrap percentile method. + * @see stat.rutgers.edu/home/mxie/RCPapers/bootstrap.pdf + * @param p_ the confidence level + */ + def binterval (p_ : Double = .95): (Double, Double) = + val α_2 = (1.0 - p_) / 2.0 + val (lo, hi) = (α_2, 1.0 - α_2) + val means = samples.mean + (means.quantile (lo), means.quantile (hi)) + end binterval + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the bootstrap confidence interval half-width (ihw) for the given confidence + * level using the t-distribution. Assumes the data follows a Normal distribution. + * @param p_ the confidence level + */ + def binterval_ (p_ : Double = .95): Double = + val df = samples.dim - 1 // degrees of freedom + t_sigma (bstdev, df, p_) / sqrt (samples.dim) + end binterval_ + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the summary bootstrap statistics as a row/Array. + */ + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Generate a row of bootstrap statistical results as a string. 
+ */ + def toString2: String = + "| %11s | %5s | %10.3f | %10.3f | %10.3f | %10.3f | %10.3f |".format ( + name, num, min, max, bmean, bstdev, binterval_ ()) + end toString2 + +end StatBootstrap + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `statBootstrapTest` main function is used to test the `StatBootstrap` class. + * Test population, sample, and bootstrap statistics. + * > runMain scalation.mathstat.statBootstrapTest + */ +@main def statBootstrapTest (): Unit = + + val (popSize, maxValue) = (500, 100) + val sampSize = 100 + + banner ("Generate a Population Vector") + val pgen = RandomVecD (popSize, maxValue, 0) + val pop = pgen.gen + println (s"population pop = $pop") + + banner ("Take a Sample from the Population Vector") + val sgen = RandomVecI (sampSize, maxValue, 0) + val samp = pop (sgen.igen) + println (s"sample samp = $samp") + + banner ("Compute Population Statistics") + val stat0 = new Statistic ("pop", false) + stat0.tallyVec (pop) + println (Statistic.labels) + println (stat0) + + banner ("Compute Sample Statistics") + val stat1 = new StatBootstrap (samp) + stat1.tallyVec (samp) + println (Statistic.labels) + println (stat1) + + banner ("Compute Bootstrap Statistics") + stat1.makeSamples (200) + println (Statistic.labels) + println (stat1.toString2) + val (mu, ihw) = (stat1.bmean, stat1.binterval_ ()) + val lo_hi = (mu - ihw, mu + ihw) + println (s"binterval_ = $lo_hi") + println (s"binterval = ${stat1.binterval ()}") + +end statBootstrapTest + diff --git a/src/main/scala/scalation/mathstat/StatTable.scala b/src/main/scala/scalation/mathstat/StatTable.scala index de6905386..230cc845c 100644 --- a/src/main/scala/scalation/mathstat/StatTable.scala +++ b/src/main/scala/scalation/mathstat/StatTable.scala @@ -48,10 +48,8 @@ end StatTable println ("Create a StatTable called Test") val stats = VEC [Statistic] () - for i <- 0 until 50 do stats += new Statistic () - for j <- 0 until 50 do - for i <- 0 until 50 do 
stats(j).tally (i) - end for + cfor (0, 50) { _ => stats += new Statistic () } + for j <- 0 until 50; i <- 0 until 50 do stats(j).tally (i) new StatTable ("Test", stats) end statTableTest diff --git a/src/main/scala/scalation/mathstat/Statistic.scala b/src/main/scala/scalation/mathstat/Statistic.scala index dc350a754..90a2f8787 100644 --- a/src/main/scala/scalation/mathstat/Statistic.scala +++ b/src/main/scala/scalation/mathstat/Statistic.scala @@ -6,6 +6,25 @@ * @see LICENSE (MIT style license file). * * @note Sample Statistics Collection, Confidence Intervals and Significance Tests + * + * Let F be the CDF for the Standard Normal Distribution, p be the probability, and + * y be a critical values for the random variable Y + * F(y) = P{Y <= y} + * p = F(y) = .975 + * y = F-inv(p) = 1.96 + * α, the significance level, is the probability of making a Type I error in hypothesis testing, i.e., + * α = P(reject null hypothesis | null hypothesis is true) + * In ONE-TAIL hypothesis testing, .025 of the mass is in the right tail (unusually large) + * Assuming the data follows the Standard Normal Distribution, how likely is it to see a value >= 1.96 + * α = .05 + * p = 1 - α = .95 + * In TWO-TAIL hypothesis testing, .025 of the mass is in the left tail and .025 is in right tail (unusually extreme) + * Assuming the data follows the Standard Normal Distribution, how likely is it to see a value <= 1.96 or >= 1.96 + * α = .05 + * p = 1 - α/2 = .975 + *-------------------------------------------------------------------------- + * The confidence level is the probability of NOT making a Type I error + * p_ = 1 - α */ package scalation @@ -15,17 +34,27 @@ import scala.collection.mutable.{ArrayBuffer => VEC} //import scala.collection.mutable.{ListBuffer => VEC} import scala.math.{abs, sqrt} -private val flaw = flawf ("top") // flaw function +private val flaw = flawf ("top") // flaw function + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Return 
the critical value for the z-distribution with a confidence level of p_. + * Uses inverse CDF (iCDF) `Quantile.normalInv`. + * @param p_ the confidence level (two tails) + */ +def z_crit (p_ : Double = .95): Double = + val p = 1.0 - (1.0 - p_) / 2.0 // e.g., .95 --> .975 (two tails) + random.Quantile.normalInv (p) // e.g., should return ~ 1.96 +end z_crit //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the product of the critical value from the z-distribution (Standard * Normal) and the standard deviation of the vector. * @param sig the standard deviation of the vector/sample - * @param p the confidence level + * @param p_ the confidence level (two tails) */ -def z_sigma (sig: Double, p: Double = .95): Double = - val pp = 1.0 - (1.0 - p) / 2.0 // e.g., .95 --> .975 (two tails) - val z = random.Quantile.normalInv (pp) +def z_sigma (sig: Double, p_ : Double = .95): Double = + val p = 1.0 - (1.0 - p_) / 2.0 // e.g., .95 --> .975 (two tails) + val z = random.Quantile.normalInv (p) z * sig end z_sigma @@ -34,10 +63,10 @@ end z_sigma * using the z-distribution. 
* @param sig the standard deviation of the vector/sample * @param n the length of the vector/sample - * @param p the confidence level + * @param p_ the confidence level */ -def z_interval (sig: Double, n: Int, p: Double = .95): Double = - z_sigma (sig, p) / sqrt (n) +def z_interval (sig: Double, n: Int, p_ : Double = .95): Double = + z_sigma (sig, p_) / sqrt (n) end z_interval //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -47,27 +76,40 @@ end z_interval * @param mu the estimated mean of the vector/sample * @param sig the standard deviation of the vector/sample * @param n the length of the vector/sample - * @param p the significance/confidence level + * @param p_ the confidence level * @param show whether to show details of the test */ -def z_meanTest (mu0: Double, mu: Double, sig: Double, n: Int, p: Double = .95, +def z_meanTest (mu0: Double, mu: Double, sig: Double, n: Int, p_ : Double = .95, show: Boolean = true): Boolean = - val ihw = z_interval (sig, n, p) + val ihw = z_interval (sig, n, p_) if show then println (s"z_meanTest: | $mu - $mu0 | <=? $ihw") abs (mu - mu0) <= ihw end z_meanTest +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Return the critical value for the t-distribution with a confidence level of p_ + * and error degrees of freedom of df. + * Uses inverse CDF (iCDF) `Quantile.studentTInv`. + * @param df the error degrees of freedom + * @param p_ the confidence level (two tails) + */ +def t_crit (df: Int, p_ : Double = .95): Double = + if df < 1 then { flaw ("interval", "must have at least 2 observations"); return 0.0 } + val p = 1.0 - (1.0 - p_) / 2.0 // e.g., .95 --> .975 (two tails) + random.Quantile.studentTInv (p, df) // e.g., @df = 10 should return ~ 2.228 +end t_crit + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the product of the critical value from the t-distribution and the * standard deviation of the vector. 
* @param sig the standard deviation of the vector/sample - * @param df the degrees of freedom - * @param p the confidence level + * @param df the error degrees of freedom + * @param p_ the confidence level (two tails) */ -def t_sigma (sig: Double, df: Int, p: Double = .95): Double = +def t_sigma (sig: Double, df: Int, p_ : Double = .95): Double = if df < 1 then { flaw ("interval", "must have at least 2 observations"); return 0.0 } - val pp = 1.0 - (1.0 - p) / 2.0 // e.g., .95 --> .975 (two tails) - val t = random.Quantile.studentTInv (pp, df) + val p = 1.0 - (1.0 - p_) / 2.0 // e.g., .95 --> .975 (two tails) + val t = random.Quantile.studentTInv (p, df) t * sig end t_sigma @@ -76,10 +118,10 @@ end t_sigma * using the t-distribution. * @param sig the standard deviation of the vector/sample * @param n the length of the vector/sample - * @param p the confidence level + * @param p_ the confidence level */ -def t_interval (sig: Double, n: Int, p: Double = .95): Double = - t_sigma (sig, n-1, p) / sqrt (n) +def t_interval (sig: Double, n: Int, p_ : Double = .95): Double = + t_sigma (sig, n-1, p_) / sqrt (n) end t_interval //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -89,12 +131,12 @@ end t_interval * @param mu the estimated mean of the vector/sample * @param sig the standard deviation of the vector/sample * @param n the length of the vector/sample - * @param p the significance/confidence level + * @param p_ the confidence level * @param show whether to show details of the test */ -def t_meanTest (mu0: Double, mu: Double, sig: Double, n: Int, p: Double = .95, +def t_meanTest (mu0: Double, mu: Double, sig: Double, n: Int, p_ : Double = .95, show: Boolean = true): Boolean = - val ihw = t_interval (sig, n, p) + val ihw = t_interval (sig, n, p_) if show then println (s"t_meanTest: | $mu - $mu0 | <=? $ihw") abs (mu - mu0) <= ihw end t_meanTest @@ -106,6 +148,7 @@ end t_meanTest * TimeStatistic.scala. 
* @param name the name for this statistic (e.g., 'waitingTime') * @param unbiased whether the estimators are restricted to be unbiased + * set to false for population (or MLE) statistics */ class Statistic (val name: String = "stat", unbiased: Boolean = true): @@ -177,12 +220,12 @@ class Statistic (val name: String = "stat", unbiased: Boolean = true): def tallyVec (v: VectorD): Unit = for x <- v do tally (x) //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the number of samples. + /** Return the number of instances in sample. */ inline def num: Int = n //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the number of samples as a double. + /** Return the number of instances in sample as a double. */ inline def nd: Double = n.toDouble @@ -233,20 +276,20 @@ class Statistic (val name: String = "stat", unbiased: Boolean = true): //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the confidence interval half-width for the given confidence level * using the t-distribution. - * @param p the confidence level + * @param p_ the confidence level */ - def interval (p: Double = .95): Double = + def interval (p_ : Double = .95): Double = val df = n - 1 // degrees of freedom - t_sigma (stdev, df, p) / sqrt (nd) + t_sigma (stdev, df, p_) / sqrt (nd) end interval //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the confidence interval half-width for the given confidence level * using the z-distribution. 
- * @param p the confidence level + * @param p_ the confidence level */ - def interval_z (p: Double = .95): Double = - z_sigma (stdev, p) / sqrt (nd) + def interval_z (p_ : Double = .95): Double = + z_sigma (stdev, p_) / sqrt (nd) end interval_z //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -351,6 +394,7 @@ end Statistic //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `statisticTest` main function is used to test the `Statistic` class. + * Test the collection of sample statistics. * > runMain scalation.mathstat.statisticTest */ @main def statisticTest (): Unit = @@ -361,7 +405,7 @@ end Statistic banner ("Test sample statistics") val stat1 = new Statistic () - for i <- 1 to 1000 do stat1.tally (rv.gen) + cfor (0, 1000) { _ => stat1.tally (rv.gen) } println (Statistic.labels) println (stat1) @@ -370,3 +414,21 @@ end Statistic end statisticTest + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `statisticTest2` main function is used to test the `Statistic` class. + * Show critical values for z- and t-distributions. 
+ * @see www.brockport.edu/live/files/6864-standardnormaldistributiontablepdf + * @see www.stat.purdue.edu/~lfindsen/stat503/t-Dist.pdf + * > runMain scalation.mathstat.statisticTest + */ +@main def statisticTest2 (): Unit = + + banner ("Critical Values for z- and t-distributions at p_ = .90 and .95") + println ("z_crit (0.90) = " + z_crit (0.90)) + println ("z_crit () = " + z_crit ()) + println ("t_crit (10, 0.90) = " + t_crit (10, 0.90)) + println ("t_crit (10) = " + t_crit (10)) + +end statisticTest2 + diff --git a/src/main/scala/scalation/mathstat/TensorD.scala b/src/main/scala/scalation/mathstat/TensorD.scala index 9cdbf703e..839cb8b50 100644 --- a/src/main/scala/scalation/mathstat/TensorD.scala +++ b/src/main/scala/scalation/mathstat/TensorD.scala @@ -16,18 +16,23 @@ package scalation package mathstat +import scala.annotation.unused import scala.collection.mutable.IndexedSeq -import scala.math.round import scala.runtime.ScalaRunTime.stringOf +import scalation.modeling.ActivationFun +import scalation.modeling.ActivationFun.{eLU, setA2, gaussian, geLU, logistic, logit, lreLU, setA, reLU, sigmoid} + +import TensorD._ + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Tensorize a vector function (V2V) by applying it to each (row, column) of a tensor. 
* @param f the vector function to tensorize * @param x the tensor to apply the function to */ -def tensorize(f: FunctionV2V)(x: TensorD): TensorD = - val t = new TensorD(x.dim) - cfor(x.indices)(i => cfor(x.indices2)(j => t(i, j) = f(x(i, j)))) +def tensorize (f: FunctionV2V)(x: TensorD): TensorD = + val t = new TensorD (x.dim, x.dim2, f(x(0, 0)).dim) + cfor (x.indices) { i => cfor (x.indices2) { j => t(i, j) = f(x(i, j)) }} t end tensorize @@ -64,12 +69,14 @@ end comple * @param v the 3D array for holding the tensor elements */ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, - private [mathstat] var v: Array [Array [Array [Double]]] = null) + private [mathstat] var v: Array [Array [Array [Double]]] = null) extends Serializable: private val flaw = flawf ("TensorD") // flaw flag private val TAB = "\t\t" // use "\t" for scala and "\t\t" for sbt + private val _shape = List (dim, dim2, dim3) // list of the dimensions of the tensor + val indices = 0 until dim // index range for the first level/dimension val indices2 = 0 until dim2 // index range for the second level/dimension val indices3 = 0 until dim3 // index range for the third level/dimension @@ -80,7 +87,6 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, v = Array.ofDim [Double] (dim, dim2, dim3) else if dim != v.length || dim2 != v(0).length || dim3 != v(0)(0).length then flaw ("init", "dimensions are wrong") - end if /** Format string used for printing vector values (change using setFormat) */ @@ -122,7 +128,8 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, * @param i the 1st dimension (row) index of the tensor * @param j the 2nd dimension (column) index of the tensor */ - def apply (i: Int, j: Int): VectorD = VectorD (v(i)(j).toIndexedSeq) + def apply (i: Int, j: Int): VectorD = VectorD (v(i)(j)) +// def apply (i: Int, j: Int): VectorD = VectorD (v(i)(j).toIndexedSeq) //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Retrieve the i, k-th VECTOR 
from the tensor x_i:k. @@ -130,9 +137,9 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, * @param all use the all columns indicator ? * @param k the 3rd dimension (sheet) index of the tensor */ - def apply (i: Int, all: Char, k: Int): VectorD = + def apply (i: Int, @unused all: Char, k: Int): VectorD = val a = Array.ofDim [Double] (dim2) - cfor (0, dim2) {j => a(j) = v(i)(j)(k)} + cfor (0, dim2) { j => a(j) = v(i)(j)(k) } new VectorD (dim2, a) end apply @@ -142,9 +149,9 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, * @param j the 2nd dimension (column) index of the tensor * @param k the 3rd dimension (sheet) index of the tensor */ - def apply (all: Char, j: Int, k: Int): VectorD = + def apply (@unused all: Char, j: Int, k: Int): VectorD = val a = Array.ofDim [Double] (dim) - cfor (0, dim) {i => a(i) = v(i)(j)(k)} + cfor (0, dim) { i => a(i) = v(i)(j)(k) } new VectorD (dim, a) end apply @@ -161,7 +168,7 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, * @param all use the all rows indicator ? * @param j the 2nd dimension (column) index of the tensor */ - def apply (all: Char, j: Int): MatrixD = + def apply (@unused all: Char, j: Int): MatrixD = val a = Array.ofDim [Double] (dim, dim3) cfor (0, dim) { i => cfor (0, dim3) { k => a(i)(k) = v(i)(j)(k) }} new MatrixD (dim, dim3, a) @@ -174,7 +181,7 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, * @param all2 use the all columns indicator ? 
* @param k the 3rd dimension (sheet) index of the tensor */ - inline def apply (all: Char, all2: Char, k: Int): MatrixD = + inline def apply (@unused all: Char, @unused all2: Char, k: Int): MatrixD = val a = Array.ofDim [Double] (dim, dim2) cfor (0, dim) { i => cfor (0, dim2) { j => a(i)(j) = v(i)(j)(k) }} new MatrixD (dim, dim2, a) @@ -182,10 +189,12 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Retrieve the ii._1 to ii._2 row slice of the tensor. - * @param ii 1st dimension (row) indices of the tensor + * @param ii 1st dimension (row) (start, end) indices of the tensor */ def apply (ii: (Int, Int)): TensorD = new TensorD (v.slice (ii._1, ii._2)) + inline def apply (ir: Range): TensorD = apply ((ir.start, ir.end)) + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Retrieve the ii._1 to ii._2, jj._1 to jj._2 row-column slice of the tensor. * @param ii 1st dimension (row) indices of the tensor (null => all) @@ -198,6 +207,8 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, new TensorD (u) end apply + inline def apply (ir: Range, jr: Range): TensorD = apply ((ir.start, ir.end), (jr.start, jr.end)) + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Retrieve the ii._1 to ii._2, jj._1 to jj._2, kk._1 to kk._2 * row-column-sheet slice of the tensor. @@ -230,6 +241,10 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, new TensorD (u) end apply + inline def apply (is: VectorI): TensorD = apply (is.toArray) + + inline def apply (is: Seq[Int]): TensorD = apply (is.toArray) + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Retrieve the is, js row-column selections from the tensor. 
* @param is 1st dimension (row) indices of the tensor (null => all) @@ -303,7 +318,7 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, * @param all2 a character indicating all columns should be included (typically '?'). * @param sheet the index of the sheet to extract from. */ - def apply (ir: Range, all2: Char, sheet: Int): MatrixD = + def apply (ir: Range, @unused all2: Char, sheet: Int): MatrixD = val slicedArray = v.slice (ir.start, ir.end).map (_.map (_(sheet))) new MatrixD (ir.size, dim2, slicedArray) end apply @@ -383,7 +398,7 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, * @param k 3rd dimension (sheet) index of the tensor * @param x the vector for updating the tensor at the above position */ - def update (i: Int, all: Char, k: Int, x: VectorD): Unit = + def update (i: Int, @unused all: Char, k: Int, x: VectorD): Unit = cfor (indices2) { j => v(i)(j)(k) = x(j)} end update @@ -395,7 +410,7 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, * @param k 3rd dimension (sheet) index of the tensor * @param x the vector for updating the tensor at the above position */ - def update (all: Char, j: Int, k: Int, x: VectorD): Unit = + def update (@unused all: Char, j: Int, k: Int, x: VectorD): Unit = cfor (indices) { i => v(i)(j)(k) = x(i)} end update @@ -416,7 +431,7 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, * @param j 2nd dimension (column) index of the tensor * @param x the matrix for updating the tensor at the above position */ - def update (all: Char, j: Int, x: MatrixD): Unit = + def update (@unused all: Char, j: Int, x: MatrixD): Unit = cfor (indices) { i => cfor (indices3) { k => v(i)(j)(k) = x(i, k)}} end update @@ -428,19 +443,19 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, * @param k the 3rd dimension (sheet) index of the tensor * @param x the matrix for updating the tensor at the above position */ - def update (all: Char, all2: Char, k: Int, x: MatrixD): Unit = + def update (@unused 
all: Char, @unused all2: Char, k: Int, x: MatrixD): Unit = cfor (indices) { i => cfor (indices2) { j => v(i)(j)(k) = x(i, j)}} end update //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Update a slice of the tensor with values from a given matrix. - * @param ir n the range of rows in the tensor to update. + * @param ir the range of rows in the tensor to update. * @param all2 a character indicating all columns should be updated (typically '?'). * @param sheet the index of the sheet in the tensor to update. * @param matrix the matrix containing the values to update the tensor with. * @throws IllegalArgumentException if the dimensions of the row range and matrix do not match. */ - def update (ir: Range, all2: Char, sheet: Int, matrix: MatrixD): Unit = + def update (ir: Range, @unused all2: Char, sheet: Int, matrix: MatrixD): Unit = require (ir.size == matrix.dim && dim2 == matrix.dim2, "Dimensions do not match the specified range and matrix.") @@ -451,13 +466,13 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Update a slice of the tensor with values from a given 3D block (matrix over multiple sheets). - * @param all1 a character indicating all rows should be updated (typically '?'). + * @param all a character indicating all rows should be updated (typically '?'). * @param all2 a character indicating all columns should be updated (typically '?'). * @param kr the range of sheets in the tensor to update. * @param tensorBlk the 3D block (rows x columns x sheets) containing the values to update the tensor with. * @throws IllegalArgumentException if the dimensions of the tensor block do not match the tensor's dimensions. 
*/ - def update (all1: Char, all2: Char, kr: Range, tensorBlk: TensorD): Unit = + def update (@unused all: Char, @unused all2: Char, kr: Range, tensorBlk: TensorD): Unit = require (dim == tensorBlk.dim && dim2 == tensorBlk.dim2, s"Row and column dimensions do not match: tensor.dim = $dim, $dim2; tensorBlk.dim = ${tensorBlk.dim}, ${tensorBlk.dim2}.") require (kr.size == tensorBlk.dim3, @@ -470,6 +485,35 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, } // cfor end update + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Update a slice of the tensor with values from a given 3D block. + * @param ir the range of rows in the tensor to update. + * @param jr the range of columns in the tensor to update. + * @param kr the range of sheets in the tensor to update. + * @param tensorBlk the 3D block containing the values to write. + * @throws IllegalArgumentException if the dimensions do not match. + */ + def update (ir: Range, jr: Range, kr: Range, tensorBlk: TensorD): Unit = + require (ir.size == tensorBlk.dim, + s"Row dimensions do not match: ir.size = ${ir.size}, tensorBlk.dim = ${tensorBlk.dim}.") + require (jr.size == tensorBlk.dim2, + s"Column dimensions do not match: jr.size = ${jr.size}, tensorBlk.dim2 = ${tensorBlk.dim2}.") + require (kr.size == tensorBlk.dim3, + s"Sheet dimensions do not match: kr.size = ${kr.size}, tensorBlk.dim3 = ${tensorBlk.dim3}.") + + val i0 = ir.start + val j0 = jr.start + val k0 = kr.start + + cfor (ir.indices) { ii => + cfor (jr.indices) { jj => + cfor (kr.indices) { kk => + v(i0 + ii)(j0 + jj)(k0 + kk) = tensorBlk(ii, jj, kk) + } // cfor + } // cfor + } // cfor + end update + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set all the tensor element values to x. 
* @param x the value to set all elements to @@ -482,10 +526,409 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, } // cfor end set +// Replace element operations: addition, subtraction, multiplication +// with Generalized function to perform element-wise operations with broadcasting. +// Supports TensorD, MatrixD, VectorD, and Double. + + type Broadcastable = TensorD | MatrixD | VectorD | Double + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + private def elementWiseDispatch (b: Broadcastable, op: (Double, Double) => Double, + inPlace: Boolean = false): TensorD = + b match + case t: TensorD => elementWiseOp (t, op, inPlace) + case m: MatrixD => broadcastAndApply (m, op, inPlace) + case v: VectorD => broadcastAndApply (v, op, inPlace) + case s: Double => elementWiseScalarOp (s, op, inPlace) + end elementWiseDispatch + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Generalized function to perform element-wise operations with broadcasting. 
+ * @param that the input tensor/matrix/vector/double + * @param op the operation (e.g., addition, subtraction, multiplication) + */ + private def elementWiseOp (that: TensorD, op: (Double, Double) => Double, + inPlace: Boolean = false): TensorD = + val outShape = broadcastShapes (shape, that.shape) + val left = broadcastTo (this, outShape) + val right = broadcastTo (that, outShape) + + val (d1, d2, d3) = (outShape(0), outShape(1), outShape(2)) + val c = + if inPlace then + require (outShape == shape, + s"In-place op not allowed when broadcast changes shape: $shape -> $outShape") + this + else + new TensorD (d1, d2, d3) + + cfor (0, d1) { i => + cfor (0, d2) { j => + cfor (0, d3) { k => c(i, j, k) = op(left(i, j, k), right(i, j, k)) } + } // Vfor + } // cfor + c + end elementWiseOp + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + private def elementWiseScalarOp (scalar: Double, op: (Double, Double) => Double, + inPlace: Boolean = false): TensorD = + val c = + if inPlace then + this + else + new TensorD (dim, dim2, dim3) + + cfor (indices) { i => + cfor (indices2) { j => + cfor (indices3) { k => c(i, j, k) = op(this (i, j, k), scalar) } + } // cfor + } // cfor + c + end elementWiseScalarOp + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Generalized function to broadcast MatrixD or VectorD and apply an element-wise operation. 
+ * @param b the matrix or vector + * @param op the operation (e.g., addition, subtraction) + */ + private def broadcastAndApply (b: MatrixD | VectorD, op: (Double, Double) => Double, + inPlace: Boolean): TensorD = + val broadcastedTensor = b match + case m: MatrixD => + val shape = broadcastShapes (this.shape, List(m.dim, m.dim2, 1)) + broadcastMatrix (m, Some((shape(0), shape(1), shape(2)))) + case v: VectorD => + val shape = broadcastShapes (this.shape, List(1, v.dim, 1)) + broadcastVector (v, 0, Some ((shape(0), shape(1), shape(2)))) + + elementWiseOp (broadcastedTensor, op, inPlace) + end broadcastAndApply + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + // Element-wise Operations + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + inline def + (b: Broadcastable): TensorD = elementWiseDispatch (b, _ + _) + + inline def - (b: Broadcastable): TensorD = elementWiseDispatch (b, _ - _) + + inline def * (b: Broadcastable): TensorD = elementWiseDispatch (b, _ * _) + + inline def / (b: Broadcastable): TensorD = elementWiseDispatch (b, _ / _) + + // FIX: Implement TensorD *~ for generalized tensor-tensor multiplication (tensordot) + inline def *~ (b: Broadcastable): TensorD = elementWiseDispatch (b, _ * _) + + inline def ~^ (b: Broadcastable): TensorD = elementWiseDispatch (b, math.pow) + + inline def max (that: TensorD): TensorD = elementWiseOp (that, math.max) + + inline def min (that: TensorD): TensorD = elementWiseOp (that, math.min) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + // Element-wise Operations (In-Place) + // Avoiding for now to prevent accidental data modification in Autograd + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + +// inline def += (b: Broadcastable): TensorD = elementWiseDispatch (b, _ + _, inPlace = true) +// +// inline def -= (b: Broadcastable): TensorD = elementWiseDispatch (b, _ - _, inPlace = true) +// +// 
inline def *= (b: Broadcastable): TensorD = elementWiseDispatch (b, _ * _, inPlace = true) +// +// inline def /= (b: Broadcastable): TensorD = elementWiseDispatch (b, _ / _, inPlace = true) +// +// FIX: Implement TensorD *~ for generalized tensor-tensor multiplication (tensordot) +// inline def *~= (b: Broadcastable): TensorD = elementWiseDispatch (b, _ * _, inPlace = true) +// +// inline def ~^= (b: Broadcastable): TensorD = elementWiseDispatch (b, math.pow, inPlace = true) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + // Scalar Reductions + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Sum of all elements. + */ + def sum: Double = + var total = 0.0 + cfor (0, dim) { i => + cfor (0, dim2) { j => + cfor (0, dim3) { k => total += this (i, j, k) } + } // cfor + } // cfor + total + end sum + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Mean of all elements. + */ + def mean: Double = sum / (dim * dim2 * dim3).toDouble + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Variance of all elements. + */ + def variance: Double = + val mu = mean + map_ (x => math.pow (x - mu, 2)).sum / (dim * dim2 * dim3).toDouble + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Standard deviation of all elements. 
+ */ + def std: Double = math.sqrt (variance) + + def normFSq: Double = map_ (x => x * x).sum + + def normF: Double = math.sqrt (normFSq) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + // Axis-wise Reductions + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + def meanAlongAxis (axis: Int): TensorD = TensorD.meanAlongAxis (this, axis) + + def stdAlongAxis (axis: Int): TensorD = TensorD.stdAlongAxis (this, axis) + + def sumAlongAxis (axis: Int): TensorD = TensorD.sumAlongAxis (this, axis) + + def varianceAlongAxis (axis: Int): TensorD = TensorD.varianceAlongAxis (this, axis) + + def standardize (axis: Int): TensorD = TensorD.standardize (this, axis) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + // Unary and Scalar Element-wise Operations + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Absolute value. + */ + def abs: TensorD = map_ (math.abs) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Ceil each element. + */ + def ceil: TensorD = map_ (math.ceil) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Clip elements between min and max. + */ + def clipByValue (minVal: Double, maxVal: Double): TensorD = + require (minVal <= maxVal, s"clipByValue: minVal ($minVal) should not be greater than maxVal ($maxVal)") + map_ (x => math.max (minVal, math.min (maxVal, x))) + end clipByValue + + def clipByNorm (maxNorm: Double): TensorD = + val currentNorm = normF + if currentNorm > maxNorm and currentNorm > 0.0 then + this * (maxNorm / currentNorm) + else + this + end clipByNorm + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Exponential. 
+ */ + def exp: TensorD = map_ (math.exp) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Floor each element. + */ + def floor: TensorD = map_ (math.floor) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Log base e (natural log). + */ + def log: TensorD = + map_ (v => if v > 0 then math.log (v) + else throw new ArithmeticException (s"log is not defined for non-positive value: $v")) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Log base 10. */ + def log10: TensorD = + map_ (v => if v > 0 then math.log10 (v) + else throw new ArithmeticException (s"log10 is not defined for non-positive value: $v")) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Log base-n. */ + def logBase (base: Double): TensorD = + map_ (v => if v > 0 then math.log (v) / math.log (base) + else throw new ArithmeticException (s"log base $base is undefined for non-positive: $v")) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Max with a scalar. + */ + def maxScalar (s: Double): TensorD = map_(x => math.max (x, s)) + + def maxValue: Double = flattenToVector.reduce (math.max) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Min with a scalar. + */ + def minScalar (s: Double): TensorD = map_(x => math.min (x, s)) + + def minValue: Double = flattenToVector.reduce (math.min) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Reciprocal. + */ + def reciprocal: TensorD = + map_ (v => if v != 0.0 then 1.0 / v + else throw new ArithmeticException ("Division by zero in reciprocal")) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Round each element. 
+ */ + def round: TensorD = map_ (x => math.round (x).toDouble) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Sign of each element (-1, 0, 1). + */ + def sign: TensorD = map_ (math.signum) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Square root. + */ + def sqrt: TensorD = + map_ (v => if v >= 0 then math.sqrt (v) + else throw new ArithmeticException (s"sqrt is not defined for negative value: $v")) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Raise each element to an integer power. + */ + def ~^ (s: Int): TensorD = elementWiseScalarOp (s, math.pow) +// def ** (s: Int): TensorD = elementWiseScalarOp (s, math.pow) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Negate the tensor (unary `-`). + */ + inline def unary_- : TensorD = this * (-1.0) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + // Activation Functions + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + def id: TensorD = TensorD.id_ (this) + + def reLU: TensorD = TensorD.reLU_ (this) + + def lreLU (alpha: Double = 0.2): TensorD = TensorD.lreLU_ (this, alpha) + + def eLU(alpha: Double = 1.0): TensorD = TensorD.eLU_ (this, alpha) + + def tanh: TensorD = TensorD.tanh_ (this) + + def sigmoid: TensorD = TensorD.sigmoid_ (this) + + def gaussian: TensorD = TensorD.gaussian_ (this) + + def geLU: TensorD = TensorD.geLU_ (this) + + def softmax: TensorD = TensorD.softmax_ (this) + + def logit: TensorD = TensorD.logit_ (this) + + def logistic (a: Double = 1.0, b: Double = 1.0, c: Double = 1.0): TensorD = + TensorD.logistic_ (this, a, b, c) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + // Other Operations + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + def zerosLike: TensorD = TensorD.zerosLike (this) + + def onesLike: TensorD = 
TensorD.onesLike (this) + + def fullLike (value: Double): TensorD = TensorD.fullLike (this, value) + + override def equals (obj: Any): Boolean = + obj match + case that: TensorD if this.dims == that.dims => + indices.forall { i => + indices2.forall { j => + indices3.forall { k => math.abs (this(i, j, k) - that(i, j, k)) <= 1e-9 } + } // forall + } // forall + case _ => false + end equals + + override def hashCode (): Int = + dims.hashCode * 31 + + (for i <- indices; j <- indices2; k <- indices3 yield this (i, j, k).##).## + end hashCode + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the dot product of two vectors stored as tensors. + * Both tensors must have shape (n, 1, 1), representing row vectors. + * Leverages the existing VectorD dot method. + * Returns a scalar wrapped in a tensor of shape (1, 1, 1). + * @param b the tensor to take the dot product with + */ + infix def dot (b: TensorD): TensorD = + val (mA, nA, dA) = dims + val (mB, nB, dB) = b.dims + require (nA == 1 && dA == 1 && nB == 1 && dB == 1 && mA == mB, + s"dot is only for vectors with shape (1, n, 1). Got shapes ${dims} and ${b.dims}") + + // Extract the row from each tensor as a VectorD. + val vA = new VectorD (mA, Array.tabulate (mA)(i => this(i, 0, 0))) + val vB = new VectorD (mB, Array.tabulate (mB)(i => b(i, 0, 0))) + + TensorD ((1, 1, 1), vA dot vB) + end dot + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Matrix‑matrix product for batch‑first tensors. 
+ * Expected shapes + * A : (1, m, k) + * B : (1, k, n) + * Returns + * C : (1, m, n) + */ + infix def matmul (b: TensorD): TensorD = + val (bA, _, kA) = dims + val (bB, kB, _) = b.dims + + require (bA == 1 && bB == 1 && kA == kB, + s"matmul requires shapes (1,m,k) × (1,k,n); got $dims × ${b.dims}") + + bmm (b) + end matmul + + def slice (i: Int): MatrixD = this(i) + + def setSlice (i: Int, d: MatrixD): Unit = this.update (i, d) + + infix def bmm (b: TensorD): TensorD = + val (dA, mA, kA) = dims + val (dB, kB, nB) = b.dims + + require(kA == kB, + s"BMM requires matching inner dims: got kA=$kA vs kB=$kB") + + val dOut = + if dA == dB then dA + else if dA == 1 then dB + else if dB == 1 then dA + else throw IllegalArgumentException (s"BMM batch dims must match or one must be 1; got ($dA, $dB)") + + val out = TensorD.fill (dOut, mA, nB, 0.0) + + val a0 = slice (0) + val c0 = b.slice (0) + + for b_ <- 0 until dOut do + val a = if dA == 1 then a0 else slice (b_) + val c = if dB == 1 then c0 else b.slice (b_) + out.setSlice (b_, a * c) + out + end bmm + +// Comments out old implementation of element-wise operations + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Add this tensor and tensor b. * @param b the tensor to add (requires leDimensions) - */ def + (b: TensorD): TensorD = val c = new TensorD (dim, dim2, dim3) cfor (indices) { i => @@ -495,11 +938,11 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, } // cfor c end + + */ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Add this tensor and scalar s. * @param s the scalar to add - */ def + (s: Double): TensorD = val c = new TensorD (dim, dim2, dim3) cfor (indices) { i => @@ -509,11 +952,11 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, } // cfor c end + + */ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** From this tensor subtract tensor b. 
* @param b the tensor to add (requires leDimensions) - */ def - (b: TensorD): TensorD = val c = new TensorD (dim, dim2, dim3) cfor (indices) { i => @@ -523,11 +966,11 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, } // cfor c end - + */ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** From this tensor subtract scalar s. * @param s the scalar to add - */ def - (s: Double): TensorD = val c = new TensorD (dim, dim2, dim3) cfor (indices) { i => @@ -537,11 +980,11 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, } // cfor c end - + */ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Multiply this tensor by scalar s. * @param s the scalar to multiply by - */ def * (s: Double): TensorD = val c = new TensorD (dim, dim2, dim3) cfor (indices) { i => @@ -551,6 +994,21 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, } // cfor c end * + */ + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Multiply element-wise (Hadamard product) this tensor by tensor b. + * @param b the tensor to add (requires leDimensions) + def *~ (b: TensorD): TensorD = + val c = new TensorD (dim, dim2, dim3) + cfor (indices) { i => + cfor (indices2) { j => + cfor (indices3) { k => c.v(i)(j)(k) = v(i)(j)(k) * b.v(i)(j)(k) } + } // cfor + } // cfor + c + end *~ + */ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Multiply (multi-linear product) this tensor by three matrices b, c and d. @@ -583,20 +1041,6 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, e end * - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Multiply element-wise (Hadamard product) this tensor by tensor b. 
- * @param b the tensor to add (requires leDimensions) - */ - def *~ (b: TensorD): TensorD = - val c = new TensorD (dim, dim2, dim3) - cfor (indices) { i => - cfor (indices2) { j => - cfor (indices3) { k => c.v(i)(j)(k) = v(i)(j)(k) * b.v(i)(j)(k) } - } // cfor - } // cfor - c - end *~ - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Map each row of this tensor by applying function f to each row matrix and * returning the collected result as a matrix. @@ -634,9 +1078,16 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, x end map_ + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Get the shape of this tensor as a list of integers. + * @return the shape of the tensor + */ + inline def shape: List [Int] = _shape + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Flatten this tensor in row-major fashion, returning a matrix containing * all the elements from the tensor. + * @return a `MatrixD` containing all elements of the tensor in row-major order. */ def flatten: MatrixD = val a = Array.ofDim [Double] (dim * dim2, dim3) @@ -649,6 +1100,101 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, new MatrixD (a.length, a(0).length, a) end flatten + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Flatten this 3D tensor into a 1D vector in row-major order. + * This method iterates through all elements of the tensor and stores them + * sequentially in a 1D array, which is then wrapped in a `VectorD` object. + * @return a `VectorD` containing all elements of the tensor in row-major order. 
+ */ + def flattenToVector: VectorD = + val arr = new Array [Double] (dim * dim2 * dim3) + var idx = 0 + cfor (0, dim) { i => + cfor (0, dim2) { j => + cfor (0, dim3) { k => arr(idx) = v(i)(j)(k); idx += 1 } + } // cfor + } // cfor + new VectorD (arr.length, arr) + end flattenToVector + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Reshape this tensor to a new shape while preserving the order of elements. + * This method ensures that the total number of elements remains the same + * and rearranges the elements into the specified dimensions. + * @param newShape the desired shape as a sequence of three integers + * @return a new `TensorD` with the specified shape + * @throws IllegalArgumentException if the total number of elements does not match + */ + def reshape (newShape: Seq [Int]): TensorD = + val (newDim, newDim2, newDim3) = (newShape(0), newShape(1), newShape(2)) + + require (dim * dim2 * dim3 == newDim * newDim2 * newDim3, + s"reshape requires the same number of elements: current=${dim * dim2 * dim3}, new=${newDim * newDim2 * newDim3}") + + val out = TensorD.fill (newDim, newDim2, newDim3, 0.0) + + val flat = flattenToVector + var idx = 0 + cfor (0, newDim) { i => + cfor (0, newDim2) { j => + cfor (0, newDim3) { k => out(i, j, k) = flat(idx); idx += 1 } + } // cfor + } // cfor + out + end reshape + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Permute the axes of this tensor according to the specified order. + * This method rearranges the dimensions of the tensor based on the given + * permutation of axes. The input `axes` specifies the new order of the axes. + * For example, if the original tensor has shape (a, b, c) and `axes` is + * Seq(1, 2, 0), the resulting tensor will have shape (b, c, a). 
+ * @param axes the sequence specifying the new order of the axes + * @return a new `TensorD` with permuted axes + * @throws IllegalArgumentException if `axes` does not contain a valid permutation + */ + def permute (axes: Seq [Int]): TensorD = + require (axes.length == 3 && axes.sorted == Seq (0, 1, 2), + s"permute requires a valid permutation of axes 0, 1, 2, got: $axes") + + val oldShape = shape + val newShape = axes.map (oldShape) + + val out = TensorD.fill (newShape(0), newShape(1), newShape(2), 0.0) + + // Compute inverse axes: tells where each new index came from + val invAxes = Array.ofDim [Int](3) + cfor (0, 3) { i => invAxes (axes(i)) = i } + + cfor (0, newShape(0)) { i => + cfor (0, newShape(1)) { j => + cfor (0, newShape(2)) { k => + val newIdx = Array (i, j, k) + val origI = newIdx(invAxes(0)) + val origJ = newIdx(invAxes(1)) + val origK = newIdx(invAxes(2)) + out(i, j, k) = v(origI)(origJ)(origK) + } // cfor + } // cfor + } // cfor + out + end permute + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Transpose/swap two axes of this tensor. + * @param i first axis index (0..2) + * @param j second axis index (0..2) + * @return a new TensorD with axes i and j swapped + * @throws IllegalArgumentException if an axis index is out of range + */ + def transpose (i: Int, j: Int): TensorD = + val axes = Array (0, 1, 2) + val tmp = axes(i) + axes(i) = axes(j) + axes(j) = tmp + permute (axes.toIndexedSeq) + end transpose + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Check whether the dimensions of this tensor are less than or equal to * le those of the other tensor b. 
@@ -665,7 +1211,7 @@ class TensorD (val dim: Int, val dim2: Int, val dim3: Int, val x = new TensorD (dim, dim2, dim3) cfor (indices) { i => cfor (indices2) { j => - cfor (indices3) { k => x.v(i)(j)(k) = round (v(i)(j)(k)).toDouble } + cfor (indices3) { k => x.v(i)(j)(k) = math.round (v(i)(j)(k)).toDouble } } // cfor } // cfor x @@ -824,6 +1370,349 @@ object TensorD: new TensorD (dim, dim2, dim3, a) end fill + // ---------------------------------------------------------------- + // Additional methods for autograd purposes + // ---------------------------------------------------------------- + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a new tensor filled with zeros, having the same dimensions as the given tensor. + * @param tensor the tensor to mimic in dimensions. + * @return A new tensor filled with zeros. + */ + def zerosLike (tensor: TensorD): TensorD = fill (tensor.dim, tensor.dim2, tensor.dim3, 0.0) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a new tensor filled with ones, based on the given dimensions tuple. + * @param dims a tuple representing the shape of the tensor (dim, dim2, dim3). + * @return a new tensor filled with ones. + */ + def ones (dims: (Int, Int, Int)): TensorD = fill (dims._1, dims._2, dims._3, 1.0) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a new tensor filled with ones, having the same dimensions as the given tensor. + * @param tensor the tensor to mimic in dimensions. + * @return a new tensor filled with ones. + */ + def onesLike (tensor: TensorD): TensorD = ones (tensor.dim, tensor.dim2, tensor.dim3) + + def fullLike (t: TensorD, value: Double): TensorD = fill (t.dim, t.dim2, t.dim3, value) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Sum all elements of the tensor along the specified axis. + * @param tensor the tensor to sum over. 
+ * @param axis the axis along which to sum (0 = rows, 1 = columns, 2 = sheets). + * @return A new TensorD with the reduced dimension. + */ + def sumAlongAxis (tensor: TensorD, axis: Int): TensorD = + axis match + case 0 => // sum along rows (collapse row dimension) + val result = new TensorD (1, tensor.dim2, tensor.dim3) + cfor (tensor.indices2) { j => + cfor (tensor.indices3) { k => + var sum = 0.0 + cfor (tensor.indices) { i => sum += tensor(i, j, k) } + result(0, j, k) = sum + } // cfor + } // cfor + result + + case 1 => // sum along columns (collapse column dimension) + val result = new TensorD (tensor.dim, 1, tensor.dim3) + cfor (tensor.indices) { i => + cfor (tensor.indices3) { k => + var sum = 0.0 + cfor (tensor.indices2) { j => sum += tensor(i, j, k) } + result(i, 0, k) = sum + } // cfor + } // cfor + result + + case 2 => // sum along sheets (collapse sheet dimension) + val result = new TensorD (tensor.dim, tensor.dim2, 1) + cfor (tensor.indices) { i => + cfor (tensor.indices2) { j => + var sum = 0.0 + cfor (tensor.indices3) { k => sum += tensor(i, j, k) } + result(i, j, 0) = sum + } // cfor + } // cfor + result + + case _ => throw new IllegalArgumentException (s"Invalid axis: $axis. 
Must be 0, 1, or 2.") + end sumAlongAxis + + def meanAlongAxis (x: TensorD, axis: Int): TensorD = + require (axis >= 0 && axis < x.shape.length, s"Invalid axis: $axis") + sumAlongAxis (x, axis) / x.shape(axis).toDouble + end meanAlongAxis + + def varianceAlongAxis (x: TensorD, axis: Int): TensorD = + require (axis >= 0 && axis < x.shape.length, s"Invalid axis: $axis") + val mu = meanAlongAxis (x, axis) + val variance = sumAlongAxis ((x - mu).map_ (v => v * v), axis) / (x.shape(axis).toDouble + 1e-8) + variance + end varianceAlongAxis + + def stdAlongAxis (x: TensorD, axis: Int): TensorD = + require (axis >= 0 && axis < x.shape.length, s"Invalid axis: $axis") + val variance = varianceAlongAxis (x, axis) + variance.map_ (math.sqrt) + end stdAlongAxis + + def standardize (x: TensorD, axis: Int): TensorD = + require (axis >= 0 && axis < x.shape.length, s"Invalid axis: $axis") + val meanVal = meanAlongAxis (x, axis) + val stdVal = stdAlongAxis (x, axis) + (x - meanVal) / (stdVal + 1e-8) + end standardize + + def diag (s: Double, size: Int): TensorD = + val tensor = new TensorD (size, size, size) + cfor (0, size) { i => tensor(i, i, i) = s } + tensor + end diag + + def scalar (s: Double): TensorD = TensorD ((1, 1, 1), s) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a TensorD from a VectorD with default shape (length, 1, 1). + * @param v the vector to convert + * @return A TensorD of shape (length, 1, 1) + */ + def fromVector (v: VectorD, axis: Int = 0): TensorD = broadcastVector (v, axis) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a TensorD from a MatrixD with default shape (1, rows, cols). 
+ * @param m the matrix to convert + * @return A TensorD of shape (1, rows, cols) + */ + def fromMatrix (m: MatrixD, shape: Option [(Int, Int, Int)] = None): TensorD = + TensorD.broadcastMatrix (m, shape) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Broadcast a MatrixD into a 3D tensor (TensorD) in batch‑first layout. + * - Base tensor is created with batch=1, rows=m.dim, cols=m.dim2 + * - We explicitly fill base(0,i,j) = m(i,j) so there’s n + * - If you request a larger batch, we replicate that slice across batch + */ + def broadcastMatrix (m: MatrixD, shape: Option [(Int, Int, Int)] = None): TensorD = + val rows = m.dim // r + val cols = m.dim2 // c + val sliceSize = rows * cols + + // Flatten in **column‑major** order: for each col, for each row + val flat = Array.tabulate (sliceSize) { idx => + val col = idx / rows + val row = idx % rows + m(row, col) + } + + // Build the base (1 × rows × cols) + val base = TensorD ((1, rows, cols), flat*) + + // If a larger batch is requested, replicate the same slice + shape match + case Some ((b, r, c)) => + require (r == rows && c == cols, + s"broadcastMatrix: ($r, $c) must match ($rows, $cols)") + if b == 1 then base + else + val outFlat = new Array [Double](b * sliceSize) + var off = 0 + var bb = 0 + while bb < b do + System.arraycopy(flat, 0, outFlat, off, sliceSize) + off += sliceSize; bb += 1 + end while + TensorD ((b, rows, cols), outFlat*) + case None => + base + end match + end broadcastMatrix + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Broadcast a VectorD into a 3D tensor (TensorD), allowing partial broadcasting. 
+ * The default shape is determined by the axis parameter: + * - If axis == 0, the default is a column vector: (v.dim, 1, 1) + * - If axis == 1, the default is a row vector: (1, v.dim, 1) + * - If axis == 2, the default is a sheet vector: (1, 1, v.dim) + * If a shape is provided, a base tensor is created with the default shape and then + * expanded to the given shape using `broadcastTo`. + */ + def broadcastVector (v: VectorD, axis: Int = 0, shape: Option [(Int, Int, Int)] = None): TensorD = + val data = v.toArray + + val baseShape: (Int, Int, Int) = axis match + case 0 => (v.dim, 1, 1) // column vector + case 1 => (1, v.dim, 1) // row vector + case 2 => (1, 1, v.dim) // sheet vector + case _ => throw new Exception ("Axis must be 0 (column), 1 (row), or 2 (sheet)") + + val base = TensorD(baseShape, data *) + + shape match + case Some ((d1, d2, d3)) => broadcastTo (base, List (d1, d2, d3)) + case None => base + end broadcastVector + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the broadcasted shape for two 3D shapes aShape and bShape. + * - If one dimension is 1 and the other is R, pick R. + * - If both are the same, pick that value. + * - Otherwise, throw an error for mismatched dimensions. + */ + def broadcastShapes (aShape: List [Int], bShape: List [Int]): List [Int] = + require (aShape.size == 3 && bShape.size == 3, "Only supports 3D shapes currently") + + // Perform broadcasting logic across all dimensions + aShape.zip (bShape).map { case (a, b) => + if a == b then a + else if a == 1 then b + else if b == 1 then a + else throw new IllegalArgumentException(s"Incompatible shapes: $aShape vs $bShape") + } // map + end broadcastShapes + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Expand a TensorD 'src' to 'newShape' if needed. + * If src.shape == newShape, just return src. Otherwise replicate data along + * any dimension that was 1 in src.shape but is >1 in newShape. 
+ */ + def broadcastTo (src: TensorD, newShape: List [Int]): TensorD = + val oldShape = src.shape + if oldShape == newShape then src + else + val (d1, d2, d3) = (newShape(0), newShape(1), newShape(2)) + val out = new TensorD (d1, d2, d3) + + cfor (0, d1) { i => + val iSrc = if oldShape(0) == 1 then 0 else i + cfor (0, d2) { j => + val jSrc = if oldShape(1) == 1 then 0 else j + cfor (0, d3) { k => + val kSrc = if oldShape(2) == 1 then 0 else k + out(i, j, k) = src(iSrc, jSrc, kSrc) + } // cfor + } // cfor + } // cfor + out + end broadcastTo + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Concatenate a sequence of 3D tensors along the specified axis (0, 1, or 2). + * @param xs sequence of TensorD to concatenate + * @param axis dimension along which to concatenate (0, 1, or 2) + */ + def concat(xs: Seq [TensorD], axis: Int): TensorD = + require (xs.nonEmpty, "concat: input sequence is empty") + require (axis >= 0 && axis <= 2, s"concat: invalid axis $axis") + + // All tensors must be 3D + xs.foreach { x => + require (x.shape.length == 3, + s"concat: all tensors must be 3D, got shape ${x.shape}") } + + // Shapes of all tensors + val shapes = xs.map (_.shape) + + val B0 = shapes.head(0) + val T0 = shapes.head(1) + val D0 = shapes.head(2) + + // Validate shapes based on axis + axis match + case 0 => // concat along rows + xs.foreach { x => + val s = x.shape + require (s(1) == T0 && s(2) == D0, + s"concat axis=0: T and D must match. Expected ($T0,$D0), got (${s(1)},${s(2)})") } + + case 1 => // concat along sequence + xs.foreach { x => + val s = x.shape + require (s(0) == B0 && s(2) == D0, + s"concat axis=1: B and D must match. Expected ($B0,$D0), got (${s(0)},${s(2)})") } + + case 2 => // concat along features + xs.foreach { x => + val s = x.shape + require (s(0) == B0 && s(1) == T0, + s"concat axis=2: B and T must match. 
Expected ($B0,$T0), got (${s(0)},${s(1)})") } + end match + + // Compute output shape + val B_out = axis match + case 0 => shapes.map (_(0)).sum + case 1 => B0 + case 2 => B0 + val T_out = axis match + case 0 => T0 + case 1 => shapes.map (_(1)).sum + case 2 => T0 + val D_out = axis match + case 0 => D0 + case 1 => D0 + case 2 => shapes.map (_(2)).sum + + // Allocate result and copy blocks into output + val out = TensorD.fill (B_out, T_out, D_out, 0.0) + + var cursor = 0 + axis match + // axis = 0: grow batch dimension + case 0 => + for x <- xs do + val Bi = x.shape.head + out(cursor until cursor + Bi, 0 until T0, 0 until D0) = x + cursor += Bi + // axis = 1: grow sequence dimension + case 1 => + for x <- xs do + val Ti = x.shape(1) + out(0 until B0, cursor until cursor + Ti, 0 until D0) = x + cursor += Ti + // axis = 2: grow feature dimension + case 2 => + for x <- xs do + val Di = x.shape(2) + out(0 until B0, 0 until T0, cursor until cursor + Di) = x + cursor += Di + end match + out + end concat + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + // Activations + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + def id_ (yp: TensorD): TensorD = yp + + def reLU_ (yp: TensorD): TensorD = yp.map_ (reLU) + + def lreLU_ (yp: TensorD, alpha: Double): TensorD = { setA (alpha); yp.map_ (lreLU) } + + def eLU_ (yp: TensorD, alpha: Double): TensorD = { setA2 (alpha); yp.map_ (eLU) } + + def tanh_ (yp: TensorD): TensorD = yp.map_ (math.tanh) + + def sigmoid_ (yp: TensorD): TensorD = yp.map_ (sigmoid) + + def gaussian_ (yp: TensorD): TensorD = yp.map_ (gaussian) + + def geLU_ (yp: TensorD): TensorD = yp.map_ (geLU) + + def softmax_ (yp: TensorD): TensorD = tensorize (ActivationFun.softmax_)(yp) + + def logit_ (yp: TensorD): TensorD = yp.map_ (logit) + + def logistic_ (yp: TensorD, a: Double, b: Double, c: Double): TensorD = yp.map_ (logistic(_, a, b, c)) + + // Other operations + + def max (x: TensorD, y: TensorD): 
TensorD = x.max (y) + + def min (x: TensorD, y: TensorD): TensorD = x.min (y) + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the cross-correlation tensor for the given data matrix for up to * maxLags. @@ -923,7 +1812,7 @@ end tensorDTest //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TensorDTest2` main function is used to test the `TensorD` class. +/** The `tensorDTest2` main function is used to test the `TensorD` class. * It tests pulling matrices and vectors from the tensor. * > runMain scalation.mathstat.tensorDTest2 */ @@ -970,7 +1859,7 @@ end tensorDTest2 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TensorDTest3` main function is used to test the `TensorD` class. +/** The `tensorDTest3` main function is used to test the `TensorD` class. * It tests the use of tensors and matrices for convolutional operation needed in * Convolutional Nets. * > runMain scalation.mathstat.tensorDTest3 @@ -997,3 +1886,309 @@ end tensorDTest2 end tensorDTest3 + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tensorDTest4` main function is used to test the `TensorD` class. + * It tests all element-wise operations with TensorD, MatrixD, VectorD, and scalars + * that can actually broadcast properly. 
+ * > runMain scalation.mathstat.tensorDTest4 + */ +@main def tensorDTest4 (): Unit = + + // Create a base 3D tensor of shape (2,3,4) + val t1 = TensorD ( (2, 3, 4), + // 24 elements for shape (2,3,4) + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 13,14, 15, 16, 17, 18,19, 20, 21,22, 23, 24 + ) + println (s"Tensor t1 => shape = (2,3,4):\n$t1") + + // Another same-shape tensor (2,3,4) + val t2 = TensorD ( (2, 3, 4), + // 24 more elements + 2, 3, 4, 5, 6, 7, 8, 9, 10,11, 12, 13, + 14,15, 16,17, 18,19, 20,21, 22,23, 24, 25 + ) + println (s"Tensor t2 => shape = (2,3,4):\n$t2") + + // Matrix of shape (2,3) => broadcasts to (2,3,1), then final (2,3,4) + val m1 = MatrixD ((2, 3), 1, 2, 3, + 4, 5, 6) + println (s"Matrix m1 => shape = (2,3):\n$m1") + + // Vector of length 3 => broadcasts to (1,3,1), then final (2,3,4) + val v1 = VectorD (1, 2, 3) + println (s"Vector v1 => length = 3:\n$v1") + + // Scalar + val s = 2.0 + println (s"Scalar s => $s") + + // ------------------- Broadcasting Tests ------------------- + banner ("Addition Tests") + println (s"t1 + t2:\n${t1 + t2}") + println (s"t1 + m1:\n${t1 + m1}") + println (s"t1 + v1:\n${t1 + v1}") + println (s"t1 + s :\n${t1 + s}") + + banner ("Subtraction Tests") + println (s"t1 - t2:\n${t1 - t2}") + println (s"t1 - m1:\n${t1 - m1}") + println (s"t1 - v1:\n${t1 - v1}") + println (s"t1 - s :\n${t1 - s}") + + banner ("Multiplication Tests") + println (s"t1 * t2:\n${t1 * t2}") + println (s"t1 * m1:\n${t1 * m1}") + println (s"t1 * v1:\n${t1 * v1}") + println (s"t1 * s :\n${t1 * s}") + + banner ("Division Tests") + println (s"t1 / t2:\n${t1 / t2}") + println (s"t1 / m1:\n${t1 / m1}") + println (s"t1 / v1:\n${t1 / v1}") + println (s"t1 / s :\n${t1 / s}") + + banner ("Element-wise Hadamard Product ( *~ )") + println (s"t1 *~ t2:\n${t1 *~ t2}") + println (s"t1 *~ m1:\n${t1 *~ m1}") + println (s"t1 *~ v1:\n${t1 *~ v1}") + + banner ("Negation Tests") + println (s"-t1:\n${-t1}") + +end tensorDTest4 + + 
+//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tensorDTest5` main function is used to test the `TensorD` class. + * It tests other operations with TensorD, MatrixD, and VectorD. + * > runMain scalation.mathstat.tensorDTest5 + */ +@main def tensorDTest5 (): Unit = + + // Create two TensorD objects that behave as row vectors (n, 1, 1) + val A = VectorD (1, 2, 3) + val B = VectorD (4, 5, 6) + val tensorA = TensorD.fromVector (A) + val tensorB = TensorD.fromVector (B) + + // Compute dot product + val result = tensorA.dot (tensorB) + + // Expected result: (1*4 + 2*5 + 3*6) = 32.0 + println (s"Dot product result: $result") + assert (result(0)(0)(0) == 32.0, s"Test failed! Expected 32.0 but got $result") + + println ("✅ dot product test passed!") + + // ---------------- MatrixD Inputs ---------------- + val C = MatrixD((2, 3), 1, 2, 3, + 4, 5, 6) + val D = MatrixD ((3, 2), 7, 8, + 9, 10, + 11, 12) + + println (s"C :\n$C") + println (s"D :\n$D") + + // ---------------- Convert to TensorD (batch-first) ---------------- + val tensorC = TensorD.fromMatrix (C, Some((1, 2, 3))) // (1, 2, 3) + val tensorD = TensorD.fromMatrix (D, Some((1, 3, 2))) // (1, 3, 2) + + println (s"tensorC : \n${tensorC(0)}") + println (s"tensorD : \n${tensorD(0)}") + println (s"C shape: ${C.dims}") + println (s"D shape: ${D.dims}") + println (s"tensorC shape: ${tensorC.shape}") + println (s"tensorD shape: ${tensorD.shape}") + + // ---------------- Perform Matrix Multiplication ---------------- + val resultMat = tensorC.matmul (tensorD) // (1, 2, 2) + println (s"Matmul result:\n${resultMat(0)}") + + // ---------------- Expected Result ---------------- + // Computed as: [1 2 3] * D = [58 64], [4 5 6] * D = [139 154] + val expected = fromMatrix (MatrixD ((2, 2), 58.0000, 64.0000, + 139.000, 154.000)) + + // ---------------- Assert and Pass ---------------- + assert (resultMat == expected, s"Test failed! 
Expected ${expected} but got ${resultMat}") + println ("✅ matmul test passed!") + +end tensorDTest5 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tensorDTest6` main function is used to test the `TensorD` class. + * It tests operations with and without broadcasting. + * > runMain scalation.mathstat.tensorDTest6 + */ +@main def tensorDTest6 (): Unit = + + println ("==> Test 1: No Broadcasting") + + val A0 = MatrixD ((2, 3), 1, 2, 3, + 4, 5, 6) + val A1 = MatrixD ((2, 3), 7, 8, 9, + 10, 11, 12) + val B0 = MatrixD ((3, 2), 1, 2, + 3, 4, + 5, 6) + val B1 = MatrixD ((3, 2), 7, 8, + 9, 10, + 11, 12) + + val tensorA1 = TensorD (A0, A1) // Shape: (2, 2, 3) + val tensorB1 = TensorD (B0, B1) // Shape: (2, 3, 2) + + val result1 = tensorA1.bmm (tensorB1) + val expected1 = TensorD (A0 * B0, A1 * B1) + + println (s"Result 1:\n$result1") + assert (result1 == expected1, "❌ Test 1 failed!") + println ("✅ Test 1 passed (No broadcasting)") + + // --------------------------------------------------------- + + println ("==> Test 2: Broadcast A") + + val tensorA2 = TensorD (A0) // Shape: (1, 2, 3) + val result2 = tensorA2.bmm (tensorB1) + val expected2 = TensorD (A0 * B0, A0 * B1) + + println (s"Result 2:\n$result2") + assert (result2 == expected2, "❌ Test 2 failed!") + println ("✅ Test 2 passed (Broadcast A)") + + // --------------------------------------------------------- + + println ("==> Test 3: Broadcast B") + + val tensorB3 = TensorD (B0) // Shape: (1, 3, 2) + val result3 = tensorA1.bmm (tensorB3) + val expected3 = TensorD (A0 * B0, A1 * B0) + + println (s"Result 3:\n$result3") + assert (result3 == expected3, "❌ Test 3 failed!") + println ("✅ Test 3 passed (Broadcast B)") + + println ("🎉 All bmm tests passed successfully!") + +end tensorDTest6 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tensorDTest7` main function is used to test the `TensorD` class.
+ * It tests permuting tensors. + * > runMain scalation.mathstat.tensorDTest7 + */ +@main def tensorDTest7 (): Unit = + + banner ("TensorD Permute Function - Axis Permutation Tests") + + // Create a small reference tensor with unique values + // Shape: (2, 3, 4) + val t = TensorD ((2, 3, 4), + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, // Slice 0 (i = 0) + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 // Slice 1 (i = 1) + ) + + println (s"Original shape: ${t.shape}") + println (s"Original tensor (flattened): ${t.flattenToVector.mkString(", ")}") + + // Define all valid permutations of (0, 1, 2) + val permutations = Seq ( + Seq (0, 1, 2), // identity + Seq (0, 2, 1), + Seq (1, 0, 2), + Seq (1, 2, 0), + Seq (2, 0, 1), + Seq (2, 1, 0)) + + // Check that double-permute restores the original shape and values + for perm <- permutations do + val permuted = t.permute(perm) + val reversePerm = perm.zipWithIndex.sortBy(_._1).map(_._2) // inverse permutation + val unpermuted = permuted.permute(reversePerm) + + val isSame = t.flattenToVector == unpermuted.flattenToVector + val shapeOk = t.shape == unpermuted.shape + + assert (isSame && shapeOk, + s"❌ Failed on permutation $perm → reversed as $reversePerm") + + println (s"✅ Permute $perm → unpermute $reversePerm: Passed") + end for + + println ("\n🎉 All permute tests passed successfully!") + +end tensorDTest7 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tensorDTest8` main function is used to test the `TensorD` class. + * It tests concatenating tensors. 
+ * > runMain scalation.mathstat.tensorDTest8 + */ +@main def tensorDTest8 (): Unit = + + banner ("TensorD Concat Tests") + + // ----------------------------------- + // axis = 0 + banner ("Testing concat along axis 0") + + val a0 = TensorD ((1, 2, 2), 1, 2, + 3, 4) + + val b0 = TensorD ((1, 2, 2), 5, 6, + 7, 8) + + val c0 = TensorD.concat (Seq (a0, b0), 0) + + assert ((c0(0).flatten.toList ++ c0(1).flatten.toList) == + (a0.flattenToVector.toList ++ b0.flattenToVector.toList), + s"❌ concat axis 0 failed: got\n$c0") + + banner ("✅ concat axis 0 passed") + + // ----------------------------------- + // axis = 1 + banner ("Testing concat along axis 1") + + val a1 = TensorD ((1, 1, 2), 1, 2) + + val b1 = TensorD ((1, 2, 2), 3, 4, + 5, 6) + + val c1 = TensorD.concat (Seq (a1, b1), 1) + + assert (c1(?, 0).flatten.toList == a1.flattenToVector.toList && + c1(0 until c1.dim, 1 until c1.dim2, 0 until c1.dim3).flattenToVector.toList == b1.flattenToVector.toList, + s"❌ concat axis 1 failed:\n$c1") + + println ("✅ concat axis 1 passed") + + // ----------------------------------- + // axis = 2 + banner ("Testing concat along axis 2") + + val a2 = TensorD ((1, 2, 1), 1, + 2) + + val b2 = TensorD ((1, 2, 2), 3, 4, + 5, 6) + + val c2 = TensorD.concat (Seq (a2, b2), 2) + + assert (c2(?, ?, 0).flatten.toList == a2.flattenToVector.toList && + c2(0 until c2.dim, 0 until c2.dim2, 1 until c2.dim3).flattenToVector.toList == b2.flattenToVector.toList, + s"❌ concat axis 2 failed:\n$c2") + + println ("✅ concat axis 2 passed") + + println ("\n🎉 All concat tests passed successfully!") + +end tensorDTest8 + diff --git a/src/main/scala/scalation/mathstat/TimeStatistic.scala b/src/main/scala/scalation/mathstat/TimeStatistic.scala index 1f39a0be4..ae6018e34 100644 --- a/src/main/scala/scalation/mathstat/TimeStatistic.scala +++ b/src/main/scala/scalation/mathstat/TimeStatistic.scala @@ -179,7 +179,7 @@ end TimeStatistic banner ("Test sample statistics") val stat1 = new Statistic () - for i <- 1 to
1000 do stat1.tally (rv.gen) + cfor (0, 1000) { _ => stat1.tally (rv.gen) } println (Statistic.labels) println (stat1) diff --git a/src/main/scala/scalation/mathstat/TnT_Split.scala b/src/main/scala/scalation/mathstat/TnT_Split.scala index 37d9072b8..acd720c01 100644 --- a/src/main/scala/scalation/mathstat/TnT_Split.scala +++ b/src/main/scala/scalation/mathstat/TnT_Split.scala @@ -17,8 +17,14 @@ import scala.collection.mutable.IndexedSeq import scalation.random.PermutedVecI //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TnT_Split` object provides methods for splitting datasets into testing-sets - * and training-sets. +/** The `TnT_Split` object provides methods for splitting datasets into TESTING-sets (.) + * and TRAINING-sets (-). There are three options on how to split the full dataset: + * (1) select at RANDOM `n_test` indices for the test-set (@see tnT_SplitTest`) + * | -.--.---.--.----.--.---.-----.| + * (2) select the FIRST `n_test` indices for the test-set (@see tnT_SplitTest2`) + * | test-set | training-set | + * (3) select the LAST `n_test` indices for the test-set (@see tnT_SplitTest3`) + * | training-set | test-set | */ object TnT_Split: @@ -34,8 +40,8 @@ object TnT_Split: end makePermGen //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the indices for the test-set. - * @oaram permGen the permutation generator + /** Return the indices for the test-set. If rando = false, pick the FIRST indices. + * @oaram permGen the permutation generator (may be null when rando = false) * @param n_test the size of test-set * @param rando whether to select indices randomly or in blocks (defaults to true) */ @@ -49,6 +55,23 @@ object TnT_Split: else Set.range (0, n_test) // ordered indices end testIndices2 + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the indices for the test-set. If rando = false, pick the LAST indices. 
+ * @param permGen the permutation generator (may be null when rando = false) + * @param n_total the size of full dataset (train plus test) + * @param n_test the size of test-set + * @param rando whether to select indices randomly or in blocks (defaults to true) + */ + def testIndices (permGen: PermutedVecI, n_total: Int, n_test: Int, rando: Boolean): IndexedSeq [Int] = + (if rando then permGen.igen (0 until n_test) // permuted indices + else VectorI.range (n_total - n_test, n_total)).toMuIndexedSeq // ordered indices + end testIndices + + def testIndices2 (permGen: PermutedVecI, n_total: Int, n_test: Int, rando: Boolean): Set [Int] = + if rando then permGen.igen (0 until n_test).toSet [Int] // permuted indices + else Set.range (n_total - n_test, n_total) // ordered indices + end testIndices2 + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Split the dataset given as a combined data-response matrix into a testing-set * and training-set based on the given indices. @@ -119,6 +142,7 @@ import TnT_Split.{makePermGen, testIndices} //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `tnT_SplitTest` main function tests the `TnT_Split` object using the Texas * Temperatures dataset. It is split into a testing-set and a training-set. + * Tests when rando = true and RANDOMLY picks the indices for the testing-set.
* > runMain scalation.mathstat.tnT_SplitTest */ @main def tnT_SplitTest (): Unit = @@ -148,7 +172,7 @@ import TnT_Split.{makePermGen, testIndices} banner ("Testing-set indices") val permGen = makePermGen (xy.dim) // make a permutation generator val n_test = (0.4 * xy.dim).toInt // determine the size of the test-set (40%) - val idx = testIndices (permGen, n_test) // produce the indices for the test-set + val idx = testIndices (permGen, n_test) // produce the indices for the test-set (RANDOMLY) println (s"n_test = $n_test, idx = $idx") // Test with combined data-response matrix @@ -179,3 +203,131 @@ import TnT_Split.{makePermGen, testIndices} end tnT_SplitTest + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tnT_SplitTest2` main function tests the `TnT_Split` object using the Texas + * Temperatures dataset. It is split into a testing-set and a training-set. + * Tests when rando = false and picks the FIRST indices for the testing-set. + * > runMain scalation.mathstat.tnT_SplitTest2 + */ +@main def tnT_SplitTest2 (): Unit = + + // Combined data-response matrix + // 16 data points: one x1 x2 x3 y + // Lat Elev Long Temp County + val xy = MatrixD ((16, 5), 1.0, 29.767, 41.0, 95.367, 56.0, // 0. Harris + 1.0, 32.850, 440.0, 96.850, 48.0, // 1. Dallas + 1.0, 26.933, 25.0, 97.800, 60.0, // 2. Kennedy + 1.0, 31.950, 2851.0, 102.183, 46.0, // 3. Midland + 1.0, 34.800, 3840.0, 102.467, 38.0, // 4. Deaf Smith + 1.0, 33.450, 1461.0, 99.633, 46.0, // 5. Knox + 1.0, 28.700, 815.0, 100.483, 53.0, // 6. Maverick + 1.0, 32.450, 2380.0, 100.533, 46.0, // 7. Nolan + 1.0, 31.800, 3918.0, 106.400, 44.0, // 8. El Paso + 1.0, 34.850, 2040.0, 100.217, 41.0, // 9. Collington + 1.0, 30.867, 3000.0, 102.900, 47.0, // 10. Pecos + 1.0, 36.350, 3693.0, 102.083, 36.0, // 11. Sherman + 1.0, 30.300, 597.0, 97.700, 52.0, // 12. Travis + 1.0, 26.900, 315.0, 99.283, 60.0, // 13. Zapata + 1.0, 28.450, 459.0, 99.217, 56.0, // 14. 
Lasalle + 1.0, 25.900, 19.0, 97.433, 62.0) // 15. Cameron + + println (s"xy = $xy") + + banner ("Testing-set indices") + val n_test = (0.4 * xy.dim).toInt // determine the size of the test-set (40%) + val idx = testIndices (null, n_test, rando = false) // produce the indices for the test-set (FIRST) + println (s"n_test = $n_test, idx = $idx") + + // Test with combined data-response matrix + + banner ("TnT Split combined data-response matrix") + val (xy_test, xy_train) = TnT_Split (xy, idx) // TnT split the dataset xy (row split) + + banner ("Testing-set") + println (s"xy_test = $xy_test") + + banner ("Training-set") + println (s"xy_train = $xy_train") + + // Test with separate data matrix and response vector + + banner ("TnT Split separate data matrix and response vector") + val (x, y) = (xy.not (?, 4), xy(?, 4)) // make data matrix and response vector (column split) + + val (x_test, x_train, y_test, y_train) = TnT_Split (x, y, idx) // TnT split the dataset (x, y) (row split) + + banner ("Testing-set") + println (s"x_test = $x_test") + println (s"y_test = $y_test") + + banner ("Training-set") + println (s"x_train = $x_train") + println (s"y_train = $y_train") + +end tnT_SplitTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tnT_SplitTest3` main function tests the `TnT_Split` object using the Texas + * Temperatures dataset. It is split into a testing-set and a training-set. + * Tests when rando = false and picks the LAST indices for the testing-set. + * > runMain scalation.mathstat.tnT_SplitTest3 + */ +@main def tnT_SplitTest3 (): Unit = + + // Combined data-response matrix + // 16 data points: one x1 x2 x3 y + // Lat Elev Long Temp County + val xy = MatrixD ((16, 5), 1.0, 29.767, 41.0, 95.367, 56.0, // 0. Harris + 1.0, 32.850, 440.0, 96.850, 48.0, // 1. Dallas + 1.0, 26.933, 25.0, 97.800, 60.0, // 2. Kennedy + 1.0, 31.950, 2851.0, 102.183, 46.0, // 3. Midland + 1.0, 34.800, 3840.0, 102.467, 38.0, // 4. 
Deaf Smith + 1.0, 33.450, 1461.0, 99.633, 46.0, // 5. Knox + 1.0, 28.700, 815.0, 100.483, 53.0, // 6. Maverick + 1.0, 32.450, 2380.0, 100.533, 46.0, // 7. Nolan + 1.0, 31.800, 3918.0, 106.400, 44.0, // 8. El Paso + 1.0, 34.850, 2040.0, 100.217, 41.0, // 9. Collington + 1.0, 30.867, 3000.0, 102.900, 47.0, // 10. Pecos + 1.0, 36.350, 3693.0, 102.083, 36.0, // 11. Sherman + 1.0, 30.300, 597.0, 97.700, 52.0, // 12. Travis + 1.0, 26.900, 315.0, 99.283, 60.0, // 13. Zapata + 1.0, 28.450, 459.0, 99.217, 56.0, // 14. Lasalle + 1.0, 25.900, 19.0, 97.433, 62.0) // 15. Cameron + + println (s"xy = $xy") + + banner ("Testing-set indices") + val n_test = (0.4 * xy.dim).toInt // determine the size of the test-set (40%) + val idx = testIndices (null, xy.dim, n_test, rando = false) // produce the indices for the test-set (LAST) + println (s"n_test = $n_test, idx = $idx") + + // Test with combined data-response matrix + + banner ("TnT Split combined data-response matrix") + val (xy_test, xy_train) = TnT_Split (xy, idx) // TnT split the dataset xy (row split) + + banner ("Testing-set") + println (s"xy_test = $xy_test") + + banner ("Training-set") + println (s"xy_train = $xy_train") + + // Test with separate data matrix and response vector + + banner ("TnT Split separate data matrix and response vector") + val (x, y) = (xy.not (?, 4), xy(?, 4)) // make data matrix and response vector (column split) + + val (x_test, x_train, y_test, y_train) = TnT_Split (x, y, idx) // TnT split the dataset (x, y) (row split) + + banner ("Testing-set") + println (s"x_test = $x_test") + println (s"y_test = $y_test") + + banner ("Training-set") + println (s"x_train = $x_train") + println (s"y_train = $y_train") + +end tnT_SplitTest3 + diff --git a/src/main/scala/scalation/mathstat/Transform.scala b/src/main/scala/scalation/mathstat/Transform.scala index 07163c841..c40b31689 100644 --- a/src/main/scala/scalation/mathstat/Transform.scala +++ b/src/main/scala/scalation/mathstat/Transform.scala @@ -5,7 +5,8 
@@ * @date Thu Mar 13 14:06:11 EDT 2025 * @see LICENSE (MIT style license file). * - * @note Support for Transformation Functions with their Inverse + * @note Support for Common Transformation Functions with their Inverse + * @see `modeling.TranRegression` for `box_cox` and `yeo_john` transformations * * https://www.infoq.com/news/2023/10/foreign-function-and-memory-api/ */ @@ -13,206 +14,607 @@ package scalation package mathstat -import scala.math._ +import scala.annotation.unused + +import VectorDOps._ + +type VecMat = VectorD | MatrixD //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Thee `⚬` extension method performs function composition f ⚬ g. +/** The `⚬` extension method performs function composition f ⚬ g. + * The composition f ⚬ g = f (g (.)) maps A -g-> B -f-> R. * @see www.scala-lang.org/api/current/scala/Function1.html * @tparam A the type to which function `g` can be applied * @tparam B the type to which function `f` can be applied * @tparam R the return type for f ⚬ g + * @param f the function from domain B to range R */ -extension [A, B, R](f: B => R) +extension [A, B, R] (f: B => R) def ⚬ (g: A => B): A => R = f compose g //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Transform` trait supports the use of transformation functions, such that it +/** The `mu_sig` and `min_max` extend the methods defined for vectors and matrices + * to work for types that are either `VectorD` or `MatrixD`. + * @param x the argument that is either a `VectorD` or `MatrixD`. + */ +extension (x: VecMat) + + def mu_sig: VecMat = + x match + case xV: VectorD => xV.mu_sig + case xM: MatrixD => xM.mu_sig + + def min_max: VecMat = + x match + case xV: VectorD => xV.min_max + case xM: MatrixD => xM.min_max + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Transform` object defines hyper-parameters for Transforms. 
+ */ +object Transform: + + /** Base hyper-parameter specification for regression based time series models. + */ + val hp = new HyperParameter + hp += ("a", 1.0, 1.0) // the multiplier (positive) + hp += ("an", -1.0, -1.0) // the multiplier (negative) + hp += ("c", -1.0, -1.0) // the reciprocal power + hp += ("f", 0.1, 0.1) // the frequency for cos, sin + hp += ("l", 0.4, 0.4) // the Box-Cox lambda (l) + hp += ("p", 1.5, 1.5) // the real power + hp += ("q", 0.67, 0.67) // the rational power + hp += ("r", 0.5, 0.5) // the root + hp += ("s", 0.0, 0.0) // the amount of shift + hp += ("ss", 1.0, 1.0) // the amount of shift for log + + val a = hp("a").toDouble + val an = hp("an").toDouble + val c = hp("c").toDouble + val f = hp("f").toDouble + val l = hp("l").toDouble + val p = hp("p").toDouble + val q = hp("q").toDouble + val r = hp("r").toDouble + val s = hp("s").toDouble + val ss = hp("ss").toDouble + +end Transform + +import Transform._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `TransformT` enum defines the types of transforms.
+ * @param name the name of the transform + * @param form a function to instantiate a `Transform` + * @param wlu the default vectors of parameters (w) and their lower (l) and upper bounds (u) + */ +enum TransformT (val name: String, val form: VecMat => Transform, val wlu: TransformS): + + case Norm extends TransformT ("NormForm", // x -> (x - μ)/σ + x => NormForm (x), + null) // NormForm (linear rescaling) + + case MinMax extends TransformT ("MinMaxForm", // x -> l + (x-min)(u-l)/(max-min) + x => MinMaxForm (x), + null) // MinMaxForm (linear rescaling) + + case Center extends TransformT ("CenterForm", // x -> x - μ + x => CenterForm (x), + null) // CenterForm (linear rescaling) + + case Pow extends TransformT ("PowForm", // x -> (x + s)^p + x => PowForm (x.asInstanceOf [VectorD]), + new TransformS ((s, p), (0.0, 1.1), (10.0, 4.0))) // PowForm real p in [1.1, 4.0] + + case PowR extends TransformT ("PowRForm", // x -> (x + s)↑q + x => PowRForm (x.asInstanceOf [VectorD]), + new TransformS ((s, q), (0.0, 0.1), (10.0, 0.9))) // PowRForm rational q in [0.1, 0.9] + + case Boxcox extends TransformT ("BoxcoxForm", // x -> (x^l - 1) / l + x => BoxcoxForm (x.asInstanceOf [VectorD]), + new TransformS ((l, 0.0), (-4.0, 0.0), (4.0, 0.0))) // PowForm real l in [-4.0, 4.0] + + case Root extends TransformT ("RootForm", // x -> (x + s)^r + x => PowForm (x.asInstanceOf [VectorD]), + new TransformS ((s, r), (0.0, 0.1), (10.0, 0.9))) // RootForm (PowForm r in [0.1, 0.9]) + + case Recip extends TransformT ("RecipForm", // x -> (x + s)^c + x => PowForm (x.asInstanceOf [VectorD]), + new TransformS ((s, c), (0.0, -4.0), (10.0, -0.1))) // RecipForm (PowForm c in [-4.0, -0.1]) + + case Log extends TransformT ("LogForm", // x -> log (a x + ss) + x => LogForm (x.asInstanceOf [VectorD]), + new TransformS ((ss, a), (0.0, 0.1), (10.0, 10.0))) // LogForm (replaces PowForm in (-0.1, 0.1)) + + case Log1p extends TransformT ("Log1pForm", // x -> log (x + 1) + x => Log1pForm (x), + null) // LogForm 
(replaces PowForm in (-0.1, 0.1)) + + case Exp extends TransformT ("ExpForm", // x -> exp (a x + s) + x => ExpForm (x.asInstanceOf [VectorD]), + new TransformS ((s, a), (0.0, 0.1), (10.0, 10.0))) // ExpForm (replaces PowForm above 4) + + case NExp extends TransformT ("NExpForm", // x -> exp (an x + s) + x => ExpForm (x.asInstanceOf [VectorD]), + new TransformS ((s, an), (-10.0, -10.0), (0.0, -0.1))) // NExpForm (replaces RecipForm below -4) + + case Cos extends TransformT ("CosForm", // x -> cos (2π f x + s) + x => CosForm (x.asInstanceOf [VectorD]), + new TransformS ((s, f), (0.0, 0.01), (10.0, 10.0))) // CosForm (for wave forms) + + case Sin extends TransformT ("SinForm", // x -> sin (2π f x + s) + x => SinForm (x.asInstanceOf [VectorD]), + new TransformS ((s, f), (0.0, 0.01), (10.0, 10.0))) // SinForm (for wave forms) + +end TransformT + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `TransformS` class holds a transform specification that contains default values + * for the parameters w and their bounds, e.g., w in x -> f(w_0 + w_1 * x) + * @param w the vector of two nonlinear parameters: shift w_0 and scale w_1 parameters + * @param l the vector containing the lower bounds of shift and scale parameters + * @param u the vector containing the upper bounds of shift and scale parameters + */ +case class TransformS (w: VectorD = VectorD (0, 1), + l: VectorD = VectorD (0, 0.1), + u: VectorD = VectorD (10, 10)): + + def this (wlu: (Double, Double)*) = this (VectorD (wlu(0)), VectorD (wlu(1)), VectorD (wlu(2))) + +end TransformS + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Transform` trait supports the use of transformation functions and makes it
- * @param x the vector or matrix being transformed + * + * @note: the CENTERED lu interval [-2, 2] reduces collinearity, but may yield NaN + * due to a negative base (x < 0) raised to a `Double` power (x~^p). + * Switch to a `Rat` power (x↑r) with an odd denominator, e.g., r = Rat(1, 3) to avoid NaN. + * Otherwise, use a positive interval such as [0, 4] or [1, 5]. + * @see `Rat`, `pow_` in CommonFunctions.scala, and `↑` in ValueType.scala + * + * @param w the transform argument vector or matrix (in subclasses pass x_ for input or w for weights) + * @param centered whether the normalization should CENTER the data (defaults to true) */ -trait Transform (x: VectorD | MatrixD = null): +trait Transform (w: VecMat, centered: Boolean = true): - protected var lu: VectorD = VectorD (1, 2) // optional default range/bounds [l .. u] - protected var b: MatrixD = null // optional argument matrix + protected var lu: VectorD = // min-max default range/bounds [l .. u] + if centered then VectorD (-2, 2) // centered to reduce collinearity, similar range to z + else VectorD (0, 4) // shifted to positive values, allows (-x)^.5 +// else VectorD (1, 5) // shifted away from zero, allows log (x) - if x != null then - x match - case xM: MatrixD => setB (xM) - case xV: VectorD => setB (MatrixD (xV).transpose) + protected val b: MatrixD = // transform argument matrix + w match + case wV: VectorD => MatrixD (wV).ᵀ + case wM: MatrixD => wM - def setLU (_lu: VectorD): Unit = lu = _lu // set the default bounds - def setB (x: MatrixD): Unit = b = x // set the argument matrix - def f (x: MatrixD): MatrixD // transformation function + inline def b_ : VectorD = b(?, 0) // get 0-th column of the argument matrix + def setLU (_lu: VectorD): Unit = lu = _lu // set the default bounds to custom value + + def f (x: MatrixD): MatrixD // transformation function (matrix level) def fi (y: MatrixD): MatrixD // inverse transformation function - val f: FunctionV2V = (x: VectorD) => f(MatrixD(x).transpose)(?, 0) - 
val fi: FunctionV2V = (y: VectorD) => fi(MatrixD(y).transpose)(?, 0) -end Transform + val f: FunctionV2V = (x: VectorD) => f(MatrixD (x).ᵀ)(?, 0) // vector level + val fi: FunctionV2V = (y: VectorD) => fi(MatrixD (y).ᵀ)(?, 0) + + val f_ : FunctionS2S = (x: Double) => f(MatrixD ((1, 1), x))(0, 0) // scalar level + val fi_ : FunctionS2S = (y: Double) => fi(MatrixD ((1, 1), y))(0, 0) + + def df (x: VectorD): MatrixD = null // partial derivative of f + + def df (x: MatrixD): MatrixD = // column-by-column partial derivative of f wrt to w + var jMatrix = df (x(?, 0)) + for j <- 1 until x.dim2 do jMatrix = jMatrix ++^ df (x(?, j)) + jMatrix + end df + + def df (x: MatrixD, i: Int): MatrixD = // partial derivative of each column wrt wi + if i == 0 || i == 1 then + var jMatrix = MatrixD (df (x(?, 0))(?, i)).ᵀ + for j <- 1 until x.dim2 do jMatrix = jMatrix :^+ df (x(?, j))(?, i) + jMatrix + else + df(x) + end df + + def testV (x: VectorD): Unit = + val y = f (x) + val z = fi (y) + println (s"y = $y, \nz = $z") + end testV + + def testM (x: MatrixD): Unit = + val y = f (x) + val z = fi (y) + println (s"y = $y, \nz = $z") + end testM +end Transform + +// @note: NormForm will make some of the normalized data negative +// MinMaxForm will do the same when centered = true //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `zForm` class applies the z-transformation (subtract mean and divide by standard deviation). +/** The `NormForm` class applies the Z-transformation/normalization/standardization + * (subtract mean b(0) and divide by standard deviation b(1)). + * Like ''StandardScalar'' in sk-learn. 
+ * @see www.geeksforgeeks.org/machine-learning/standardscaler-minmaxscaler-and-robustscaler-techniques-ml/ + * + * x -> (x - μ)/σ robust version: (x - μ)/(σ + ε) or (x - μ)/√(σ² + ε) + * + * @param x_ the input vector or matrix to be transformed (needed to get w) + * @param robust whether to add a small value (ε) to the standard deviation to avoid DBZ + * when a whole column has zero stdev (σ) it should be removed in pre-processing */ -class zForm (x: VectorD | MatrixD) extends Transform (x): - override def setB (x: MatrixD): Unit = b = x.mu_sig - def f (x: MatrixD): MatrixD = (x - b(0)) / b(1) - def fi (y: MatrixD): MatrixD = (y *~ b(1)) + b(0) +class NormForm (x_ : VecMat, robust: Boolean = true) extends Transform (x_.mu_sig): + + val ε = 1E-8 + def f (x: MatrixD): MatrixD = + if robust then (x - b(0)) / (b(1) + ε) + else (x - b(0)) / b(1) + def fi (y: MatrixD): MatrixD = + if robust then y *~ (b(1) + ε) + b(0) + else y *~ b(1) + b(0) + +end NormForm + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rangeForm` class transforms values to the default range/bounds lu. +/** The `MinMaxForm` class applies the MIN-MAX-transformation to move the data into the range [l .. u] + * (subtract min, multiply by bounds lu range over min-max range and add lower bound). + * Like ''MinMaxScaler'' in sk-learn. 
+ * @see www.geeksforgeeks.org/machine-learning/standardscaler-minmaxscaler-and-robustscaler-techniques-ml/ + * + * x -> l + (x-min)(u-l)/(max-min) + * + * @param x_ the input vector or matrix to be transformed (needed to get w) + * @param centered whether the normalization should CENTER the data (defaults to true) */ -class rangeForm (x: VectorD | MatrixD) extends Transform (x): - override def setB (x: MatrixD): Unit = b = x.min_max +class MinMaxForm (x_ : VecMat, centered: Boolean = true) extends Transform (x_.min_max, centered): + def f (x: MatrixD): MatrixD = (x - b(0)) * (lu(1) - lu(0)) / (b(1) - b(0)) + lu(0) def fi (y: MatrixD): MatrixD = (y - lu(0)) *~ (b(1) - b(0)) /(lu(1) - lu(0)) + b(0) +end MinMaxForm + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `CenterForm` class applies the CENTER-transformation so the new mean will be zero + * (subtract original mean b(0)). + * + * x -> x - μ + * + * @param x_ the input vector or matrix to be transformed (needed to get w) + */ +class CenterForm (x_ : VecMat) extends Transform (x_.mu_sig): + + def f (x: MatrixD): MatrixD = x - b(0) + def fi (y: MatrixD): MatrixD = y + b(0) + +end CenterForm + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `logForm` object applies the log-transformation. +/** The `PowForm` class applies a shifted s = b_(0) and scaled p = b_(1) POWER-transformation. 
+ * For shift s and power p, + * + * x -> (x + s)^p + * + * @param w the transform argument vector (w -> b) */ -object logForm extends Transform (): - def f (x: MatrixD): MatrixD = x.log - def fi (y: MatrixD): MatrixD = y.exp +class PowForm (w: VectorD = VectorD (s, p)) extends Transform (w): + + def f (x: MatrixD): MatrixD = (x + b_(0)) ~^ b_(1) + def fi (y: MatrixD): MatrixD = y ~^ (1/b_(1)) - b_(0) + override def df (x: VectorD): MatrixD = MatrixD (((x + b_(0)) ~^ (b_(1) - 1)) * b_(1), + f(x) * (x + b_(0)).log).ᵀ +end PowForm + +class RootForm (w: VectorD = VectorD (s, r)) extends PowForm (w) + +class RecipForm (w: VectorD = VectorD (s, c)) extends PowForm (w) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `log1pForm` object applies the log1p-transformation (log (z+1)). +/** The `PowRForm` class applies a shifted s = b_(0) and scaled q = b_(1) POWER-transformation. + * For shift s and power q where q is converted to r a nearby rational number with an odd denominator + * + * x -> (x + s)↑q + * + * @param w the transform argument vector (w -> b) */ -object log1pForm extends Transform (): - def f (x: MatrixD): MatrixD = x.log1p - def fi (y: MatrixD): MatrixD = y.expm1 +class PowRForm (w: VectorD = VectorD (s, q)) extends Transform (w): + + private val r = Rat.fromDouble3 (b_(1)) // nearby rational number with an odd denominator + private val ri = r.recip + + def f (x: MatrixD): MatrixD = (x + b_(0)) ↑ r + def fi (y: MatrixD): MatrixD = y ↑ ri - b_(0) + override def df (x: VectorD): MatrixD = MatrixD (((x + b_(0)) ↑ (r - 1)) * r.toDouble, + f(x) * (x + b_(0)).log).ᵀ +end PowRForm + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `cosForm` class applies the cosine-transformation. +/** The `BoxcoxForm` class applies a and scaled p = b_(0) BOX-COX-transformation. + * When l is zero, switch to a log-transformation. 
For power l (λ in textbooks), + * + * x -> (x^l - 1) / l + * + * @param w the transform argument vector (w -> b) */ -class cosForm (x: VectorD) extends Transform (x): - def f (x: MatrixD): MatrixD = x.map_ (z => cos (b(0, 0) * Piby2 * z)) - def fi (y: MatrixD): MatrixD = y.map_ (z => acos (z) / (b(0, 0) * Piby2)) +class BoxcoxForm (w: VectorD = VectorD (l)) extends Transform (w): + + private val l = b_(0) + + def f (x: MatrixD): MatrixD = (x ~^ l - 1) / l + def fi (y: MatrixD): MatrixD = (y * l + 1) ~^ (1/l) + override def df (x: VectorD): MatrixD = MatrixD (x ~^ (l-1), + (l * x + 1) ~^ (1/l-1)).ᵀ +end BoxcoxForm + + +// FIX - TBD: add (1) Modulus Transformation and (2) Inverse Hyperbolic Sine (asinh) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `sinForm` class applies the sine-transformation. +/** The `LogForm` class applies a shifted b_(0) and scaled b_(1) LOG-transformation. + * Note: the default w = (1, 1) corresponds to log1p and its inverse to expm1. + * For shift s and scale a, + * + * x -> log (ax + s) + * + * @param w the transform argument vector (w -> b) */ -class sinForm (x: VectorD) extends Transform (x): - def f (x: MatrixD): MatrixD = x.map_ (z => sin (b(0, 0) * Piby2 * z)) - def fi (y: MatrixD): MatrixD = y.map_ (z => asin (z) / (b(0, 0) * Piby2)) +class LogForm (w: VectorD = VectorD (ss, a)) extends Transform (w): + + def f (x: MatrixD): MatrixD = (x * b_(1) + b_(0)).log + def fi (y: MatrixD): MatrixD = (y.exp - b_(0)) / b_(1) + override def df (x: VectorD): MatrixD = MatrixD (1 / (x * b_(1) + b_(0)), + x / (x * b_(1) + b_(0))).ᵀ +end LogForm + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `powForm` class applies the power-transformation x^p for power p > 1. +/** The `Log1pForm` class applies a shifted b_(0) and scaled b_(1) LOG-transformation. + * @note: the given w = (1, 1) corresponds to log1p and its inverse to expm1. 
+ * For shift s and scale a, + * + * x -> log (ax + s) + * + * @param x_ the transform argument is null */ -class powForm (x: VectorD) extends Transform (x): - def f (x: MatrixD): MatrixD = x ~^ b(0, 0) - def fi (y: MatrixD): MatrixD = y ~^ (1/b(0, 0)) +class Log1pForm (@unused x_ : VecMat = null) extends Transform (VectorD (1, 1)): + + def f (x: MatrixD): MatrixD = (x * b_(1) + b_(0)).log + def fi (y: MatrixD): MatrixD = (y.exp - b_(0)) / b_(1) + override def df (x: VectorD): MatrixD = MatrixD (1 / (x * b_(1) + b_(0)), + x / (x * b_(1) + b_(0))).ᵀ +end Log1pForm //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `transformTest` tests the `Transform` class at the vector level. +/** The `ExpForm` class applies a shifted b_(0) and scaled b_(1) EXP-transformation. + * For shift s and scale a, + * + * x -> exp (ax + s) + * + * @param w the transform argument vector (w -> b) + */ +class ExpForm (w: VectorD = VectorD (s, a)) extends Transform (w): + + def f (x: MatrixD): MatrixD = (x * b_(1) + b_(0)).exp + def fi (y: MatrixD): MatrixD = (y.log - b_(0)) / b_(1) + override def df (x: VectorD): MatrixD = MatrixD (f(x), + f(x) * x).ᵀ +end ExpForm + +class NExpForm (w: VectorD = VectorD (s, an)) extends ExpForm (w) + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `CosForm` class applies a shifted b_(0) and scaled b_(1) COSINE-transformation. 
+ * For phase shift b_(0) = φ = s and frequency b_(1) = f with scaling 2πf, + * + * x -> cos (2πfx + φ) + * + * @param w the transform argument vector (w -> b) + */ +class CosForm (w: VectorD = VectorD (s, f)) extends Transform (w): + + def f (x: MatrixD): MatrixD = (x * (b_(1) * _2Pi) + b_(0)).cos + def fi (y: MatrixD): MatrixD = (y.acos - b_(0)) / (b_(1) * _2Pi) + override def df (x: VectorD): MatrixD = MatrixD (-(x * (b_(1) * _2Pi) + b_(0)).sin, + -(x * (b_(1) * _2Pi) + b_(0)).sin * (x * _2Pi)).ᵀ +end CosForm + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SinForm` class applies a shifted b_(0) and scaled b_(1) SINE-transformation. + * For phase shift b_(0) = φ = s and frequency b_(1) = f with scaling 2πf, + * + * x -> sin (2πfx + φ) + * + * @param w the transform argument vector (w -> b) + */ +class SinForm (w: VectorD = VectorD (s, f)) extends Transform (w): + + def f (x: MatrixD): MatrixD = (x * (b_(1) * _2Pi) + b_(0)).sin + def fi (y: MatrixD): MatrixD = (y.asin - b_(0)) / (b_(1) * _2Pi) + override def df (x: VectorD): MatrixD = MatrixD ((x * (b_(1) * _2Pi) + b_(0)).cos, + (x * (b_(1) * _2Pi) + b_(0)).cos * (x * _2Pi)).ᵀ +end SinForm + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `transformTest` tests the classes extending the `Transform` trait at the vector level. 
* > runMain scalation.mathstat.transformTest */ @main def transformTest (): Unit = - val x = VectorD (3, 5, 6) + import TransformT._ + + val x = VectorD (1.0, 1.5, 2.0, 2.5) + var form: Transform = null println (s"x = $x") - banner ("zForm Transformation") - val zForm1 = zForm (x) // set the argument vector - var y = zForm1.f (x) - var z = zForm1.fi (y) - println (s"y = $y, z = $z") - - banner ("rangeForm Transformation") - val rangeForm1 = rangeForm (x) // set the argument vector - y = rangeForm1.f (x) - z = rangeForm1.fi (y) - println (s"y = $y, z = $z") - - banner ("logForm Transformation") - y = logForm.f (x) - z = logForm.fi (y) - println (s"y = $y, z = $z") - - banner ("log1pForm Transformation") - y = log1pForm.f (x) - z = log1pForm.fi (y) - println (s"y = $y, z = $z") - - banner ("cosForm Transformation") - val cosForm1 = cosForm (VectorD (0.25)) // set the argument vector - y = cosForm1.f (x) - z = cosForm1.fi (y) - println (s"y = $y, z = $z") - - banner ("sinForm Transformation") - val sinForm1 = sinForm (VectorD (0.25)) // set the argument vector - y = sinForm1.f (x) - z = sinForm1.fi (y) - println (s"y = $y, z = $z") - - banner ("powForm Transformation") - val powForm1 = powForm (VectorD (1.5)) // set the argument vector - y = powForm1.f (x) - z = powForm1.fi (y) - println (s"y = $y, z = $z") + banner ("NormForm Transformation: (x - μ)/σ") + form = Norm.form (x) + form.testV (x) + + banner ("MinMaxForm Transformation: l + (x-min)(l-u)/(max-min)") + form = MinMax.form (x) + form.testV (x) + + banner ("CenterForm Transformation: x - μ") + form = Center.form (x) + form.testV (x) + + banner ("PowForm Transformation: x^1.5") + form = Pow.form (Pow.wlu.w) + form.testV (x) + println (s"df: ${form.df (x)}") + form.testV (-x) // fails for negative numbers (gives NaN) + + banner ("PowRForm Transformation: x↑.67") + form = PowR.form (PowR.wlu.w) + form.testV (x) + println (s"df: ${form.df (x)}") + form.testV (-x) // works for negative numbers + + banner 
("BoxcoxForm Transformation: x^.4") + form = Boxcox.form (Boxcox.wlu.w) + form.testV (x) + println (s"df: ${form.df (x)}") + form.testV (-x) // fails for negative numbers (gives NaN) + + banner ("RootForm Transformation: x^.5") + form = Root.form (Root.wlu.w) + form.testV (x) + println (s"df: ${form.df (x)}") + + banner ("RecipForm Transformation: x^-1") + form = Recip.form (Recip.wlu.w) + form.testV (x) + println (s"df: ${form.df (x)}") + + banner ("LogForm Transformation: log1p (x)") + form = Log.form (Log.wlu.w) + form.testV (x) + println (s"df: ${form.df (x)}") + + banner ("Log1pForm Transformation: log1p (x)") + form = Log1p.form (x) + form.testV (x) + println (s"df: ${form.df (x)}") + + banner ("ExpForm Transformation: exp (x)") + form = Exp.form (Exp.wlu.w) + form.testV (x) + println (s"df: ${form.df (x)}") + + banner ("NExpForm Transformation: exp (-x)") + form = NExp.form (NExp.wlu.w) + form.testV (x) + println (s"df: ${form.df (x)}") + + banner ("CosForm Transformation: cos (2πfx + φ)") + form = Cos.form (Cos.wlu.w) + form.testV (x) + println (s"df: ${form.df (x)}") + + banner ("SinForm Transformation: sin (2πfx + φ)") + form = Sin.form (Sin.wlu.w) + form.testV (x) + println (s"df: ${form.df (x)}") end transformTest //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `transformTest2` tests the `Transform` class at the matrix level. +/** The `transformTest2` tests the classes extending the `Transform` trait at the matrix level. 
* > runMain scalation.mathstat.transformTest2 */ @main def transformTest2 (): Unit = - val x = MatrixD ((3, 2), 3, 1, - 5, 2, - 6, 3) + import TransformT._ + + val x = MatrixD ((4, 2), 1.0, 1, + 1.5, 2, + 2.0, 3, + 2.5, 4) + var form: Transform = null println (s"x = $x") - banner ("zForm Transformation") - val zForm1 = zForm (x) // set the argument vector - var y = zForm1.f (x) - var z = zForm1.fi (y) - println (s"y = $y, z = $z") - - banner ("rangeForm Transformation") - val rangeForm1 = rangeForm (x) // set the argument vector - y = rangeForm1.f (x) - z = rangeForm1.fi (y) - println (s"y = $y, z = $z") - - banner ("logForm Transformation") - y = logForm.f (x) - z = logForm.fi (y) - println (s"y = $y, z = $z") - - banner ("log1pForm Transformation") - y = log1pForm.f (x) - z = log1pForm.fi (y) - println (s"y = $y, z = $z") - - banner ("cosForm Transformation") - val cosForm1 = cosForm (VectorD (0.25)) // set the argument vector - y = cosForm1.f (x) - z = cosForm1.fi (y) - println (s"y = $y, z = $z") - - banner ("sinForm Transformation") - val sinForm1 = sinForm (VectorD (0.25)) // set the argument vector - y = sinForm1.f (x) - z = sinForm1.fi (y) - println (s"y = $y, z = $z") - - banner ("powForm Transformation") - val powForm1 = powForm (VectorD (1.5)) // set the argument vector - y = powForm1.f (x) - z = powForm1.fi (y) - println (s"y = $y, z = $z") + banner ("NormForm Transformation: (x - μ)/σ") + form = Norm.form (x) + form.testM (x) + + banner ("MinMaxForm Transformation: a + (x-min)(b-a)/(max-min)") + form = MinMax.form (x) + form.testM (x) + + banner ("CenterForm Transformation: x - μ") + form = Center.form (x) + form.testM (x) + + banner ("PowForm Transformation: x^1.5") + form = Pow.form (Pow.wlu.w) + form.testM (x) + println (s"df: ${form.df (x)}") + + banner ("PowRForm Transformation: x↑.67") + form = PowR.form (PowR.wlu.w) + form.testM (x) + println (s"df: ${form.df (x)}") + + banner ("RootForm Transformation: x^.5") + form = Root.form (Root.wlu.w) + 
form.testM (x) + println (s"df: ${form.df (x)}") + + banner ("RecipForm Transformation: x^-1") + form = Recip.form (Recip.wlu.w) + form.testM (x) + println (s"df: ${form.df (x)}") + + banner ("LogForm Transformation: log1p (x)") + form = Log.form (Log.wlu.w) + form.testM (x) + println (s"df: ${form.df (x)}") + + banner ("ExpForm Transformation: exp (x)") + form = Exp.form (Exp.wlu.w) + form.testM (x) + println (s"df: ${form.df (x)}") + + banner ("NExpForm Transformation: exp (-x)") + form = NExp.form (NExp.wlu.w) + form.testM (x) + println (s"df: ${form.df (x)}") + + banner ("CosForm Transformation: cos (2πfx + φ)") + form = Cos.form (Cos.wlu.w) + form.testM (x) + println (s"df: ${form.df (x)}") + + banner ("SinForm Transformation: sin (2πfx + φ)") + form = Sin.form (Sin.wlu.w) + form.testM (x) + println (s"df: ${form.df (x)}") end transformTest2 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `transformTest3` tests the `Transform` class at the matrix level. +/** The `transformTest3` tests the `Transform` class' ability to compose transformations. 
* > runMain scalation.mathstat.transformTest3 */ @main def transformTest3 (): Unit = @@ -220,24 +622,84 @@ end transformTest2 val x = VectorD (3, 5, 6, 2, 1, 3, 2, 4, 6, 87, 1000) println (s"x = $x") - banner ("zForm Transformation") - val zForm1 = zForm (x) // set the argument vector - var y = zForm1.f (x) - var z = zForm1.fi (y) + banner ("NormForm (z) Transformation") + val zForm = NormForm (x) + var y = zForm.f (x) + var z = zForm.fi (y) println (s"y = $y, \nz = $z") - banner ("powForm Transformation") - val powForm1 = powForm (VectorD (1.5)) // set the argument vector - y = powForm1.f (x) - z = powForm1.fi (y) + banner ("PowForm Transformation") + val powForm = PowForm () + y = powForm.f (x) + z = powForm.fi (y) println (s"y = $y, \nz = $z") - val fsc = (zForm1.f(_: VectorD)) ⚬ (powForm1.f(_: VectorD)) ⚬ (zForm1.fi(_: VectorD)) - val ysc = fsc(y) - println(s"ysc = ${ysc}") + val fsc = (zForm.f(_: VectorD)) ⚬ (powForm.f(_: VectorD)) ⚬ (zForm.fi(_: VectorD)) + val ysc = fsc (y) + println (s"ysc = ${ysc}") - val ysc2 = fsc(y(0 until 3)) - println(s"ysc = ${ysc2}") + val ysc2 = fsc (y(0 until 3)) + println (s"ysc = ${ysc2}") end transformTest3 + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `transformTest4` tests the `Transform` class by plotting the transformations. 
+ * > runMain scalation.mathstat.transformTest4 + */ +@main def transformTest4 (): Unit = + + import TransformT._ + + val x = VectorD.range (1 until 100) / 25.0 + 1.0 + var form: Transform = null + println (s"x = $x") + + banner ("NormForm Transformation: (x - μ)/σ") + form = Norm.form (x) + val y1 = form.f (x) + + banner ("MinMaxForm Transformation: a + (x-min)(b-a)/(max-min)") + form = MinMax.form (x) + val y2 = form.f (x) + + banner ("PowForm Transformation: x^1.5") + form = Pow.form (Pow.wlu.w) + val y3 = form.f (x) + + banner ("RootForm Transformation: x^.5") + form = Root.form (Root.wlu.w) + val y4 = form.f (x) + + banner ("RecipForm Transformation: x^-1") + form = Recip.form (Recip.wlu.w) + val y5 = form.f (x) + + banner ("LogForm Transformation: log1p (x)") + form = Log.form (Log.wlu.w) + val y6 = form.f (x) + +/* + banner ("ExpForm Transformation: exp (x)") + form = Exp.form (Exp.wlu.w) + val y7 = form.f (x) +*/ + + banner ("NExpForm Transformation: exp (-x)") + form = NExp.form (NExp.wlu.w) + val y8 = form.f (x) + + banner ("CosForm Transformation: cos (2πfx + φ)") + form = Cos.form (Cos.wlu.w) + val y9 = form.f (x) + + banner ("SinForm Transformation: sin (2πfx + φ)") + form = Sin.form (Sin.wlu.w) + val y10 = form.f (x) + + new PlotM (x, MatrixD (y1, y2, y3, y4, y5, y6, y8, y9, y10), + Array ("Norm", "MinMax", "Pow", "Root", "Recip", "Log", "NExp", "Cos", "Sin")) + +end transformTest4 + diff --git a/src/main/scala/scalation/mathstat/Transform.scala.bak2 b/src/main/scala/scalation/mathstat/Transform.scala.bak2 new file mode 100644 index 000000000..66a50d8e2 --- /dev/null +++ b/src/main/scala/scalation/mathstat/Transform.scala.bak2 @@ -0,0 +1,320 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller, Yousef Fekri Dabanloo + * @version 2.0 + * @date Thu Mar 13 14:06:11 EDT 2025 + * @see LICENSE (MIT style license file). 
+ * + * @note Support for Transformation Functions with their Inverse + * + * https://www.infoq.com/news/2023/10/foreign-function-and-memory-api/ + */ + +package scalation +package mathstat + +import scala.math._ + +import VectorDOps._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `⚬` extension method performs function composition f ⚬ g. + * @see www.scala-lang.org/api/current/scala/Function1.html + * @tparam A the type to which function `g` can be applied + * @tparam B the type to which function `f` can be applied + * @tparam R the return type for f ⚬ g + */ +extension [A, B, R](f: B => R) + + def ⚬ (g: A => B): A => R = f compose g + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `mu_sig` and `min_max` extend the methods defined for vectors and matrices + * to work for types that are either `VectorD` or `MatrixD`. + * @param x the argument that is either a `VectorD` or `MatrixD`. + */ +extension (x: VectorD | MatrixD) + + def mu_sig: VectorD | MatrixD = + x match + case xV: VectorD => xV.mu_sig + case xM: MatrixD => xM.mu_sig + + def min_max: VectorD | MatrixD = + x match + case xV: VectorD => xV.min_max + case xM: MatrixD => xM.min_max + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `TranformT` enum defines the types of transforms. 
+ * @param name the name of the transform + * @param form the class of the transform + * @param w_l_u the vectors of parameters and their bounds + */ +enum TransformT (val name: String, val form: VectorD => Transform, val w_l_u: TransformS): + case Pow extends TransformT ("pow", x => PowForm (x), + TransformS (VectorD (0.0, 1.5), VectorD (0.0, 1.1), VectorD (10.0, 3.0))) // PowForm + case Root extends TransformT ("root", x => PowForm (x), + TransformS (VectorD (0.0, 0.5), VectorD (0.0, 0.2), VectorD (10.0, 0.9))) // RootForm (PowForm in 0.2, 0.9) + case Log extends TransformT ("log", x => LogForm (x), + TransformS (VectorD (0.0, 1.0), VectorD (0.0, 0.1), VectorD (10.0, 10.0))) // LogForm + case Sin extends TransformT ("sin", x => SinForm (x), + TransformS (VectorD (0.0, 1.0), VectorD (0.0, 0.1), VectorD (10.0, 10.0))) // SinForm + case Cos extends TransformT ("cos", x => CosForm (x), + TransformS (VectorD (0.0, 1.0), VectorD (0.0, 0.1), VectorD (10.0, 10.0))) // CosForm +end TransformT + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `TransformS` class is transform specification contains default values for the + * parameters w and their bounds, e.g., w in x -> f(w_0 + w_1 * x) + * @param w the vector of two nonlinear parameters: shift w_0 and scale w_1 parameters + * @param l the vector containing the lower bounds of shift and scale parameters + * @param u the vector containing the upper bounds of shift and scale parameters + */ +case class TransformS (w: VectorD = VectorD (0.0, 1.0), + l: VectorD = VectorD (0.0, 0.1), + u: VectorD = VectorD (10.0, 10.0)) + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Transform` trait supports the use of transformation functions, such that it + * is easy to take the inverse transform. When a transformation uses arguments, + * they are remembered for use by the inverse transformation. 
+ * @param w the transform argument vector or matrix + */ +trait Transform (w: VectorD | MatrixD): + + protected var lu: VectorD = VectorD (2, 5) // optional default range/bounds [l .. u] + protected val b: MatrixD = // transform argument matrix + w match + case wV: VectorD => MatrixD (wV).transpose + case wM: MatrixD => wM + + inline def b_ : VectorD = b(?, 0) // get 0-th column of the argument matrix + def setLU (_lu: VectorD): Unit = lu = _lu // set the default bounds + def f (x: MatrixD): MatrixD // transformation function + def fi (y: MatrixD): MatrixD // inverse transformation function + + val f: FunctionV2V = (x: VectorD) => f(MatrixD(x).transpose)(?, 0) + val fi: FunctionV2V = (y: VectorD) => fi(MatrixD(y).transpose)(?, 0) + + def df (x: VectorD): MatrixD = null // partial derivative of f + + def df (x: MatrixD): MatrixD = // column-by-column partial derivative of f wrt to w + var jMatrix = df (x(?, 0)) + for j <- 1 until x.dim2 do jMatrix = jMatrix ++^ df (x(?, j)) + jMatrix + end df + + def df (x: MatrixD, i: Int): MatrixD = // partial derivative of each column wrt wi + if i == 0 || i == 1 then + var jMatrix = MatrixD (df (x(?, 0))(?, i)).transpose + for j <- 1 until x.dim2 do jMatrix = jMatrix :^+ df (x(?, j))(?, i) + jMatrix + else + df(x) + end df + +end Transform + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ZForm` class applies the z-transformation (subtract mean b(0) and divide by + * standard deviation b(1)). + * @param x_ the input vector or matrix to be transformed (needed to get w) + */ +class ZForm (x_ : VectorD | MatrixD) extends Transform (x_.mu_sig): + def f (x: MatrixD): MatrixD = (x - b(0)) / b(1) + def fi (y: MatrixD): MatrixD = (y *~ b(1)) + b(0) + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RangeForm` class transforms values to the default range/bounds lu. 
+ * @param x_ the input vector or matrix to be transformed (needed to get w) + */ +class RangeForm (x_ : VectorD | MatrixD) extends Transform (x_.min_max): + def f (x: MatrixD): MatrixD = (x - b(0)) * (lu(1) - lu(0)) / (b(1) - b(0)) + lu(0) + def fi (y: MatrixD): MatrixD = (y - lu(0)) *~ (b(1) - b(0)) /(lu(1) - lu(0)) + b(0) + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `LogForm` class applies a shifted b_(0) and scaled b_(1) log-transformation. + * Note: the default w = (0, 1) corresponds to log1p and its inverse to expm1. + * @param w the transform argument vector (w -> b) + */ +class LogForm (w: VectorD = VectorD (0, 1)) extends Transform (w): + def f (x: MatrixD): MatrixD = x.map_ (z => log (z * b_(1) + b_(0))) + def fi (y: MatrixD): MatrixD = y.map_ (z => (exp (z) - b_(0)) / b_(1)) + override def df (x: VectorD): MatrixD = MatrixD (x / (x * b_(1) + b_(0)), + 1 / (x * b_(1) + b_(0))).transpose + +// FIX - add ExpForm, BoxCoxForm, YeoJohnsonForm + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `CosForm` class applies a shifted b_(0) and scaled b_(1) cosine-transformation. + * @param w the transform argument vector (w -> b) + */ +class CosForm (w: VectorD = VectorD (0, 1)) extends Transform (w): + def f (x: MatrixD): MatrixD = x.map_ (z => cos (z * (b_(1) * _2Pi) + b_(0))) + def fi (y: MatrixD): MatrixD = y.map_ (z => (acos (z) - b_(0)) / (b_(1) * _2Pi)) + override def df (x: VectorD): MatrixD = MatrixD (x.map (z => -sin (z * b_(1) * _2Pi + b_(0))) * (_2Pi * x), + x.map (z => -sin (z * b_(1) * _2Pi + b_(0))) * _2Pi).transpose + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SinForm` class applies a shifted b_(0) and scaled b_(1) sine-transformation. 
+ * @param w the transform argument vector (w -> b) + */ +class SinForm (w: VectorD = VectorD (0, 1)) extends Transform (w): + def f (x: MatrixD): MatrixD = x.map_ (z => sin (z * (b_(1) * _2Pi) + b_(0))) + def fi (y: MatrixD): MatrixD = y.map_ (z => (asin (z) - b_(0)) / (b_(1) * _2Pi)) + override def df (x: VectorD): MatrixD = MatrixD (x.map (z => cos (z * b_(1) * _2Pi + b_(0))) * (_2Pi * x), + x.map (z => cos (z * b_(1) * _2Pi + b_(0))) * _2Pi).transpose + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `PowForm` class applies a shifted s = b_(0) and scaled p = b_(1) power-transformation, + * (x + s)^p for power p > 1. It defaults to x^2. + * @param w the transform argument vector (w -> b) + */ +class PowForm (w: VectorD = VectorD (0, 2)) extends Transform (w): + def f (x: MatrixD): MatrixD = (x + b_(0)) ~^ b_(1) + def fi (y: MatrixD): MatrixD = y ~^ (1/b_(1)) - b_(0) + override def df (x: VectorD): MatrixD = MatrixD (((x + b_(0)) ~^ (b_(1) - 1)) * b_(1), + f(x) * (x + b_(0)).map (z => log(z))).transpose + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `transformTest` tests the `Transform` class at the vector level. 
+ * > runMain scalation.mathstat.transformTest + */ +@main def transformTest (): Unit = + + val x = VectorD (1, 2, 3) + println (s"x = $x") + + banner ("ZForm Transformation") + val zForm = ZForm (x) + var y = zForm.f (x) + var z = zForm.fi (y) + println (s"y = $y, \nz = $z") + + banner ("RangeForm Transformation") + val rangeForm = RangeForm (x) + y = rangeForm.f (x) + z = rangeForm.fi (y) + println (s"y = $y, \nz = $z") + + banner ("LogForm Transformation") + val logForm = LogForm () + y = logForm.f (x) + z = logForm.fi (y) + println (s"y = $y, \nz = $z") + println (s"df: ${logForm.df (x)}") + + banner ("CosForm Transformation") + val cosForm = CosForm () + y = cosForm.f (x) + z = cosForm.fi (y) + println (s"y = $y, \nz = $z") + println (s"df: ${cosForm.df (x)}") + + banner ("SinForm Transformation") + val sinForm = SinForm () + y = sinForm.f (x) + z = sinForm.fi (y) + println (s"y = $y, \nz = $z") + println (s"df: ${sinForm.df (x)}") + + banner ("PowForm Transformation") + val powForm = PowForm () + y = powForm.f (x) + z = powForm.fi (y) + println (s"y = $y, \nz = $z") + println (s"df: ${powForm.df (x)}") + +end transformTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `transformTest2` tests the `Transform` class at the matrix level. 
+ * > runMain scalation.mathstat.transformTest2 + */ +@main def transformTest2 (): Unit = + + val x = MatrixD ((3, 2), 3, 1, + 5, 2, + 6, 3) + println (s"x = $x") + + banner ("ZForm Transformation") + val zForm = ZForm (x) + var y = zForm.f (x) + var z = zForm.fi (y) + println (s"y = $y, \nz = $z") + + banner ("RangeForm Transformation") + val rangeForm = RangeForm (x) + y = rangeForm.f (x) + z = rangeForm.fi (y) + println (s"y = $y, \nz = $z") + + banner("LogForm Transformation") + val logForm = LogForm () + y = logForm.f(x) + z = logForm.fi(y) + println (s"y = $y, \nz = $z") + println (s"df: ${logForm.df (x)}") + + banner ("CosForm Transformation") + val cosForm = CosForm () + y = cosForm.f (x) + z = cosForm.fi (y) + println (s"y = $y, \nz = $z") + println (s"df: ${cosForm.df (x)}") + + banner ("SinForm Transformation") + val sinForm = SinForm () + y = sinForm.f (x) + z = sinForm.fi (y) + println (s"y = $y, \nz = $z") + println (s"df: ${sinForm.df(x)}") + + banner ("PowForm Transformation") + val powForm = PowForm () + y = powForm.f (x) + z = powForm.fi (y) + println (s"y = $y, \nz = $z") + println (s"df: ${powForm.df (x)}") + +end transformTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `transformTest3` tests the `Transform` class' ability to compose transformations. 
+ * > runMain scalation.mathstat.transformTest3 + */ +@main def transformTest3 (): Unit = + + val x = VectorD (3, 5, 6, 2, 1, 3, 2, 4, 6, 87, 1000) + println (s"x = $x") + + banner ("ZForm Transformation") + val zForm = ZForm (x) + var y = zForm.f (x) + var z = zForm.fi (y) + println (s"y = $y, \nz = $z") + + banner ("PowForm Transformation") + val powForm = PowForm () + y = powForm.f (x) + z = powForm.fi (y) + println (s"y = $y, \nz = $z") + + val fsc = (zForm.f(_: VectorD)) ⚬ (powForm.f(_: VectorD)) ⚬ (zForm.fi(_: VectorD)) + val ysc = fsc (y) + println (s"ysc = ${ysc}") + + val ysc2 = fsc (y(0 until 3)) + println (s"ysc = ${ysc2}") + +end transformTest3 + diff --git a/src/main/scala/scalation/mathstat/VectorC.scala b/src/main/scala/scalation/mathstat/VectorC.scala index 6959f3d36..8b4817fe3 100644 --- a/src/main/scala/scalation/mathstat/VectorC.scala +++ b/src/main/scala/scalation/mathstat/VectorC.scala @@ -15,10 +15,9 @@ package mathstat import java.util.Arrays.copyOf -import scala.collection.immutable.{IndexedSeq => IIndexedSeq} -import scala.collection.immutable.Set -import scala.collection.generic._ -import scala.collection.mutable._ +import scala.collection.generic.DefaultSerializable +import scala.collection.immutable.{IndexedSeq => IIndexedSeq, Set} +import scala.collection.mutable.IndexedSeq import scala.runtime.ScalaRunTime.stringOf import scala.util.control.Breaks.{break, breakable} @@ -44,7 +43,6 @@ class VectorC (val dim: Int, else if dim > v.length then flaw ("init", s"vector dimension is larger than space: dim = $dim > v.length = ${v.length}") assert (dim <= v.length) // make this a fatal flaw - end if //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the length of this vector. 
@@ -137,7 +135,6 @@ class VectorC (val dim: Int, else b.v(k) = v(i) k += 1 - end if end for (a, b) end split @@ -738,11 +735,9 @@ class VectorC (val dim: Int, else iqsort (rk, pivot + 1, r_) // recursively sort right partition r_ = pivot - 1 - end if end while else iselsort (rk, p, r) // use simple sort when small - end if rk end iqsort @@ -755,7 +750,6 @@ class VectorC (val dim: Int, iqsort (rk, pivot + 1, r) // recursively sort right partition else iselsort (rk, p, r) // use simple sort when small - end if rk end iqsort_ */ @@ -828,7 +822,6 @@ class VectorC (val dim: Int, if v(j) < v(k) then j else if v(i) < v(k) then k else i else if v(j) > v(k) then j else if v(i) > v(k) then k else i - end if end med3 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/mathstat/VectorD.scala b/src/main/scala/scalation/mathstat/VectorD.scala index 1bfae21be..0827428f3 100644 --- a/src/main/scala/scalation/mathstat/VectorD.scala +++ b/src/main/scala/scalation/mathstat/VectorD.scala @@ -17,10 +17,9 @@ package mathstat //import java.lang.foreign.ValueLayout.JAVA_DOUBLE import java.util.Arrays.copyOf -import scala.collection.immutable.{IndexedSeq => IIndexedSeq} -import scala.collection.immutable.Set -import scala.collection.generic._ -import scala.collection.mutable._ +import scala.collection.generic.DefaultSerializable +import scala.collection.immutable.{IndexedSeq => IIndexedSeq, Set} +import scala.collection.mutable.{ArrayBuffer, IndexedSeq} import scala.runtime.ScalaRunTime.stringOf import scala.util.control.Breaks.{break, breakable} @@ -57,7 +56,6 @@ class VectorD (val dim: Int, else if dim > v.length then flaw ("init", s"vector dimension is larger than space: dim = $dim > v.length = ${v.length}") assert (dim <= v.length) // make this a fatal flaw - end if //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the length of this vector. 
@@ -148,6 +146,13 @@ class VectorD (val dim: Int, */ override def drop (n: Int = 1): VectorD = new VectorD (dim - n, v.drop (n)) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return a vector containing all but the last n elements of this vector. + * @author Yousef Fekri Dabanloo + * @param n the number of elements to be dropped + */ + override def dropRight (n: Int = 1): VectorD = new VectorD (dim - n, v.dropRight (n)) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Split the elements from this vector to form two vectors: one from the elements in * idx (e.g., testing set) and the other from elements not in idx (e.g., training set). @@ -216,25 +221,36 @@ class VectorD (val dim: Int, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Update the i-th element (or in range) of this vector. * @param i the index of the element to update - * @param a the updated value to assign + * @param a the updated value to be assigned */ def update (i: Int, a: Double): Unit = v(i) = a //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Update the i-th element (or in range) of this vector. * @param i the index of the element to update - * @param a the update value to assign + * @param a the update value to be assigned */ def update (r: Range, a: Double): Unit = cfor (r) { i => v(i) = a } //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Update the i-th element (or in range) of this vector. 
* @param i the index of the element to update - * @param y the update vector/indexed sequence to assign + * @param y the update vector/indexed sequence to be assigned */ def update (r: Range, y: VectorD): Unit = cfor (r) { i => v(i) = y.v(i) } def update (r: Range, y: IndexedSeq [Double]): Unit = cfor (r) { i => v(i) = y(i) } + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Update all the element of this vector and return this updated vector. + * @param y the other vector to be assigned + * + def := (y: VectorD): VectorD = + if y.dim != dim then flaw (":=", s"dimension dim = $dim != y.dim = ${y.dim}") + v = y.v + this + end := + */ + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set all elements in this vector to scalar zero. */ @@ -335,10 +351,10 @@ class VectorD (val dim: Int, /** Compute the element-wise sum (or difference, product, quotient) of this and scalar a. * @param a the scalar second operand */ - def + (a: Double): VectorD = new VectorD (dim, cfor (dim) { i => v(i) + a }) - def - (a: Double): VectorD = new VectorD (dim, cfor (dim) { i => v(i) - a }) - def * (a: Double): VectorD = new VectorD (dim, cfor (dim) { i => v(i) * a }) - def / (a: Double): VectorD = new VectorD (dim, cfor (dim) { i => v(i) / a }) + inline def + (a: Double): VectorD = new VectorD (dim, cfor (dim) { i => v(i) + a }) + inline def - (a: Double): VectorD = new VectorD (dim, cfor (dim) { i => v(i) - a }) + inline def * (a: Double): VectorD = new VectorD (dim, cfor (dim) { i => v(i) * a }) + inline def / (a: Double): VectorD = new VectorD (dim, cfor (dim) { i => v(i) / a }) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the element-wise sum (or difference, product, quotient) of vectors this and y. 
@@ -381,10 +397,17 @@ class VectorD (val dim: Int, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the element-wise power function of this vector raised to scalar a. - * @param the scalar second operand + * @param a the scalar second operand (double) */ def ~^ (a: Double): VectorD = new VectorD (dim, cfor (dim) { i => v(i) ~^ a }) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the element-wise power function of this vector raised to scalar a. + * Extended to handle a negative base. + * @param a the scalar second operand (rational number) + */ + def ↑ (a: Rat): VectorD = new VectorD (dim, cfor (dim) { i => v(i) ↑ a }) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Determine whether this vector and vector y are nearly equal. * @param y the other vector @@ -528,18 +551,18 @@ class VectorD (val dim: Int, sum end dot - inline def ∙ (y: VectorD): Double = dot (y) // unicode bullet point + inline def ∙ (y: VectorD): Double = dot (y) // Unicode bullet point - inline def ∙ (y: IndexedSeq [Double]): Double = dot (y) // unicode bullet point + inline def ∙ (y: IndexedSeq [Double]): Double = dot (y) // Unicode bullet point - inline def ∙ (y: IIndexedSeq [Double]): Double = dot (y) // unicode bullet point + inline def ∙ (y: IIndexedSeq [Double]): Double = dot (y) // Unicode bullet point //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the 'valid' (no padding) convolution of cofilter vector c and input vector x. * Take the dot product of c (this) with a slice of x, shift by one and repeat. * Usage: c conv x * Caveat: does not include reversal. 
- * @see `scalation.modeling.neuralnet.CoFilter_1D + * @see `scalation.modeling.neuralnet.CoFilter_1D` * @param x the input/data vector */ infix def conv (x: VectorD): VectorD = @@ -560,15 +583,16 @@ class VectorD (val dim: Int, inline infix def conv_ (x: VectorD): VectorD = reverse.conv (x) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the 'same' (with padding) convolution of cofilter vector c and input vector x. + /** Return the 'same' (with zero padding) convolution of cofilter vector c and + * input vector x. * Same means that the size of the result is the same as the input. * Usage: c convs x * @param x the input/data vector - */ + */ infix def convs (x: VectorD): VectorD = val y = new VectorD (x.dim) cfor (y.indices) { k => - y(k) = Σ (indices) { j => if k-j in (0, x.dim-1) then v(j) * x(k-j) else 0.0 } + cfor (indices) { j => if k+j > 0 && k+j <= x.dim then y(k) += v(j) * x(k+j-1) } } // cfor y end convs @@ -577,6 +601,7 @@ class VectorD (val dim: Int, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the 'full' convolution of cofilter vector c and input vector x. + * @note: `convf` is less common than `conv` or convs` * @param x the input/data vector */ infix def convf (x: VectorD): VectorD = @@ -721,6 +746,17 @@ class VectorD (val dim: Int, */ def norm1: Double = v.fold (0.0)((s, e) => s + math.abs (e)) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the q-norm (with the root) of this vector, i.e., ||x||_q^q + * @param q the power (^q) to apply to each element e + */ + def norm_qq (q: Double): Double = v.fold (0.0)((s, e) => s + math.abs (e~^q)) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the infinity norm (max absolute value) of this vector. 
+ */ + def normInf: Double = math.max (math.abs (min), math.abs (max)) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the vector that is the element-wise absolute value of this vector. */ @@ -757,6 +793,26 @@ class VectorD (val dim: Int, */ def expm1: VectorD = map (math.expm1 (_)) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Cos transform this vector by using math.cos (the inverse of acos). + */ + def cos: VectorD = map (math.cos (_)) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Inverse cos transform this vector by using math.acos (the inverse of cos). + */ + def acos: VectorD = map (math.acos (_)) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Sin transform this vector by using math.sin (the inverse of asin). + */ + def sin: VectorD = map (math.sin (_)) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Inverse sin transform this vector by using math.asin (the inverse of sin). + */ + def asin: VectorD = map (math.asin (_)) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the vector containing the mid-points between adjacent elements. * VectorD (for i <- 1 until dim yield 0.5 * (v(i) + v(i-1))) @@ -873,6 +929,11 @@ class VectorD (val dim: Int, median (k) end quantile + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the first Q1 (1/4) and third Q3 (3/4) quartiles. Note: IQR = Q3 - Q1. + */ + def q1_q3: VectorD = VectorD (quantile (0.25), quantile (0.75)) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the averaged median, which is the median when dim is odd and * the average of the median and the next k-median when dim is even. 
@@ -903,7 +964,7 @@ class VectorD (val dim: Int, var (p_, r_) = (p, r) // use local cursors while p_ < r_ do val pivot = ipartition (rk, p_, r_) // partition into left (<=) and right (>=) - if pivot - p_ < r_ - pivot then // recurse on the smaller subarray + if pivot - p_ < r_ - pivot then // recurse on the smaller sub-array iqsort (rk, p_, pivot - 1) // recursively sort left partition p_ = pivot + 1 else @@ -994,7 +1055,6 @@ class VectorD (val dim: Int, if v(j) < v(k) then j else if v(i) < v(k) then k else i else if v(j) > v(k) then j else if v(i) > v(k) then k else i - end if end med3 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -1279,6 +1339,12 @@ object VectorD: */ def apply (x: Double, xs: Double*): VectorD = new VectorD (xs.size + 1, x +: xs.toArray) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `VectorD` from a 2-tuple of doubles (`Double`, `Double`). + * @param x_y the 2-tuple of doubles + */ + def apply (x_y: (Double, Double)): VectorD = new VectorD (2, Array (x_y._1, x_y._2)) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a `VectorD` from one or more values (repeated values String*). * For numeric types, assign missing value indicator upon format failure. @@ -1364,17 +1430,18 @@ end VectorD * operations, so that one can write 2.0 + x as well as x + 2.0. */ object VectorDOps: - extension (a: Double) - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the element-wise sum (or difference, product, quotient) of scalar a and vector x. 
- * @param a the scalar first operand - * @param x the vector second operand - */ - def + (x: VectorD): VectorD = x + a - def - (x: VectorD): VectorD = -x + a - def * (x: VectorD): VectorD = x * a - def / (x: VectorD): VectorD = x.recip * a + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Extension methods for `Double` <+> `VectorD`. Compute the element-wise sum + * (or difference, product, quotient) of scalar a and vector x. + * @param a the scalar first operand + * @param x the vector second operand + */ + extension (a: Double) + inline def + (x: VectorD): VectorD = x + a + inline def - (x: VectorD): VectorD = -x + a + inline def * (x: VectorD): VectorD = x * a + inline def / (x: VectorD): VectorD = x.recip * a end VectorDOps diff --git a/src/main/scala/scalation/mathstat/VectorI.scala b/src/main/scala/scalation/mathstat/VectorI.scala index 637825a14..65b1c92ab 100644 --- a/src/main/scala/scalation/mathstat/VectorI.scala +++ b/src/main/scala/scalation/mathstat/VectorI.scala @@ -13,10 +13,9 @@ package mathstat import java.util.Arrays.copyOf -import scala.collection.immutable.{IndexedSeq => IIndexedSeq} -import scala.collection.immutable.Set -import scala.collection.generic._ -import scala.collection.mutable._ +import scala.collection.generic.DefaultSerializable +import scala.collection.immutable.{IndexedSeq => IIndexedSeq, Set} +import scala.collection.mutable.IndexedSeq import scala.math.sqrt import scala.runtime.ScalaRunTime.stringOf @@ -37,7 +36,6 @@ class VectorI (val dim: Int, v = Array.ofDim [Int] (dim) else if dim > v.length then flaw ("init", s"vector dimension is larger than space: dim = $dim > v.length = $v.length") - end if //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the length of this vector. 
@@ -120,7 +118,6 @@ class VectorI (val dim: Int, else b.v(k) = v(i) k += 1 - end if end for (a, b) end split @@ -627,7 +624,6 @@ class VectorI (val dim: Int, iqsort (rk, q + 1, r) // recursively sort right partition else iselsort (rk, p, r) // use simple sort when small - end if rk end iqsort @@ -699,7 +695,6 @@ class VectorI (val dim: Int, if v(j) < v(k) then j else if v(i) < v(k) then k else i else if v(j) > v(k) then j else if v(i) > v(k) then k else i - end if end med3 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/mathstat/VectorL.scala b/src/main/scala/scalation/mathstat/VectorL.scala index 84aedcc26..9fd238e19 100644 --- a/src/main/scala/scalation/mathstat/VectorL.scala +++ b/src/main/scala/scalation/mathstat/VectorL.scala @@ -13,10 +13,9 @@ package mathstat import java.util.Arrays.copyOf -import scala.collection.immutable.{IndexedSeq => IIndexedSeq} -import scala.collection.immutable.Set -import scala.collection.generic._ -import scala.collection.mutable._ +import scala.collection.generic.DefaultSerializable +import scala.collection.immutable.{IndexedSeq => IIndexedSeq, Set} +import scala.collection.mutable.IndexedSeq import scala.runtime.ScalaRunTime.stringOf //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -36,7 +35,6 @@ class VectorL (val dim: Int, v = Array.ofDim [Long] (dim) else if dim > v.length then flaw ("init", s"vector dimension is larger than space: dim = $dim > v.length = $v.length") - end if //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the length of this vector. 
@@ -118,7 +116,6 @@ class VectorL (val dim: Int, else b.v(k) = v(i) k += 1 - end if end for (a, b) end split @@ -609,7 +606,6 @@ class VectorL (val dim: Int, iqsort (rk, q + 1, r) // recursively sort right partition else iselsort (rk, p, r) // use simple sort when small - end if rk end iqsort @@ -681,7 +677,6 @@ class VectorL (val dim: Int, if v(j) < v(k) then j else if v(i) < v(k) then k else i else if v(j) > v(k) then j else if v(i) > v(k) then k else i - end if end med3 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/mathstat/VectorS.scala b/src/main/scala/scalation/mathstat/VectorS.scala index 549cd45ff..ce63d3939 100644 --- a/src/main/scala/scalation/mathstat/VectorS.scala +++ b/src/main/scala/scalation/mathstat/VectorS.scala @@ -13,10 +13,9 @@ package mathstat import java.util.Arrays.copyOf -import scala.collection.immutable.{IndexedSeq => IIndexedSeq} -import scala.collection.immutable.Set -import scala.collection.generic._ -import scala.collection.mutable._ +import scala.collection.generic.DefaultSerializable +import scala.collection.immutable.{IndexedSeq => IIndexedSeq, Set} +import scala.collection.mutable.IndexedSeq import scala.runtime.ScalaRunTime.stringOf //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -36,7 +35,6 @@ class VectorS (val dim: Int, v = Array.ofDim [String] (dim) else if dim > v.length then flaw ("init", s"vector dimension is larger than space: dim = $dim > v.length = $v.length") - end if //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the length of this vector. 
@@ -118,7 +116,6 @@ class VectorS (val dim: Int, else b.v(k) = v(i) k += 1 - end if end for (a, b) end split @@ -631,7 +628,6 @@ class VectorS (val dim: Int, iqsort (rk, q + 1, r) // recursively sort right partition else iselsort (rk, p, r) // use simple sort when small - end if rk end iqsort @@ -703,7 +699,6 @@ class VectorS (val dim: Int, if v(j) < v(k) then j else if v(i) < v(k) then k else i else if v(j) > v(k) then j else if v(i) > v(k) then k else i - end if end med3 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/mathstat/VectorT.scala b/src/main/scala/scalation/mathstat/VectorT.scala index 53677296f..10de6c803 100644 --- a/src/main/scala/scalation/mathstat/VectorT.scala +++ b/src/main/scala/scalation/mathstat/VectorT.scala @@ -13,10 +13,9 @@ package mathstat import java.util.Arrays.copyOf -import scala.collection.immutable.{IndexedSeq => IIndexedSeq} -import scala.collection.immutable.Set -import scala.collection.generic._ -import scala.collection.mutable._ +import scala.collection.generic.DefaultSerializable +import scala.collection.immutable.{IndexedSeq => IIndexedSeq, Set} +import scala.collection.mutable.IndexedSeq import scala.runtime.ScalaRunTime.stringOf import TimeNum.{_0, _1, _2, _3, _4, _5, _6} @@ -38,7 +37,6 @@ class VectorT (val dim: Int, v = Array.ofDim [TimeNum] (dim) else if dim > v.length then flaw ("init", s"vector dimension is larger than space: dim = $dim > v.length = $v.length") - end if //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the length of this vector. @@ -120,7 +118,6 @@ class VectorT (val dim: Int, else b.v(k) = v(i) k += 1 - end if end for (a, b) end split @@ -344,7 +341,7 @@ class VectorT (val dim: Int, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Sort the elements in this vector according to ord.lt (ascending order). 
*/ - def sorted: VectorT = { val a = v.sorted (TimeNum.ord); new VectorT (a.size, a) } + def sorted: VectorT = { val a = v.sorted (using timeNumOrd); new VectorT (a.size, a) } //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Sort the elements in this vector according to cmp (use '_ > _' for descending order. @@ -651,7 +648,6 @@ class VectorT (val dim: Int, iqsort (rk, q + 1, r) // recursively sort right partition else iselsort (rk, p, r) // use simple sort when small - end if rk end iqsort @@ -723,7 +719,6 @@ class VectorT (val dim: Int, if v(j) < v(k) then j else if v(i) < v(k) then k else i else if v(j) > v(k) then j else if v(i) > v(k) then k else i - end if end med3 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/mathstat/test.aux b/src/main/scala/scalation/mathstat/test.aux new file mode 100644 index 000000000..f23e54680 --- /dev/null +++ b/src/main/scala/scalation/mathstat/test.aux @@ -0,0 +1 @@ +\relax diff --git a/src/main/scala/scalation/mathstat/test.log b/src/main/scala/scalation/mathstat/test.log new file mode 100644 index 000000000..df18c7b14 --- /dev/null +++ b/src/main/scala/scalation/mathstat/test.log @@ -0,0 +1,126 @@ +This is pdfTeX, Version 3.14159265-2.6-1.40.20 (TeX Live 2019) (preloaded format=pdflatex 2019.5.8) 18 JAN 2026 15:08 +entering extended mode + restricted \write18 enabled. + %&-line parsing enabled. 
+**test +(./test.tex +LaTeX2e <2018-12-01> +(/usr/local/texlive/2019/texmf-dist/tex/latex/base/article.cls +Document Class: article 2018/09/03 v1.4i Standard LaTeX document class +(/usr/local/texlive/2019/texmf-dist/tex/latex/base/size10.clo +File: size10.clo 2018/09/03 v1.4i Standard LaTeX file (size option) +) +\c@part=\count80 +\c@section=\count81 +\c@subsection=\count82 +\c@subsubsection=\count83 +\c@paragraph=\count84 +\c@subparagraph=\count85 +\c@figure=\count86 +\c@table=\count87 +\abovecaptionskip=\skip41 +\belowcaptionskip=\skip42 +\bibindent=\dimen102 +) +(/usr/local/texlive/2019/texmf-dist/tex/latex/amsmath/amsmath.sty +Package: amsmath 2018/12/01 v2.17b AMS math features +\@mathmargin=\skip43 + +For additional information on amsmath, use the `?' option. +(/usr/local/texlive/2019/texmf-dist/tex/latex/amsmath/amstext.sty +Package: amstext 2000/06/29 v2.01 AMS text + +(/usr/local/texlive/2019/texmf-dist/tex/latex/amsmath/amsgen.sty +File: amsgen.sty 1999/11/30 v2.0 generic functions +\@emptytoks=\toks14 +\ex@=\dimen103 +)) +(/usr/local/texlive/2019/texmf-dist/tex/latex/amsmath/amsbsy.sty +Package: amsbsy 1999/11/29 v1.2d Bold Symbols +\pmbraise@=\dimen104 +) +(/usr/local/texlive/2019/texmf-dist/tex/latex/amsmath/amsopn.sty +Package: amsopn 2016/03/08 v2.02 operator names +) +\inf@bad=\count88 +LaTeX Info: Redefining \frac on input line 223. +\uproot@=\count89 +\leftroot@=\count90 +LaTeX Info: Redefining \overline on input line 385. +\classnum@=\count91 +\DOTSCASE@=\count92 +LaTeX Info: Redefining \ldots on input line 482. +LaTeX Info: Redefining \dots on input line 485. +LaTeX Info: Redefining \cdots on input line 606. +\Mathstrutbox@=\box27 +\strutbox@=\box28 +\big@size=\dimen105 +LaTeX Font Info: Redeclaring font encoding OML on input line 729. +LaTeX Font Info: Redeclaring font encoding OMS on input line 730. 
+\macc@depth=\count93 +\c@MaxMatrixCols=\count94 +\dotsspace@=\muskip10 +\c@parentequation=\count95 +\dspbrk@lvl=\count96 +\tag@help=\toks15 +\row@=\count97 +\column@=\count98 +\maxfields@=\count99 +\andhelp@=\toks16 +\eqnshift@=\dimen106 +\alignsep@=\dimen107 +\tagshift@=\dimen108 +\tagwidth@=\dimen109 +\totwidth@=\dimen110 +\lineht@=\dimen111 +\@envbody=\toks17 +\multlinegap=\skip44 +\multlinetaggap=\skip45 +\mathdisplay@stack=\toks18 +LaTeX Info: Redefining \[ on input line 2844. +LaTeX Info: Redefining \] on input line 2845. +) +(/usr/local/texlive/2019/texmf-dist/tex/latex/base/inputenc.sty +Package: inputenc 2018/08/11 v1.3c Input encoding file +\inpenc@prehook=\toks19 +\inpenc@posthook=\toks20 +) (./test.aux) +\openout1 = `test.aux'. + +LaTeX Font Info: Checking defaults for OML/cmm/m/it on input line 5. +LaTeX Font Info: ... okay on input line 5. +LaTeX Font Info: Checking defaults for T1/cmr/m/n on input line 5. +LaTeX Font Info: ... okay on input line 5. +LaTeX Font Info: Checking defaults for OT1/cmr/m/n on input line 5. +LaTeX Font Info: ... okay on input line 5. +LaTeX Font Info: Checking defaults for OMS/cmsy/m/n on input line 5. +LaTeX Font Info: ... okay on input line 5. +LaTeX Font Info: Checking defaults for OMX/cmex/m/n on input line 5. +LaTeX Font Info: ... okay on input line 5. +LaTeX Font Info: Checking defaults for U/cmr/m/n on input line 5. +LaTeX Font Info: ... okay on input line 5. + +[1 + +{/usr/local/texlive/2019/texmf-var/fonts/map/pdftex/updmap/pdftex.map}] +(./test.aux) ) +Here is how much of TeX's memory you used: + 837 strings out of 492616 + 9568 string characters out of 6129482 + 70637 words of memory out of 5000000 + 4822 multiletter control sequences out of 15000+600000 + 4094 words of font info for 16 fonts, out of 8000000 for 9000 + 1141 hyphenation exceptions out of 8191 + 27i,4n,21p,254b,111s stack positions out of 5000i,500n,10000p,200000b,80000s + +Output written on test.pdf (1 page, 35742 bytes). 
+PDF statistics: + 24 PDF objects out of 1000 (max. 8388607) + 16 compressed objects within 1 object stream + 0 named destinations out of 1000 (max. 500000) + 1 words of extra memory for PDF output out of 10000 (max. 10000000) + diff --git a/src/main/scala/scalation/mathstat/test.pdf b/src/main/scala/scalation/mathstat/test.pdf new file mode 100644 index 000000000..9f58dea8b Binary files /dev/null and b/src/main/scala/scalation/mathstat/test.pdf differ diff --git a/src/main/scala/scalation/mathstat/test.tex b/src/main/scala/scalation/mathstat/test.tex new file mode 100644 index 000000000..4c9ac13c3 --- /dev/null +++ b/src/main/scala/scalation/mathstat/test.tex @@ -0,0 +1,10 @@ + +\documentclass{article} +\usepackage{amsmath} % For advanced math features like \text, \frac, etc. +\usepackage[utf8]{inputenc} % To handle various characters +\begin{document} +\begin{equation*} +\hat{y}_t \,=\, \beta_0\, \text{new\_deaths} \,+\, \beta_1\, \text{icu\_patients} \,+\, \varepsilon_t +\end{equation*} +\end{document} + diff --git a/src/main/scala/scalation/mathstat/test2.aux b/src/main/scala/scalation/mathstat/test2.aux new file mode 100644 index 000000000..2a3baf4c2 --- /dev/null +++ b/src/main/scala/scalation/mathstat/test2.aux @@ -0,0 +1,3 @@ +\relax +\@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces Texas Temperatures Regression}}{1}\protected@file@percent } +\newlabel{tab:Texas-Temps}{{1}{1}} diff --git a/src/main/scala/scalation/mathstat/test2.log b/src/main/scala/scalation/mathstat/test2.log new file mode 100644 index 000000000..ff24af531 --- /dev/null +++ b/src/main/scala/scalation/mathstat/test2.log @@ -0,0 +1,123 @@ +This is pdfTeX, Version 3.14159265-2.6-1.40.20 (TeX Live 2019) (preloaded format=pdflatex 2019.5.8) 18 JAN 2026 19:01 +entering extended mode + restricted \write18 enabled. + %&-line parsing enabled. 
+**test2 +(./test2.tex +LaTeX2e <2018-12-01> +(/usr/local/texlive/2019/texmf-dist/tex/latex/base/article.cls +Document Class: article 2018/09/03 v1.4i Standard LaTeX document class +(/usr/local/texlive/2019/texmf-dist/tex/latex/base/size10.clo +File: size10.clo 2018/09/03 v1.4i Standard LaTeX file (size option) +) +\c@part=\count80 +\c@section=\count81 +\c@subsection=\count82 +\c@subsubsection=\count83 +\c@paragraph=\count84 +\c@subparagraph=\count85 +\c@figure=\count86 +\c@table=\count87 +\abovecaptionskip=\skip41 +\belowcaptionskip=\skip42 +\bibindent=\dimen102 +) +(/usr/local/texlive/2019/texmf-dist/tex/latex/amsmath/amsmath.sty +Package: amsmath 2018/12/01 v2.17b AMS math features +\@mathmargin=\skip43 + +For additional information on amsmath, use the `?' option. +(/usr/local/texlive/2019/texmf-dist/tex/latex/amsmath/amstext.sty +Package: amstext 2000/06/29 v2.01 AMS text + +(/usr/local/texlive/2019/texmf-dist/tex/latex/amsmath/amsgen.sty +File: amsgen.sty 1999/11/30 v2.0 generic functions +\@emptytoks=\toks14 +\ex@=\dimen103 +)) +(/usr/local/texlive/2019/texmf-dist/tex/latex/amsmath/amsbsy.sty +Package: amsbsy 1999/11/29 v1.2d Bold Symbols +\pmbraise@=\dimen104 +) +(/usr/local/texlive/2019/texmf-dist/tex/latex/amsmath/amsopn.sty +Package: amsopn 2016/03/08 v2.02 operator names +) +\inf@bad=\count88 +LaTeX Info: Redefining \frac on input line 223. +\uproot@=\count89 +\leftroot@=\count90 +LaTeX Info: Redefining \overline on input line 385. +\classnum@=\count91 +\DOTSCASE@=\count92 +LaTeX Info: Redefining \ldots on input line 482. +LaTeX Info: Redefining \dots on input line 485. +LaTeX Info: Redefining \cdots on input line 606. +\Mathstrutbox@=\box27 +\strutbox@=\box28 +\big@size=\dimen105 +LaTeX Font Info: Redeclaring font encoding OML on input line 729. +LaTeX Font Info: Redeclaring font encoding OMS on input line 730. 
+\macc@depth=\count93 +\c@MaxMatrixCols=\count94 +\dotsspace@=\muskip10 +\c@parentequation=\count95 +\dspbrk@lvl=\count96 +\tag@help=\toks15 +\row@=\count97 +\column@=\count98 +\maxfields@=\count99 +\andhelp@=\toks16 +\eqnshift@=\dimen106 +\alignsep@=\dimen107 +\tagshift@=\dimen108 +\tagwidth@=\dimen109 +\totwidth@=\dimen110 +\lineht@=\dimen111 +\@envbody=\toks17 +\multlinegap=\skip44 +\multlinetaggap=\skip45 +\mathdisplay@stack=\toks18 +LaTeX Info: Redefining \[ on input line 2844. +LaTeX Info: Redefining \] on input line 2845. +) +(/usr/local/texlive/2019/texmf-dist/tex/latex/base/inputenc.sty +Package: inputenc 2018/08/11 v1.3c Input encoding file +\inpenc@prehook=\toks19 +\inpenc@posthook=\toks20 +) (./test2.aux) +\openout1 = `test2.aux'. + +LaTeX Font Info: Checking defaults for OML/cmm/m/it on input line 5. +LaTeX Font Info: ... okay on input line 5. +LaTeX Font Info: Checking defaults for T1/cmr/m/n on input line 5. +LaTeX Font Info: ... okay on input line 5. +LaTeX Font Info: Checking defaults for OT1/cmr/m/n on input line 5. +LaTeX Font Info: ... okay on input line 5. +LaTeX Font Info: Checking defaults for OMS/cmsy/m/n on input line 5. +LaTeX Font Info: ... okay on input line 5. +LaTeX Font Info: Checking defaults for OMX/cmex/m/n on input line 5. +LaTeX Font Info: ... okay on input line 5. +LaTeX Font Info: Checking defaults for U/cmr/m/n on input line 5. +LaTeX Font Info: ... okay on input line 5. + +[1 + +{/usr/local/texlive/2019/texmf-var/fonts/map/pdftex/updmap/pdftex.map}] +(./test2.aux) ) +Here is how much of TeX's memory you used: + 837 strings out of 492616 + 9576 string characters out of 6129482 + 76637 words of memory out of 5000000 + 4822 multiletter control sequences out of 15000+600000 + 4094 words of font info for 16 fonts, out of 8000000 for 9000 + 1141 hyphenation exceptions out of 8191 + 27i,7n,21p,255b,200s stack positions out of 5000i,500n,10000p,200000b,80000s + +Output written on test2.pdf (1 page, 18006 bytes). 
+PDF statistics: + 12 PDF objects out of 1000 (max. 8388607) + 7 compressed objects within 1 object stream + 0 named destinations out of 1000 (max. 500000) + 1 words of extra memory for PDF output out of 10000 (max. 10000000) + diff --git a/src/main/scala/scalation/mathstat/test2.pdf b/src/main/scala/scalation/mathstat/test2.pdf new file mode 100644 index 000000000..23972787b Binary files /dev/null and b/src/main/scala/scalation/mathstat/test2.pdf differ diff --git a/src/main/scala/scalation/mathstat/test2.tex b/src/main/scala/scalation/mathstat/test2.tex new file mode 100644 index 000000000..181db24e7 --- /dev/null +++ b/src/main/scala/scalation/mathstat/test2.tex @@ -0,0 +1,33 @@ + +\documentclass{article} +\usepackage{amsmath} % For advanced math features like \text, \frac, etc. +\usepackage[utf8]{inputenc} % To handle various characters +\begin{document} + +\begin{table}[h] +\centering +\caption{Texas Temperatures Regression} +\label{tab:Texas-Temps} +\begin{tabular}{|c|c|c|c|c|} \hline +Const & Lat & Elev & Long & Temp \\ \hline +1.00000 & 29.7670 & 41.0000 & 95.3670 & 56.0000 \\ \hline +1.00000 & 32.8500 & 440.000 & 96.8500 & 48.0000 \\ \hline +1.00000 & 26.9330 & 25.0000 & 97.8000 & 60.0000 \\ \hline +1.00000 & 31.9500 & 2851.00 & 102.183 & 46.0000 \\ \hline +1.00000 & 34.8000 & 3840.00 & 102.467 & 38.0000 \\ \hline +1.00000 & 33.4500 & 1461.00 & 99.6330 & 46.0000 \\ \hline +1.00000 & 28.7000 & 815.000 & 100.483 & 53.0000 \\ \hline +1.00000 & 32.4500 & 2380.00 & 100.533 & 46.0000 \\ \hline +1.00000 & 31.8000 & 3918.00 & 106.400 & 44.0000 \\ \hline +1.00000 & 34.8500 & 2040.00 & 100.217 & 41.0000 \\ \hline +1.00000 & 30.8670 & 3000.00 & 102.900 & 47.0000 \\ \hline +1.00000 & 36.3500 & 3693.00 & 102.083 & 36.0000 \\ \hline +1.00000 & 30.3000 & 597.000 & 97.7000 & 52.0000 \\ \hline +1.00000 & 26.9000 & 315.000 & 99.2830 & 60.0000 \\ \hline +1.00000 & 28.4500 & 459.000 & 99.2170 & 56.0000 \\ \hline +1.00000 & 25.9000 & 19.0000 & 97.4330 & 62.0000 \\ \hline 
+\end{tabular} +\end{table} + +\end{document} + diff --git a/src/main/scala/scalation/mathstat/test3.aux b/src/main/scala/scalation/mathstat/test3.aux new file mode 100644 index 000000000..d7a2dcf7e --- /dev/null +++ b/src/main/scala/scalation/mathstat/test3.aux @@ -0,0 +1,3 @@ +\relax +\@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces Texas Temperatures: Regression, Ridge, Lasso, Transformed}}{1}\protected@file@percent } +\newlabel{tab:Texas-Temps}{{1}{1}} diff --git a/src/main/scala/scalation/mathstat/test3.log b/src/main/scala/scalation/mathstat/test3.log new file mode 100644 index 000000000..5ff51afa2 --- /dev/null +++ b/src/main/scala/scalation/mathstat/test3.log @@ -0,0 +1,123 @@ +This is pdfTeX, Version 3.14159265-2.6-1.40.20 (TeX Live 2019) (preloaded format=pdflatex 2019.5.8) 11 FEB 2026 13:32 +entering extended mode + restricted \write18 enabled. + %&-line parsing enabled. +**test3 +(./test3.tex +LaTeX2e <2018-12-01> +(/usr/local/texlive/2019/texmf-dist/tex/latex/base/article.cls +Document Class: article 2018/09/03 v1.4i Standard LaTeX document class +(/usr/local/texlive/2019/texmf-dist/tex/latex/base/size10.clo +File: size10.clo 2018/09/03 v1.4i Standard LaTeX file (size option) +) +\c@part=\count80 +\c@section=\count81 +\c@subsection=\count82 +\c@subsubsection=\count83 +\c@paragraph=\count84 +\c@subparagraph=\count85 +\c@figure=\count86 +\c@table=\count87 +\abovecaptionskip=\skip41 +\belowcaptionskip=\skip42 +\bibindent=\dimen102 +) +(/usr/local/texlive/2019/texmf-dist/tex/latex/amsmath/amsmath.sty +Package: amsmath 2018/12/01 v2.17b AMS math features +\@mathmargin=\skip43 + +For additional information on amsmath, use the `?' option. 
+(/usr/local/texlive/2019/texmf-dist/tex/latex/amsmath/amstext.sty +Package: amstext 2000/06/29 v2.01 AMS text + +(/usr/local/texlive/2019/texmf-dist/tex/latex/amsmath/amsgen.sty +File: amsgen.sty 1999/11/30 v2.0 generic functions +\@emptytoks=\toks14 +\ex@=\dimen103 +)) +(/usr/local/texlive/2019/texmf-dist/tex/latex/amsmath/amsbsy.sty +Package: amsbsy 1999/11/29 v1.2d Bold Symbols +\pmbraise@=\dimen104 +) +(/usr/local/texlive/2019/texmf-dist/tex/latex/amsmath/amsopn.sty +Package: amsopn 2016/03/08 v2.02 operator names +) +\inf@bad=\count88 +LaTeX Info: Redefining \frac on input line 223. +\uproot@=\count89 +\leftroot@=\count90 +LaTeX Info: Redefining \overline on input line 385. +\classnum@=\count91 +\DOTSCASE@=\count92 +LaTeX Info: Redefining \ldots on input line 482. +LaTeX Info: Redefining \dots on input line 485. +LaTeX Info: Redefining \cdots on input line 606. +\Mathstrutbox@=\box27 +\strutbox@=\box28 +\big@size=\dimen105 +LaTeX Font Info: Redeclaring font encoding OML on input line 729. +LaTeX Font Info: Redeclaring font encoding OMS on input line 730. +\macc@depth=\count93 +\c@MaxMatrixCols=\count94 +\dotsspace@=\muskip10 +\c@parentequation=\count95 +\dspbrk@lvl=\count96 +\tag@help=\toks15 +\row@=\count97 +\column@=\count98 +\maxfields@=\count99 +\andhelp@=\toks16 +\eqnshift@=\dimen106 +\alignsep@=\dimen107 +\tagshift@=\dimen108 +\tagwidth@=\dimen109 +\totwidth@=\dimen110 +\lineht@=\dimen111 +\@envbody=\toks17 +\multlinegap=\skip44 +\multlinetaggap=\skip45 +\mathdisplay@stack=\toks18 +LaTeX Info: Redefining \[ on input line 2844. +LaTeX Info: Redefining \] on input line 2845. +) +(/usr/local/texlive/2019/texmf-dist/tex/latex/base/inputenc.sty +Package: inputenc 2018/08/11 v1.3c Input encoding file +\inpenc@prehook=\toks19 +\inpenc@posthook=\toks20 +) (./test3.aux) +\openout1 = `test3.aux'. + +LaTeX Font Info: Checking defaults for OML/cmm/m/it on input line 5. +LaTeX Font Info: ... okay on input line 5. 
+LaTeX Font Info: Checking defaults for T1/cmr/m/n on input line 5. +LaTeX Font Info: ... okay on input line 5. +LaTeX Font Info: Checking defaults for OT1/cmr/m/n on input line 5. +LaTeX Font Info: ... okay on input line 5. +LaTeX Font Info: Checking defaults for OMS/cmsy/m/n on input line 5. +LaTeX Font Info: ... okay on input line 5. +LaTeX Font Info: Checking defaults for OMX/cmex/m/n on input line 5. +LaTeX Font Info: ... okay on input line 5. +LaTeX Font Info: Checking defaults for U/cmr/m/n on input line 5. +LaTeX Font Info: ... okay on input line 5. + +[1 + +{/usr/local/texlive/2019/texmf-var/fonts/map/pdftex/updmap/pdftex.map}] +(./test3.aux) ) +Here is how much of TeX's memory you used: + 837 strings out of 492616 + 9576 string characters out of 6129482 + 77637 words of memory out of 5000000 + 4822 multiletter control sequences out of 15000+600000 + 4094 words of font info for 16 fonts, out of 8000000 for 9000 + 1141 hyphenation exceptions out of 8191 + 27i,7n,21p,255b,200s stack positions out of 5000i,500n,10000p,200000b,80000s + +Output written on test3.pdf (1 page, 19672 bytes). +PDF statistics: + 12 PDF objects out of 1000 (max. 8388607) + 7 compressed objects within 1 object stream + 0 named destinations out of 1000 (max. 500000) + 1 words of extra memory for PDF output out of 10000 (max. 10000000) + diff --git a/src/main/scala/scalation/mathstat/test3.pdf b/src/main/scala/scalation/mathstat/test3.pdf new file mode 100644 index 000000000..dfc912ab1 Binary files /dev/null and b/src/main/scala/scalation/mathstat/test3.pdf differ diff --git a/src/main/scala/scalation/mathstat/test3.tex b/src/main/scala/scalation/mathstat/test3.tex new file mode 100644 index 000000000..4158cf6ea --- /dev/null +++ b/src/main/scala/scalation/mathstat/test3.tex @@ -0,0 +1,32 @@ + +\documentclass{article} +\usepackage{amsmath} % For advanced math features like \text, \frac, etc. 
+\usepackage[utf8]{inputenc} % To handle various characters +\begin{document} + +\begin{table}[h] +\centering +\caption{Texas Temperatures: Regression, Ridge, Lasso, Transformed} +\label{tab:Texas-Temps} +\begin{tabular}{|c|c|c|c|c|c|} \hline +Metric & Regression & Ridge & Lasso & Transformed & Symbolic \\ \hline \hline +rSq &0.991921 & 0.991920 & 0.836978 & 0.986374 & 0.991340 \\ \hline +rSqBar &0.989902 & 0.989899 & 0.796222 & 0.982967 & 0.981443 \\ \hline +sst &941.938 & 941.938 & 941.938 & 941.938 & 941.938 \\ \hline +sse &7.60949 & 7.61128 & 153.557 & 12.8350 & 8.15712 \\ \hline +sde &0.712250 & 0.712333 & 2.18523 & 0.925020 & 0.737433 \\ \hline +mse0 &0.475593 & 0.475705 & 9.59730 & 0.802186 & 0.509820 \\ \hline +rmse &0.689633 & 0.689714 & 3.09795 & 0.895648 & 0.714017 \\ \hline +mae &0.531353 & 0.531765 & 2.43379 & 0.774972 & 0.550634 \\ \hline +smape &1.09478 & 1.09563 & 5.33198 & 1.63203 & 1.13279 \\ \hline +m &16.0000 & 16.0000 & 16.0000 & 16.0000 & 16.0000 \\ \hline +dfr &3.00000 & 3.00000 & 3.00000 & 3.00000 & 8.00000 \\ \hline +df &12.0000 & 12.0000 & 12.0000 & 12.0000 & 7.00000 \\ \hline +fStat &491.138 & 491.022 & 20.5365 & 289.553 & 100.165 \\ \hline +aic &-8.75748 & -8.75936 & -35.8447 & -12.9397 & 0.686565 \\ \hline +bic &-5.66713 & -5.66900 & -32.7543 & -9.84934 & 7.63986 \\ \hline +\end{tabular} +\end{table} + +\end{document} + diff --git a/src/main/scala/scalation/modeling/ActivationFun.scala b/src/main/scala/scalation/modeling/ActivationFun.scala index 71f788640..609f451be 100644 --- a/src/main/scala/scalation/modeling/ActivationFun.scala +++ b/src/main/scala/scalation/modeling/ActivationFun.scala @@ -341,7 +341,6 @@ object ActivationFun: else // normalize: Normal (0, 1) val (mu_x, sig_x) = (x.mean, x.stdev) normalize ((mu_x, sig_x)) (x) - end if end rescaleX //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -359,7 +358,6 @@ object ActivationFun: else // normalize: Normal (0, 1) val (mu_x, sig_x) = (x.mean, 
x.stdev) normalize ((mu_x, sig_x)) (x) - end if */ null end rescaleX @@ -381,7 +379,6 @@ object ActivationFun: val (mu_y, sig_y) = (y.mean, y.stdev) (normalizeV ((mu_y, sig_y)) (y), denormalizeV ((mu_y, sig_y))) // rescaling inverse - end if end rescaleY //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -401,7 +398,6 @@ object ActivationFun: val (mu_y, sig_y) = (y.mean, y.stdev) (normalize ((mu_y, sig_y)) (y), denormalize ((mu_y, sig_y))) // rescaling inverse - end if end rescaleY end ActivationFun @@ -505,6 +501,17 @@ end activationFunTest2 val geLUDf = geLUD (t) val softmaxDf = softmaxD (softmaxf) + // print out all the derivatives + println (s"idD = $idDf") + println (s"reLUD = $reLUDf") + println (s"lreLUD = $lreLUDf") + println (s"eLUD = $eLUDf") + println (s"tanhD = $tanhDf") + println (s"sigmoidD = $sigmoidDf") + println (s"gaussianD = $gaussianDf") + println (s"geLUD = $geLUDf") + println (s"softmaxD = $softmaxDf") + new Plot (t, idDf, null, "t vs. idD") new Plot (t, reLUDf, null, "t vs. reLUD") new Plot (t, lreLUDf, null, "t vs. lreLUD") diff --git a/src/main/scala/scalation/modeling/BridgeRegression.scala b/src/main/scala/scalation/modeling/BridgeRegression.scala new file mode 100644 index 000000000..4cec7c3f3 --- /dev/null +++ b/src/main/scala/scalation/modeling/BridgeRegression.scala @@ -0,0 +1,386 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Yousef Fekri Dabanloo + * @version 2.0 + * @date Tue Jul 1 17:54:49 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Model: Bridge Regression (Lq for q > 0 Shrinkage/Regularization) + * + * Caveat: currently only supports L0.5 Regularization (Bridge Regression with q = 0.5) + * Implements iterative re-weighted least squares (IRLS) to handle non-convex L0.5 penalty. + * + * Model: minimize ||y - Xb||^2 + lambda * \sum |b_i|^0.5 + * + * Reference: + * - S. K. M. 
Wong, "Bridge regression models and IRLS", + * Journal of Statistical Computation and Simulation, 1995. + * - Hastie, Tibshirani & Friedman (2009), Elements of Statistical Learning, Sec. on Bridge. + * + * Before calling the constructor, users should center their data; automatic by all factory methods. + */ + +package scalation +package modeling + +import scala.math.{abs, pow} + +import scalation.mathstat._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `BridgeRegression` class supports L0.5 Regularization (Bridge Regression with q = 0.5) + * using Iterative Re-weighted Least Squares (IRLS) to handle non-convex L0.5 penalty. + * @param x the centered data/input m-by-n matrix + * @param y the centered response/output m-vector + * @param fname_ feature names + * @param hparam hyper-parameters: "lambda" (penalty), "maxIter", "tol", "eps" + * @param xℱ the transformation applied to x (e.g., Center or Norm) + * @param yℱ the transformation applied to y (e.g., Center) + */ +class BridgeRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, + hparam: HyperParameter = RidgeRegression.hp, + xℱ: Transform = null, yℱ: Transform = null) + extends Predictor (x, y, fname_, hparam) + with Fit (dfr = x.dim2, df = x.dim - x.dim2 - 1): + // degrees of freedom: dfr = n, df = m - n - 1 as centered x matrix has 1 less column + // fix after training by moving a dof from error to model for each coefficient eliminated + // if not using an intercept df = (x.dim2, x.dim-x.dim2), correct by calling 'resetDF' method from `Fit` + + private val debug = debugf ("BridgeRegression", false) + private val lambda = hparam("lambda").toDouble // shrinkage parameter + private val sparse = hparam("sparse").toInt == 1 // whether to sparsify + private val maxIter = hparam("maxIter").toInt // maximum number of iterations for IWLS + private val tol = hparam("tol").toDouble // tolerance for convergence + private val eps = hparam("eps").toDouble // 
small constant to avoid division by zero + private val q = hparam("pow").toDouble // exponent/L_q norm + private val qq = 2 - q + + _modelName = s"BridgeRegression_$q" + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Train via IRLS: iterate solving weighted ridge until convergence. + * @param x_ the training/full data/input matrix (defaults to full x) + * @param y_ the training/full response/output vector (defaults to full y) + */ + def train (x_ : MatrixD = x, y_ : VectorD = y): Unit = + var b_old = new VectorD (x_.dim2) // initialize at zero + b = b_old.copy // initial b-vector + val xtX = x_.ᵀ * x_ // form modified normal equations: X^T X + λ W + val xty = x_.ᵀ * y_ + + var (go, it) = (true, 1) + while go && it <= maxIter do + val xtX_ = xtX.copy + val w = b.map (e => pow (abs (e) + eps, qq)) // compute weights w_i = (|b_i| + eps)^(2 - q) + for i <- w.indices do xtX_(i, i) += lambda * w(i) // add λ * w(i) to diagonal + + val fac = new Fac_Cholesky (xtX_) // solve for b via Cholesky + fac.factor () + b = fac.solve (xty) + if (b - b_old).norm < tol then // check convergence + debug ("train", s"converged after $it iterations") + go = false + b_old = b.copy + it += 1 + end while + + if go then debug ("train", s"completed $maxIter iterations without convergence") + if sparse then LassoRegression.sparsify (b) + debug ("train", s"IRLS estimates parameter b = $b") + val nz = b.countZero // count number of coefficients set to zero + if nz > 0 then resetDF (x.dim2 - nz, x.dim - x.dim2 - 1 + nz) + end train + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Test a predictive model y_ = f(x_) + e and return its QoF vector. + * Testing may be be in-sample (on the training set) or out-of-sample + * (on the testing set) as determined by the parameters passed in. + * Note: must call train before test. 
+ * @param x_ the testing/full data/input matrix (defaults to full x) + * @param y_ the testing/full response/output vector (defaults to full y) + */ + def test (x_ : MatrixD = x, y_ : VectorD = y): (VectorD, VectorD) = + val yp = predict_ (x_) // make predictions + (yp, diagnose (y_, yp)) // return predictions and QoF vector + end test + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of y = f(z) by evaluating the formula y = b dot z. + * It works on transformed values. + * @param z the new vector to predict + */ + def predict_ (z: VectorD): Double = b dot z + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of vector y = f(x_, b). It works on transformed values. + * @param x_ the matrix to use for making predictions, one for each row + */ + def predict_ (x_ : MatrixD): VectorD = x_ * b + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of y = f(z) by evaluating the formula y = b dot z. + * It is overridden to handle transformations. + * @param z the new vector to predict + */ + override def predict (z: VectorD): Double = + val zz = if xℱ == null then z else xℱ.f(MatrixD (z))(0) + if yℱ == null then b dot zz else yℱ.fi_(b dot zz) + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of vector y = f(x_, b). It is overridden to handle transformations. 
+ * @param x_ the matrix to use for making predictions, one for each row + */ + override def predict (x_ : MatrixD): VectorD = + val xx = if xℱ == null then x_ else xℱ.f(x_) + if yℱ == null then xx * b else yℱ.fi(xx * b) + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Quality-of-Fit summary reuses Fit.summary + * @param x_ the testing/full data/input matrix + * @param fname_ the array of feature/variable names + * @param b_ the parameters/coefficients for the model + * @param vifs the Variance Inflation Factors (VIFs) + */ + override def summary (x_ : MatrixD = getX, + fname_ : Array [String] = fname, + b_ : VectorD = b, + vifs: VectorD = vif ()): String = + super.summary (x_, fname_, b_, vifs) + end summary + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a sub-model that is restricted to the given columns of the data matrix. + * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) + */ + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): BridgeRegression = + debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") + new BridgeRegression (x_cols, y, fname2, hparam) + end buildModel + +end BridgeRegression + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `BridgeRegression` companion object defines hyper-parameters and factory methods. + */ +object BridgeRegression extends Regularized: + + val hp = RidgeRegression.hp + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a Bridge Regression object from an xy matrix and center the data. 
+ * @param xy the uncentered data/input m-by-n matrix, NOT augmented with a first column of ones + * and the uncentered response m-vector (combined) + * @param fname the feature/variable names (defaults to null) + * @param hparam includes the shrinkage hyper-parameter + * @param col the designated response column (defaults to the last column) + */ + def apply (xy: MatrixD, fname: Array [String] = null, + hparam: HyperParameter = hp)(col: Int = xy.dim2 - 1): BridgeRegression = + val (x, y) = (xy.not(?, col), xy(?, col)) + val xℱ = CenterForm (x) + val yℱ = CenterForm (y) + new BridgeRegression (xℱ.f(x), yℱ.f(y), fname, hparam, xℱ, yℱ) + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a Bridge Regression object from an x matrix and y vector and center the data. + * @param x the uncentered data/input m-by-n matrix, NOT augmented with a first column of ones + * @param y the uncentered response/output vector + * @param fname the feature/variable names (defaults to null) + * @param hparam includes the shrinkage hyper-parameter + */ + def center (x: MatrixD, y: VectorD, fname: Array [String] = null, + hparam: HyperParameter = hp): BridgeRegression = + val xℱ = CenterForm (x) + val yℱ = CenterForm (y) + new BridgeRegression (xℱ.f(x), yℱ.f(y), fname, hparam, xℱ, yℱ) + end center + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a Bridge Regression object from a data matrix and a response vector. + * This method provides data rescaling of x and centering of y. 
+ * @param x the un-centered data/input m-by-n matrix, NOT augmented with a first column of ones + * @param y the un-centered response/output vector + * @param fname the feature/variable names (defaults to null) + * @param hparam the shrinkage hyper-parameter (0 => OLS) in the penalty term 'lambda * norm b' + */ + def rescale (x: MatrixD, y: VectorD, fname: Array [String] = null, + hparam: HyperParameter = hp): BridgeRegression = + val xℱ = NormForm (x) + val yℱ = CenterForm (y) + new BridgeRegression (xℱ.f(x), yℱ.f(y), fname, hparam, xℱ, yℱ) + end rescale + +end BridgeRegression + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `bridgeRegressionTest` main function tests the `BridgeRegression` class using + * the following regression equation. + * y = b dot x = b_1*x_1 + b_2*x_2. + * It compares `BridgeRegression` with `Regression` + * @see statmaster.sdu.dk/courses/st111/module03/index.html + * > runMain scalation.modeling.bridgeRegressionTest + */ +@main def bridgeRegressionTest (): Unit = + + // 5 data points: x_0 x_1 + val x = MatrixD ((5, 2), 36.0, 66.0, // 5-by-2 data matrix + 37.0, 68.0, + 47.0, 64.0, + 32.0, 53.0, + 1.0, 101.0) + val y = VectorD (745.0, 895.0, 442.0, 440.0, 1598.0) // 5-dim response vector + +// println ("model: y = b_0 + b_1*x_1 + b_2*x_2") + println ("model: y = b₀ + b₁*x₁ + b₂*x₂") // for Regression, remove b₀ for Bridge + println (s"x = $x") + println (s"y = $y") + + banner ("Regression") + val ox = VectorD.one (y.dim) +^: x // prepend a column of all 1's + val reg = new Regression (ox, y) // create a Regression model + reg.trainNtest ()() // train and test the model + + banner ("BridgeRegression with manual centering") + val mu_x = x.mean // column-wise mean of x + val mu_y = y.mean // mean of y + val x_c = x - mu_x // centered x (column-wise) + val y_c = y - mu_y // centered y + val mod = new BridgeRegression (x_c, y_c) // create a Bridge Regression model + mod.trainNtest ()() // train and 
test the model + + banner ("BridgeRegression with Auto-centering") + val amod = BridgeRegression.center (x, y) // create an auto-centered Bridge Regression model + amod.trainNtest ()() // train and test the model + + banner ("BridgeRegression with Rescaling") + val rmod = BridgeRegression.rescale (x, y) // create a rescaled Bridge Regression model + + rmod.trainNtest ()() // train and test the model + + banner ("Make one OOS Predictions") + val z = VectorD (20.0, 80.0) // new instance to predict + val _1z = 1.0 +: z // prepend 1 to z + val z_c = z - mu_x // center z + println (s"reg.predict ($z) = ${reg.predict (_1z)}") // predict using _1z + println (s"mod.predict ($z) = ${mod.predict (z_c) + mu_y}") // predict using z_c and add y's mean + println (s"amod.predict ($z) = ${amod.predict (z)}") // predict using z with auto-centering + println (s"rmod.predict ($z) = ${rmod.predict (z)}") // predict using z with rescaling + + banner ("Compare Summaries") + println (reg.summary ()) + println (mod.summary ()) + println (amod.summary ()) + println (rmod.summary ()) + +end bridgeRegressionTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `bridgeRegressionTest2` main function tests the `BridgeRegression` class + * on the AutoMPG dataset. 
+ * > runMain scalation.modeling.bridgeRegressionTest2 + */ +@main def bridgeRegressionTest2 (): Unit = + + import Example_AutoMPG._ + + banner ("AutoMPG Regression") + val reg = new Regression (ox, y, ox_fname) // create a regression model (with intercept) + reg.trainNtest ()() // train and test the model + println (reg.summary ()) // parameter/coefficient statistics + + banner ("AutoMPG Bridge Regression") + val mod = new BridgeRegression (x, y, x_fname) // create a bridge regression model (no intercept) + mod.trainNtest ()() // train and test the model + println (mod.summary ()) + +end bridgeRegressionTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `bridgeRegressionTest3` main function tests the multi-collinearity method in + * the `BridgeRegression` class using the following regression equation. + * y = b dot x = b_1*x_1 + b_2*x_2 + * Contour Plots for see, L2 penalty, see + L2 penalty, L1 penalty, sse + L1 penalty + * L.5 penalty, see _ L.5 penalty + * > runMain scalation.modeling.bridgeRegressionTest3 + */ +@main def bridgeRegressionTest3 (): Unit = + + val rvg = random.RandomVecD (100) + val nrm = random.NormalVec_c (100, 0, 50) + val x_1 = rvg.gen + val x_2 = rvg.gen + val x = MatrixD (x_1, x_2).ᵀ + + val b_ = VectorD (4, 5) + val y = x * b_ + nrm.gen + val xy = x :^+ y + println (s"Correlation matrix for xy: rho = ${xy.corr}") + + val x_c = x - x.mean + val y_c = y - y.mean + + banner ("Regression Model") + val mod = new Regression (x_c, y_c) + mod.trainNtest ()() + println (mod.summary ()) + FitM.showQofStatTable (mod.crossValidate ()) + var lambda = 0.0 + + banner ("Ridge Regression Model") + for i <- 1 to 10 do + lambda = 200.0 * i + RidgeRegression.hp("lambda") = lambda + val mod2 = new RidgeRegression (x_c, y_c) + mod2.trainNtest ()() + println (mod2.summary ()) + FitM.showQofStatTable (mod2.crossValidate ()) + end for + + banner ("Lasso Regression Model") + for i <- 1 to 10 do + lambda = 2000.0 * i 
+ RidgeRegression.hp("lambda") = lambda + val mod2 = new LassoRegression (x_c, y_c) + mod2.trainNtest ()() + println (mod2.summary ()) + FitM.showQofStatTable (mod2.crossValidate ()) + end for + + banner ("Bridge Regression Model") + for i <- 1 to 10 do + lambda = 4000.0 * i + RidgeRegression.hp("lambda") = lambda + val mod2 = new BridgeRegression (x_c, y_c) + mod2.trainNtest ()() + println (mod2.summary ()) + FitM.showQofStatTable (mod2.crossValidate ()) + end for + + def f(b: VectorD): Double = (y - x * b).normSq + def f2(b: VectorD): Double = b.normSq * 2000.0 + def f3(b: VectorD): Double = f(b) + f2(b) + def f4(b: VectorD): Double = b.norm1 * 20000.0 + def f5(b: VectorD): Double = f(b) + f4(b) + def f6(b: VectorD): Double = b.norm_qq (0.5) * 40000.0 + def f7(b: VectorD): Double = f(b) + f6(b) + + val lb = VectorD (3, 4) + val ub = VectorD (5, 6) + new PlotC (f, lb, ub, title = "Contour plot of sse") + new PlotC (f2, lb, ub, title = "Contour plot of L2 penalty") + new PlotC (f3, lb, ub, title = "Contour Plot of sse + L2 penalty") + new PlotC (f4, lb, ub, title = "Contour Plot of L1 penalty") + new PlotC (f5, lb, ub, title = "Contour Plot of sse + L1 penalty") + new PlotC (f6, lb, ub, title = "Contour Plot of L.5 penalty") + new PlotC (f7, lb, ub, title = "Contour Plot of sse + L.5 penalty") + +end bridgeRegressionTest3 + diff --git a/src/main/scala/scalation/modeling/CollTest.scala b/src/main/scala/scalation/modeling/CollTest.scala new file mode 100644 index 000000000..8953918bf --- /dev/null +++ b/src/main/scala/scalation/modeling/CollTest.scala @@ -0,0 +1,31 @@ + +package scalation +package modeling + +import scalation.mathstat._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `collTest` main method test for collinearity between column vectors x1 and x2. 
+ * > runMain scalation.modeling.collTest + */ +@main def collTest (): Unit = +// one x1 x2 + val x = MatrixD ((4, 3), 1, 1, 1, + 1, 2, 2, + 1, 3, 3, + 1, 4, 0) // change 0 by .5 to 4 + val y = VectorD (1, 3, 3, 4) + +// Regression.hp("factorization") = "Fac_SVD" // uncomment for singular matrix + + for _ <- 0 to 8 do + banner (s"Test Increasing Collinearity: x_32 = ${x(3, 2)}") + println (s"x = $x") + println (s"x.corr = ${x.corr}") + val mod = new Regression (x, y) + mod.trainNtest ()() + println (mod.summary ()) + x(3, 2) += 0.5 + +end collTest + diff --git a/src/main/scala/scalation/modeling/Example_AutoMPG.scala b/src/main/scala/scalation/modeling/Example_AutoMPG.scala index 20c6e334e..1ffc26e67 100644 --- a/src/main/scala/scalation/modeling/Example_AutoMPG.scala +++ b/src/main/scala/scalation/modeling/Example_AutoMPG.scala @@ -26,6 +26,9 @@ object Example_AutoMPG: val xr_fname = Array ("cylinders", "displacement", "horsepower", "weight", "acceleration", "modelyear", "origin") + val xyr_fname = Array ("cylinders", "displacement", "horsepower", "weight", + "acceleration", "modelyear", "origin", "mpg") + /** the raw combined data matrix 'xyr' */ val xyr = MatrixD ((392, 8), 8, 307, 130, 3504, 12, 70, 1, 18, @@ -571,3 +574,16 @@ end example_AutoMPG_Regression end example_AutoMPG_QuadRegression + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `example_AutoMPG_write_csv` main function writes the AutoMPG dataset into + * a CSV file in the DATA director (.../scalation_2.0/data). 
+ * @see archive.ics.uci.edu/ml/datasets/Auto+MPG + * > runMain scalation.modeling.example_AutoMPG_write_csv + */ +@main def example_AutoMPG_write_csv (): Unit = + + xyr.write ("auto_mpg.csv", xyr_fname) + +end example_AutoMPG_write_csv + diff --git a/src/main/scala/scalation/modeling/ExpRegression.scala b/src/main/scala/scalation/modeling/ExpRegression.scala index 72ae01b4b..d3e74e3c5 100644 --- a/src/main/scala/scalation/modeling/ExpRegression.scala +++ b/src/main/scala/scalation/modeling/ExpRegression.scala @@ -33,7 +33,7 @@ import scalation.optimization.quasi_newton.BFGS class ExpRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, hparam: HyperParameter = null, nonneg: Boolean = true) extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2): if nonneg && ! y.isNonnegative then flaw ("init", "response vector y must be nonnegative") @@ -43,7 +43,7 @@ class ExpRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, // private var r_dev = -1.0 // residual dev: -LL, for full model // private var pseudo_rSq = -1.0 // McFaffen's pseudo R-squared - modelName = "ExpRegression" + _modelName = "ExpRegression" //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** For a given parameter vector b, compute -2 * Log-Likelihood (-2LL). 
@@ -96,9 +96,10 @@ class ExpRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, def train_null (): Unit = val b0 = new VectorD (x.dim2) // use b0 = 0 for starting guess for parameters val bfgs = new BFGS (ll_null) // minimizer for -2l - val b_n = bfgs.solve (b0)._2 // find optimal solution for parameters + val b_n = bfgs.solve (b0)._2 // find optimal solution for parameters n_dev = ll_null (b_n) // measure of fitness for null model + println (s"train_null: n_dev = $n_dev") end train_null //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -137,10 +138,11 @@ class ExpRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - override def buildModel (x_cols: MatrixD): ExpRegression = + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): ExpRegression = debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") - new ExpRegression (x_cols, y, null, hparam, nonneg) + new ExpRegression (x_cols, y, fname2, hparam, nonneg) end buildModel end ExpRegression @@ -229,10 +231,10 @@ end expRegressionTest //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Test `ExpRegression` by simulating n-many observations. 
- * @param n number of observations - * @param k number of variables + * @param n number of observations (e.g., 10000) + * @param k number of variables (e.g., 5) */ - def test (n: Int = 10000, k: Int = 5): Unit = + def test (n: Int, k: Int): Unit = val u = new Uniform (0, 1) // uniform random val e = new Exponential (1) // exponential error val r = new Random () diff --git a/src/main/scala/scalation/modeling/FeatureSelection.scala b/src/main/scala/scalation/modeling/FeatureSelection.scala index 24cc8146d..6961db613 100644 --- a/src/main/scala/scalation/modeling/FeatureSelection.scala +++ b/src/main/scala/scalation/modeling/FeatureSelection.scala @@ -8,12 +8,16 @@ * @note Model Framework: Support for Feature Selection and Best-Step * * @see bookdown.org/max/FES/selection.html + * + * There are two important given instances the user may change (see below): + * qk the QoF metric index used for comparing models + * fullset_FS whether to use the full dataset or the training set for Feature Selection */ package scalation package modeling -import scala.collection.mutable.LinkedHashSet => LSET +import scala.collection.mutable.{LinkedHashSet => LSET} import scalation.mathstat._ @@ -23,14 +27,32 @@ import scalation.mathstat._ */ enum SelectionTech: - case Forward, Backward, Stepwise + case Forward, Backward, Stepwise, Beam end SelectionTech -// Change as needed the default (given instance) QoF metric used for Feature Selection +// G I V E N S + +// Change as needed the default (given instance) QoF metric used for Feature Selection (FS) + +given qk: Int = QoF.rSqBar.ordinal // which QoF metric index to use by default - Regression +//given qk: Int = QoF.smapeC.ordinal // which QoF metric index to use by default - Time Series + +// Change as needed the default (given instance) data to be used for Feature Selection (FS) + +//given fullset_FS: Boolean = true // use full dataset for Feature Selection +given fullset_FS: Boolean = false // use the training set for Feature Selection 
+ // for example the training set used in the `validate` method + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Make a new restricted array of strings for the feature names based on the + * selected columns. + * @param fname the original/full set of feature names + * @param cols the selected columns + */ +def newFname (fname: Array [String], cols: LSET [Int]): Array [String] = cols.map (fname(_)).toArray +def newFname (fname: Array [String], cols: VectorI): Array [String] = cols.map (fname(_)).toArray -//given qk: Int = QoF.rSqBar.ordinal // which QoF metric index to use by default - Regression -given qk: Int = QoF.smapeIC.ordinal // which QoF metric index to use by default - Time Series //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `FeatureSelection` trait establishes a framework for feature selection, @@ -47,16 +69,17 @@ trait FeatureSelection: * Quality of Fit (QoF) measures/metrics for all steps. * @see `Fit` for index of QoF measures/metrics. 
* @param tech the feature selection technique to apply - * @param cross whether to include the cross-validation QoF measure + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") * @param qk index of Quality of Fit (QoF) to use for comparing quality */ - def selectFeatures (tech: SelectionTech, cross: Boolean = true)(using qk: Int): + def selectFeatures (tech: SelectionTech, cross: String = "many")(using qk: Int): (LSET [Int], MatrixD) = debug ("selectFeatures", s"select features based on QoF metric with index qk = $qk") tech match case SelectionTech.Forward => forwardSelAll (cross) case SelectionTech.Backward => backwardElimAll (1, cross) case SelectionTech.Stepwise => stepwiseSelAll (cross) + case SelectionTech.Beam => beamSelAll (cross) end selectFeatures //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -64,10 +87,10 @@ trait FeatureSelection: * to ADD into the model, returning the features/variables added and the new * Quality of Fit (QoF) measures/metrics for all steps. * @see `Fit` for index of QoF measures/metrics. - * @param cross whether to include the cross-validation QoF measure + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") * @param qk index of Quality of Fit (QoF) to use for comparing quality */ - def forwardSelAll (cross: Boolean = true)(using qk: Int): (LSET [Int], MatrixD) + def forwardSelAll (cross: String = "many")(using qk: Int): (LSET [Int], MatrixD) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Perform BACKWARD ELIMINATION to find the LEAST predictive features/variables @@ -75,24 +98,67 @@ trait FeatureSelection: * new Quality of Fit (QoF) measures/metrics for all steps. * @see `Fit` for index of QoF measures/metrics. 
* @param first first variable to consider for elimination - * @param cross whether to include the cross-validation QoF measure + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") * @param qk index of Quality of Fit (QoF) to use for comparing quality */ - def backwardElimAll (first: Int = 1, cross: Boolean = true)(using qk: Int): (LSET [Int], MatrixD) + def backwardElimAll (first: Int = 1, cross: String = "many")(using qk: Int): (LSET [Int], MatrixD) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Perform STEPWISE SELECTION to find a GOOD COMBINATION of predictive features/variables - * to have in the model, returning the features/variables left and the new Quality of Fit + * to have in the model, returning the features/variables selected and the new Quality of Fit * (QoF) measures/metrics for all steps. At each step, it calls forward and backward * and takes the best of the two actions. Stops when neither action yields improvement. * @see `Fit` for index of QoF measures/metrics. - * @param cross whether to include the cross-validation QoF measure + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") * @param swap whether to allow a swap step (swap out a feature for a new feature in one step) * @param qk index of Quality of Fit (QoF) to use for comparing quality */ - def stepwiseSelAll (cross: Boolean = true, swap: Boolean = true)(using qk: Int): + def stepwiseSelAll (cross: String = "many", swap: Boolean = true)(using qk: Int): (LSET [Int], MatrixD) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform BEAM SEARCH SELECTION to find a GOOD COMBINATION of predictive features/variables to + * have in the model, returning the top k sets of features/variables selected and the new Quality of + * Fit (QoF) measures/metrics for all steps. 
At each step, iterate over the models in the beam + * (top k) and create candidates by adding features (phase 1) and then removing features (phase 2). + * From all the candidates, keep the best k and start a new iteration. Stops when there is + * no improvement in any of top k or the maximum number of features is reached. + * @see `Fit` for index of QoF measures/metrics. + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") + * @param bk the beam width holding the top k models (defaults to 3) + * @param qk index of Quality of Fit (QoF) to use for comparing quality + */ + def beamSelAll (cross: String = "many", bk: Int = 3)(using qk: Int): (LSET [Int], MatrixD) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Filter the x-columns of matrix xy based on the two thresholds, returning + * the filtered matrix and the column indices/predictor variables selected. + * @param xy the [ x, y ] combined data-response matrix + * @param thr1 the threshold used to compare the predictor x-columns to the y-column + * only want variables above some minimal dependency level + * @param thr2 the threshold used to compare the predictor x-columns with each other + * only want variables below some cut-off dependency/collinearity level + * @param dep the variable/column dependency measure (defaults to correlation) + * + def filter (xy: MatrixD, thr1: Double = 0.2, thr2: Double = 0.8) + (dep: MatrixD = xy.corr): (MatrixD, VectorI) = + + val lst = dep.dim2 - 1 // the index of last column (holds y) + val depY = dep(?, lst) // the dependency sub-matrix for xy vs. y (last column) + val depX = dep(0 until lst, 0 until lst) // the dependency sub-matrix for x vs. 
x + val indices = for i <- 0 until lst if abs (depY(i)) > thr1 yield i // row indices that match (> thr1) + val sIndices = indices.sortBy (i => -abs (depY(i))) // sort indices from highest dep to lowest + + // only add index i if its dependency with all selected columns < thr2 + val selected = ArrayBuffer [Int] () + for i <- sIndices do + if selected.forall (k => abs (depX(i, k)) < thr2) then selected += i // row indices that also match (< thr2) + val selected_ = selected.sorted + + (xy(?, selected_), new VectorI (selected_.size, selected_.toArray)) + end filter + */ + end FeatureSelection @@ -102,17 +168,21 @@ type Model_FS = (Predictor | neuralnet.PredictorMV) & Fit //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `BestStep` is used to record the best improvement step found so far during * feature selection. Note, best depends on whether maximizing or minimizing - * @param col the column/variable to ADD/REMOVE for this step - * @param qof the Quality of Fit (QoF) for this step - * @param mod the model including selected features/variables for this step - * @param qk the index for the Quality of Fit (QoF) measure/metric used for comparison - * @param bestq the best QoF for metric qk so far + * @param col the column/variable to ADD/REMOVE for this step + * @param qof the Quality of Fit (QoF) for this step + * @param mod the model including selected features/variables for this step + * @param mod_cols the columns selected for mod + * @param qk the index for the Quality of Fit (QoF) measure/metric used for comparison + * @param bestq the best QoF for metric qk so far */ -case class BestStep (col: Int = -1, qof: VectorD = null, mod: Model_FS = null) - (using qk: Int)(bestq: Double = Fit.extreme (qk)): +case class BestStep (col: Int = -1, qof: VectorD = null, mod: Model_FS = null, mod_cols: LSET [Int] = null) + (using qk: Int)(bestq: Double = Fit.extreme (qk)) + extends Ordered [BestStep]: private val debug = debugf ("BestStep", 
false) + debug ("BestStep", s"bestq = $bestq") // needed for unused explicit parameter warning + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return whether this step is better than that step. * @param that_qof the Qof for that step @@ -143,14 +213,26 @@ case class BestStep (col: Int = -1, qof: VectorD = null, mod: Model_FS = null) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the better between this and the to be formed candidate step. - * @param j the index of the feature/variable - * @param qof_j the QoF for mod_j - * @param mod_j the model with j + * @param j the index of the feature/variable + * @param qof_j the QoF for mod_j + * @param mod_j the model with j + * @param mod_cols the columns selected for mod_j */ - def better (j: Int, qof_j: VectorD, mod_j: Model_FS): BestStep = - better (BestStep (j, qof_j, mod_j)(qof_j(qk))) + def better (j: Int, qof_j: VectorD, mod_j: Model_FS, mod_cols: LSET [Int]): BestStep = + better (BestStep (j, qof_j, mod_j, mod_cols)(qof_j(qk))) end better + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the comparison result between this and that step. 
+ * @param that the other candidate + */ + def compare (that: BestStep): Int = + if qof == null && that.qof == null then 0 + else if qof == null then -1 + else if that.qof == null then 1 + else qof(qk).compare (that.qof(qk)) + end compare + end BestStep @@ -160,14 +242,22 @@ end BestStep * @see `Predictor` * @param rSq the matrix contain information about r-Sq-based QoF measures * @param l the l-th iteration - * @param cross indicator of whether cross-validation are to be included + * @param cross indicator to include "many" cross-validation, "one" validation, or "none" nothing * @param best the best step so far */ -def updateQoF (rSq: MatrixD, l: Int, cross: Boolean, best: BestStep): Unit = +def updateQoF (rSq: MatrixD, l: Int, cross: String, best: BestStep): Unit = rSq(l) = - if cross then + cross match + case "many" => Fit.qofVector (best.qof, best.mod.crossValidate ()) // results for model mod_l, with cross-validation - else - Fit.qofVector (best.qof, null) // results for model mod_l, no cross-validation + case "one" => + val qof = best.mod.validate ()()._2 + qof match + case qofv: VectorD => + Fit.qofVector (best.qof, best.mod.qof2Stat (qofv)) // results for model mod_l, with validation + case qofm: MatrixD => + Fit.qofVector (best.qof, best.mod.qof2Stat (qofm(0))) // results for model mod_l, with validation + case _ => + Fit.qofVector (best.qof, null) // results for model mod_l, with nothing end updateQoF diff --git a/src/main/scala/scalation/modeling/Fit.scala b/src/main/scala/scalation/modeling/Fit.scala index f06d1431b..107d7953a 100644 --- a/src/main/scala/scalation/modeling/Fit.scala +++ b/src/main/scala/scalation/modeling/Fit.scala @@ -8,9 +8,16 @@ * @note Model Support: Quality of Fit (QoF) * * @see facweb.cs.depaul.edu/sjost/csc423/documents/f-test-reg.htm - * @see avesbiodiv.mncn.csic.es/estadistica/ejemploaic.pdf - * @see en.wikipedia.org/wiki/Bayesian_information_criterion - * @see www.forecastpro.com/Trends/forecasting101August2011.html + * 
avesbiodiv.mncn.csic.es/estadistica/ejemploaic.pdf + * en.wikipedia.org/wiki/Bayesian_information_criterion + * www.forecastpro.com/Trends/forecasting101August2011.html + * + * @see FitI.scala.bak + * @see github.com/scikit-learn/scikit-learn/issues/20162 // used in scikit-learn + * www.mdpi.com/1999-4893/13/6/132 // defines several metrics + * arxiv.org/pdf/2005.12881.pdf // for IS and WIS + * https://www.sciencedirect.com/science/article/pii/S1364032120308005 + * www.datasciencewithmarco.com/blog/conformal-prediction-in-time-series-forecasting */ package scalation @@ -29,39 +36,38 @@ import scalation.random.CDF.{fisherCDF, studentTCDF} */ enum QoF (val name: String): - case rSq extends QoF ("rSq") // index 0 - case rSqBar extends QoF ("rSqBar") // index 1 - case sst extends QoF ("sst") // index 2 - case sse extends QoF ("sse") // index 3 - - case sde extends QoF ("sde") // index 4 - case mse0 extends QoF ("mse0") // index 5 - case rmse extends QoF ("rmse") // index 6 - case mae extends QoF ("mae") // index 7 - case smape extends QoF ("smape") // index 8 - - case m extends QoF ("m") // index 9 - case dfm extends QoF ("dfm") // index 10 - case df extends QoF ("df") // index 11 - case fStat extends QoF ("fStat") // index 12 - case aic extends QoF ("aic") // index 13 - case bic extends QoF ("bic") // index 14 - - case mape extends QoF ("mape") // index 15 - case mase extends QoF ("mase") // index 16 - case smapeIC extends QoF ("smapeIC") // index 17 - - case picp extends QoF ("picp") // index 18 - case pinc extends QoF ("pinc") // index 19 - case ace extends QoF ("ace") // index 20 - case pinaw extends QoF ("pinaw") // index 21 - case pinad extends QoF ("pinad") // index 22 - case iscore extends QoF ("iscore") // index 23 - case wis extends QoF ("wis") // index 24 + case rSq extends QoF ("rSq") // index 0 0-3 related to R^2 + case rSqBar extends QoF ("rSqBar") // index 1 + case sst extends QoF ("sst") // index 2 + case sse extends QoF ("sse") // index 3 + + case sde 
extends QoF ("sde") // index 4 4-8 various error metrics + case mse0 extends QoF ("mse0") // index 5 + case rmse extends QoF ("rmse") // index 6 + case mae extends QoF ("mae") // index 7 + case smape extends QoF ("smape") // index 8 + + case m extends QoF ("m") // index 9 9-14 degrees of freedom and information criteria + case dfr extends QoF ("dfr") // index 10 + case df extends QoF ("df") // index 11 + case fStat extends QoF ("fStat") // index 12 + case aic extends QoF ("aic") // index 13 + case bic extends QoF ("bic") // index 14 + + case mape extends QoF ("mape") // index 15 15-17 time series metrics (also 8) + case mase extends QoF ("mase") // index 16 + case smapeC extends QoF ("smapeC") // index 17 + + case picp extends QoF ("picp") // index 18 18-23 for prediction intervals + case pinc extends QoF ("pinc") // index 19 + case ace extends QoF ("ace") // index 20 + case pinaw extends QoF ("pinaw") // index 21 + case mis extends QoF ("mis") // index 22 + case wis extends QoF ("wis") // index 23 end QoF -val qoF_names = QoF.values.map (_.toString) // The QoF names from the QoF enum +val qoF_names = QoF.values.map (_.toString) // The QoF names from the QoF enum import QoF._ @@ -74,6 +80,8 @@ object Fit: val MIN_FOLDS = 3 // minimum number of folds for cross-validation val N_QoF = QoF.values.size // the number of QoF measures + val α_ = VectorD (0.02, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9) // values of α (type I error) used by WIS + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the help string that describes the Quality of Fit (QoF) measures * provided by the `Fit` trait. 
The QoF measures are divided into two groups: @@ -85,35 +93,34 @@ object Fit: def help: String = """ help: Quality of Fit (QoF) metrics/measures: - rSq = R-squared, the Coefficient of Determination (R^2) - rSqBar = adjusted R-squared (R^2-bar) - sst = Sum of Squares Total (ssr + sse) - sse = Sum of Squares for Error (SSE = RSS) - - sde = Standard Deviation of Errors - mse0 = raw Mean Square Error (MSE = SSE / m) - rmse = Root Mean Square Error (RMSE) - mae = Mean Absolute Error (MAE) - smape = symmetric Mean Absolute Percentage Error (sMAPE) - - m = Number of Observations - dfm = Degrees of Freedom taken by the model, e.g., one lost per parameter - df = Degrees of Freedom left for residuals/errors - fStat = Fisher's Statistic - aic = Akaike Information Criterion (AIC) - bic = Bayesian Information Criterion (BIC) - - mape = Mean Absolute Percentage Error (MAPE) - mase = Mean Absolute Scaled Error (MASE) - smapeIC = symmetric Mean Absolute Percentage Error Information Criterion (sMAPE-IC) - - picp = prediction interval coverage probability - pinc = prediction interval nominal coverage - ace = average coverage error - pinaw = prediction interval normalized average width - pinad = prediction interval normalized average deviation - iscore = interval score - wis = weighted interval score + rSq = R-squared, the Coefficient of Determination (R^2) + rSqBar = adjusted R-squared (R^2-bar) + sst = Sum of Squares Total (SST) [ssr + sse] + sse = Sum of Squares for Error (SSE = RSS) + + sde = Standard Deviation of Errors (SDE) + mse0 = raw Mean Square Error (MSE = SSE / m) + rmse = Root Mean Square Error (RMSE) + mae = Mean Absolute Error (MAE) + smape = symmetric Mean Absolute Percentage Error (sMAPE) + + m = Number of Observations + dfr = Degrees of Freedom (DFr) taken by the regression/model, e.g., one lost per parameter + df = Degrees of Freedom (DF) left for residuals/errors + fStat = Fisher's Statistic + aic = Akaike Information Criterion (AIC) + bic = Bayesian Information 
Criterion (BIC) + + mape = Mean Absolute Percentage Error (MAPE) + mase = Mean Absolute Scaled Error (MASE) + smapeC = symmetric Mean Absolute Percentage Error information Criterion (sMAPE-IC) + + picp = Prediction Interval empirical Coverage Probability (PICP) + pinc = Prediction Interval Nominal Coverage probability (PINC) [1 - α/2] + ace = Average Coverage Error (ACE) [empirical - nominal coverage, i.e., picp - pinc] + pinaw = Prediction Interval Normalized Average Width (PINAW) + mis = Mean Interval Score (MIS) [ over given instances ] + wis = Weighted Interval Score (WIS) [ over several α values ] """ end help @@ -132,12 +139,12 @@ help: Quality of Fit (QoF) metrics/measures: * @param cv_fit the fit array of statistics for cross-validation (upon test sets) */ def qofVector (fit: VectorD, cv_fit: Array [Statistic]): VectorD = - val cv = if cv_fit == null then -0.0 // cv not computed - else cv_fit(rSq.ordinal).mean // mean for R^2 cv + val cv = if cv_fit == null then fit(smapeC.ordinal) // cv not computed => use sMAPE_IC + else 100 * cv_fit(rSq.ordinal).mean // mean for R^2 cv VectorD (100 * fit(rSq.ordinal), // R^2 as percentage 100 * fit(rSqBar.ordinal), // R^2 Bar as percentage fit(smape.ordinal), // sMAPE - 100 * cv) // R^2 cv as percentage + cv) // R^2 cv as percentage, or sMAPE_IC end qofVector val qofVectorSize = 4 // must correspond to size of qofVector @@ -152,6 +159,16 @@ help: Quality of Fit (QoF) metrics/measures: stats end qofStatTable + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Show the quality of fit measures/metrics for each response/output variable. 
+ @ @see `FitM.showFitMap` + * @param ftMat the matrix of QoF values (qof x var) + * @param ftLab the array of QoF labels (defaults to QoF.values.map (_.toString)) + */ + def showFitMap (ftMat: MatrixD, ftLab: Array [String] = QoF.values.map (_.toString)): String = + FitM.showFitMap (ftMat, ftLab) + end showFitMap + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Tally the current QoF measures into the statistical accumulators. * @param stats the statistics table being updated @@ -160,7 +177,6 @@ help: Quality of Fit (QoF) metrics/measures: def tallyQof (stats: Array [Statistic], qof: VectorD): Unit = if qof(sst.ordinal) > 0.0 then // requires variation in test set for q <- qof.indices do stats(q).tally (qof(q)) // tally these QoF measures - end if end tallyQof //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -182,7 +198,7 @@ help: Quality of Fit (QoF) metrics/measures: * @param y the given time-series * @param h the forecasting horizon or stride (defaults to 1) */ - def mae_n (y: VectorD, h: Int = 1): Double = + inline def mae_n (y: VectorD, h: Int = 1): Double = var sum = 0.0 for t <- h until y.dim do sum += abs (y(t) - y(t-h)) sum / (y.dim - h) @@ -196,76 +212,95 @@ help: Quality of Fit (QoF) metrics/measures: * @param yp the forecasted time-series * @param h the forecasting horizon or stride (defaults to 1) */ - def mase (y: VectorD, yp: VectorD, h: Int = 1): Double = - mae (y, yp, h) / mae_n (y, 1) // compare to Naive (one-step) -// mae (y, yp, h) / mae_n (y, h) // compare to Naive (h-steps) + inline def mase (y: VectorD, yp: VectorD, h: Int = 1): Double = + mae (y, yp, h) / mae_n (y, 1) // compare to Naive (one-step) +// mae (y, yp, h) / mae_n (y, h) // compare to Naive (h-steps) end mase //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the Prediction Interval Coverage Probability (PICP) metric, i.e., - * the fraction is actual values inside the prediction 
interval. - * @param y the given time-series (must be aligned with the interval forecast) - * @param low the lower bound - * @param up the upper bound + * the fraction of actual values inside the prediction interval. + * While PINC is the nominal/desired coverage probability (1 - α), PICP is + * the corresponding empirical coverage probability. + * @param y the given time-series (must be aligned with the interval forecast) + * @param low_up the (lower, upper) bound vectors used for prediction intervals */ - inline def picp_ (y: VectorD, low: VectorD, up: VectorD): Double = + inline def picp_ (y: VectorD, low_up: (VectorD, VectorD)): Double = var count = 0 - for i<- y.indices if y(i) in (low(i), up(i)) do count += 1 + for i <- y.indices if y(i) in (low_up._1(i), low_up._2(i)) do count += 1 count / y.dim.toDouble end picp_ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the Prediction Interval Normalised Average Deviation (PINAD) metric, i.e., * the normalized (by range) average deviation outside the prediction interval. - * @param y the given time-series (must be aligned with the interval forecast) - * @param low the lower bound - * @param up the upper bound + * @param y the given time-series (must be aligned with the interval forecast) + * @param low_up the (lower, upper) bound vectors used for prediction intervals */ - inline def pinad_ (y: VectorD, low: VectorD, up: VectorD): Double = + inline def pinad_ (y: VectorD, low_up: (VectorD, VectorD)): Double = var sum = 0.0 for i <- y.indices do - sum += (if y(i) < low(i) then low(i) - y(i) - else if y(i) > up(i) then y(i) - up(i) + sum += (if y(i) < low_up._1(i) then low_up._1(i) - y(i) + else if y(i) > low_up._2(i) then y(i) - low_up._2(i) else 0.0) sum / (y.dim * (y.max - y.min)) end pinad_ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the Interval Score (IS) metric, i.e., the ... 
+ /** Return the Mean Interval Score (MIS) metric which starts with the average prediction + * interval width and adds a penalty for each true_value y(i) that is outside + * the prediction interval. Smaller (in absolute value) scores are better. + * @see huiwenn.github.io/predictive-distributions * @see arxiv.org/pdf/2005.12881.pdf - * @param y the given time-series (must be aligned with the interval forecast) - * @param low the lower bound - * @param up the upper bound - & @param alpha the prediction level + * @see search.r-project.org/CRAN/refmans/scoringutils/html/interval_score.html + * + * score = (up − low) + α/2 * (low − true_value) ∗ is(true_value < low) + * + α/2 * (true_value − up) ∗ is(true_value > up) + * + * @param y the given time-series (must be aligned with the interval forecast) + * @param low_up the (lower, upper) bound vectors used for prediction intervals + & @param α the significance level (1 - p_) */ - def iscore_ (y: VectorD, low: VectorD, up: VectorD, alpha: Double = 0.1): Double = - val fac = 2.0 / alpha + inline def mis_ (y: VectorD, low_up: (VectorD, VectorD), α: Double = 0.1): Double = + val (low, up) = low_up +// val pf = 2.0 / α // penalty factor + val pf = 4.0 / α // penalty factor - based on α/2 var sum = 0.0 for i <- y.indices do - sum += up(i) - low(i) // interval width - if y(i) < low(i) then sum += fac * (low(i) - y(i)) // y_i below interval penalty - if y(i) > up(i) then sum += fac * (y(i) - up(i)) // y_i above interval penalty - sum / y.dim - end iscore_ + sum += up(i) - low(i) // interval width + if y(i) < low(i) then sum += pf * (low(i) - y(i)) // y_i below interval penalty + if y(i) > up(i) then sum += pf * (y(i) - up(i)) // y_i above interval penalty + sum / y.dim // return the mean score + end mis_ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the Weighted Interval Score (WIS) metric, i.e., the ... 
+ /** Return the Weighted Interval Score (WIS) metric, i.e., a weighted average of + * K prediction intervals each calculated for a different alpha (α) level. + * WIS approximates the Continuous Ranked Probability Score (CRPS). * @see arxiv.org/pdf/2005.12881.pdf + * @see pmc.ncbi.nlm.nih.gov/articles/PMC7880475/pdf/pcbi.1008618.pdf (equation 1) * @param y the given time-series (must be aligned with the interval forecast) * @param yp the point prediction mean/median - * @param low the lower bounds for various alpha levels - * @param up the upper bounds for various alpha levels - * @param alphas the array of prediction levels + * @param low_up the (lower, upper) bound vectors used for prediction intervals + * @param α the vector of significance levels (defaults to the K = 11 prediction intervals + * used by the COVID-19 Forecast Hub) */ - def wis_ (y: VectorD, yp: VectorD, low: MatrixD, up: MatrixD, - alphas: Array [Double] = - Array (0.02, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)): Double = - val k = alphas.size - var sum = alphas(0) * (y - yp).abs.mean - for j <- 1 until k do sum += alphas(j) * iscore_ (y, low(j), up(j), alphas(j)) - sum / (2 * k + 1) + def wis_ (y: VectorD, yp: VectorD, low_up: (MatrixD, MatrixD), α: VectorD = α_): Double = + val w = α * 0.5 + val ww = 0.5 + var sum = (ww * (y - yp).abs.mean) / (α.dim + 0.5) + for k <- α.indices do sum += w(k) * mis_ (y, (low_up._1(k), low_up._2(k)), α(k)) + sum end wis_ +/* + MAY NEED TO FIX -- use prediction median instead of prediction mean + val kk = α.dim + var sum = α(0) * (y - yp).abs.mean + for k <- 1 until kk do sum += α(k) * mis_ (y, low_up._1(k), low_up._2(k), α(k)) + sum / (2 * k + 1) +*/ + end Fit import Fit._ @@ -273,163 +308,196 @@ import Fit._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `Fit` trait provides methods to determine basic Quality of Fit QoF measures. 
* @see reset to reset the degrees of freedom - * @param dfm the degrees of freedom for model/regression + * @param dfr the degrees of freedom for regression/model * @param df the degrees of freedom for error */ -trait Fit (protected var dfm: Double, protected var df: Double) +trait Fit (protected var dfr: Double, protected var df: Double) extends FitM: - private val debug = debugf ("Fit", false) // debug function - private val flaw = flawf ("Fit") // flaw function + private val debug = debugf ("Fit", true) // debug function + private val flaw = flawf ("Fit") // flaw function - private val pIC = 2.0 // penalty multiplier for sMAPE IC - private var df_t = dfm + df // total degrees of freedom - private var r_df = if df > 1.0 then df_t / df // ratio of degrees of freedom (total / error) - else dfm + 1.0 // case for for less than 1 dof error + private val pIC = 2.0 // penalty multiplier for sMAPE IC + private var df_t = dfr + df // total degrees of freedom + private var r_df = if df > 1.0 then df_t / df // ratio of degrees of freedom (total / error) + else dfr + 1.0 // case for for less than 1 dof error - private var mse = -1.0 // mean of squares for error MSE (unbiased) - private var rse = -1.0 // residual standard error (RSE) - private var msr = -1.0 // mean of squares for regression/model (MSR) + private var mse = -1.0 // mean of squares for error MSE (unbiased) + private var rse = -1.0 // residual standard error (RSE) + private var msr = -1.0 // mean of squares for regression/model (MSR) - private var rSqBar = -1.0 // adjusted R-squared (R^2 Bar) - private var fStat = -1.0 // F statistic (Quality of Fit) - private var p_fS = -1.0 // p-value for fStat - private var aic = -1.0 // Akaike Information Criterion (AIC) - private var bic = -1.0 // Bayesian Information Criterion (BIC) + private var rSqBar = -1.0 // adjusted R-squared (R^2 Bar) + private var fStat = -1.0 // F statistic (Quality of Fit) + private var p_fS = -1.0 // p-value for fStat + private var aic = -1.0 // 
Akaike Information Criterion (AIC) + private var bic = -1.0 // Bayesian Information Criterion (BIC) // Measures used for time series @see www.forecastpro.com/Trends/forecasting101August2011.html - private var mape = -1.0 // Mean Absolute Percentage Error (MAPE) - private var mase = -1.0 // Mean Absolute Scaled Error (MASE) - private var smapeIC = -1.0 // symmetric Mean Absolute Percentage Error Information Criteria (sMAPE-IC) -// private var nmae = -1.0 // normalized MAE (MAD/Mean Ratio) + private var mape = -1.0 // Mean Absolute Percentage Error (MAPE) + private var mase = -1.0 // Mean Absolute Scaled Error (MASE) + private var smapeC = -1.0 // symmetric Mean Absolute Percentage Error Information Criteria (sMAPE-IC) +// private var nmae = -1.0 // normalized MAE (MAD/Mean Ratio) - private var picp = -1.0 // prediction interval coverage probability - private var pinc = -1.0 // prediction interval nominal coverage - private var ace = -1.0 // average coverage error - private var pinaw = -1.0 // prediction interval normalized average width - private var pinad = -1.0 // prediction interval normalized average deviation - private var iscore = -1.0 // interval score - private var wis = -1.0 // weighted interval score + private var picp = -1.0 // Prediction Interval empirical Coverage Probability (PICP) + private var pinc = -1.0 // Prediction Interval Nominal Coverage probability (PINC) + private var ace = -1.0 // Average Coverage Error (picp - pinc) + private var pinaw = -1.0 // Prediction Interval Normalized Average Width (PINAW) + private var mis = -1.0 // Mean Interval Score (MIS) [over given instances] + private var wis = -1.0 // Weighted Interval Score (WIS) [over several α values] - protected var sig2e = -1.0 // MLE estimate of the population variance on the residuals + protected var sig2e = -1.0 // MLE estimate of the population variance on the residuals - protected var yForm: Transform = null // optional transformation of the response variable y - protected var 
scaledMetrics: Boolean = false // whether to use scaled metrics (otherwise use the default orifinal scale) + protected var yForm: Transform = null // optional transformation of the response variable y + protected var scaledMetrics: Boolean = false // whether to use scaled metrics (otherwise use the default original scale) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the the y-transformation. + /** Return the y-transformation. */ def getYForm: Transform = yForm //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Reset the degrees of freedom to the new updated values. For some models, * the degrees of freedom is not known until after the model is built. - * @param df_update the updated degrees of freedom (model, error) + * @param df_update the updated degrees of freedom (regression/model, error) */ def resetDF (df_update: (Double, Double)): Unit = - dfm = df_update._1; df = df_update._2 // degrees of freedom - df_t = dfm + df // total degrees of freedom - r_df = if df > 1.0 then df_t / df // ratio of degrees of freedom (total / error) - else dfm + 1.0 // case for for less than 1 dof error - debug ("resetDF", s"dfm = $dfm, df = $df") + dfr = df_update._1; df = df_update._2 // degrees of freedom + df_t = dfr + df // total degrees of freedom + r_df = if df > 1.0 then df_t / df // ratio of degrees of freedom (total / error) + else dfr + 1.0 // case for for less than 1 DoF error + debug ("resetDF", s"dfr = $dfr, df = $df") end resetDF //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the mean of the squares for error (sse / df). Must call diagnose first. */ - def mse_ : Double = mse + inline def mse_ : Double = mse //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Diagnose the health of the model by computing the Quality of Fit (QoF) measures, * from the error/residual vector and the predicted & actual responses. 
* For some models the instances may be weighted. * @see `Regression_WLS` - * @param y_ the actual response/output vector to use (test/full) - * @param yp_ the predicted response/output vector (test/full) - * @param w the weights on the instances (defaults to null) + * @param y_raw the actual response/output vector to use (test/full) + * @param yp_raw the predicted response/output vector (test/full) + * @param w the weights on the instances (defaults to null) */ - override def diagnose (y_ : VectorD, yp_ : VectorD, w: VectorD = null): VectorD = - val (y, yp) = if scaledMetrics || yForm == null then (y_, yp_) - else (yForm.fi(y_), yForm.fi(yp_)) - super.diagnose (y, yp, w) // compute `FitM` metrics + override def diagnose (y_raw: VectorD, yp_raw: VectorD, w: VectorD = null): VectorD = + val idx = y_raw.indexOf (NO_DOUBLE) // skip all after NO_DOUBLE (filler) - val e = y - yp // FIX - avoid computing twice + val (y_, yp_) = if idx < 0 then (y_raw, yp_raw) else (y_raw(0 until idx), yp_raw(0 until idx)) + val (y, yp) = if scaledMetrics || yForm == null then (y_, yp_) + else (yForm.fi(y_), yForm.fi(yp_)) + super.diagnose (y, yp, w) // compute `FitM` metrics + + val e = y - yp // FIX - avoid computing twice // println (s"Fit.diagnose:\n y = $y,\n yp = $yp,\n e = $e") - if dfm < 0 || df < 0 then - flaw ("diagnose", s"degrees of freedom dfm = $dfm and df = $df must be non-negative") + if dfr < 0 || df < 0 then + flaw ("diagnose", s"degrees of freedom dfr = $dfr and df = $df must be non-negative") - msr = if dfm == 0 then 0.0 else ssr / dfm // mean squared regression/model - mse = sse / df // mean squares error + msr = if dfr == 0 then 0.0 else ssr / dfr // Mean Squared Regression + mse = sse / df // Mean squared Error - rse = sqrt (mse) // residual standard error - rSqBar = 1 - (1-rSq) * r_df // adjusted R-squared + rse = sqrt (mse) // Residual Standard Error + rSqBar = 1 - (1-rSq) * r_df // adjusted R-squared - fStat = msr / mse // F statistic (quality of fit) - p_fS = if 
dfm == 0 then -0.0 - else 1.0 - fisherCDF (fStat, dfm.toInt, df.toInt) // p-value for fStat - if p_fS.isNaN then p_fS = -0.0 // NaN => check error message produced by fisherCDF + fStat = msr / mse // F statistic (quality of fit) + p_fS = if dfr == 0 then -0.0 + else 1.0 - fisherCDF (fStat, dfr.toInt, df.toInt) // p-value for fStat + if p_fS.isNaN then p_fS = -0.0 // NaN => check error message produced by fisherCDF if sig2e == -1.0 then sig2e = e.variance_ - val ln_m = log (m) // natural log of m (ln(m)) - aic = ll() + 2 * (dfm + 1) // Akaike Information Criterion - // the + 1 on dfm accounts for the sig2e, which is - // an additional parameter to be estimated in MLE - bic = aic + (dfm + 1) * (ln_m - 2) // Bayesian Information Criterion - mape = 100 * (e.abs / y.abs).sum / m // mean absolute percentage error - mase = Fit.mase (y, yp) // mean absolute scaled error - smapeIC = smape + pIC * (dfm + 1) / y.dim.toDouble // sSMAPE Information Criterion + val ln_m = log (m) // natural log of m (ln(m)) + aic = ll() + 2 * (dfr + 1) // Akaike Information Criterion + // the + 1 on dfr accounts for the sig2e, which is + // an additional parameter to be estimated in MLE + bic = aic + (dfr + 1) * (ln_m - 2) // Bayesian Information Criterion + mape = 100 * (e.abs / y.abs).sum / m // Mean Absolute Percentage Error + mase = Fit.mase (y, yp) // Mean Absolute Scaled Error + smapeC = smape + pIC * (dfr + 1) / y.dim.toDouble // sMAPE Information Criterion fit end diagnose -// nmae = mae / mu // normalized MAE (MAD/Mean Ratio) -// nrmse = rmse / mu // normalized RMSE +// nmae = mae / mu // normalized MAE (MAD/Mean Ratio) +// nrmse = rmse / mu // normalized RMSE // issues concerning mean: full, train or test? 
// val ym = if ym_ == -0.0 then { debug ("diagnose", "test mean"); mu } // else { debug ("diagnose", "train mean"); ym_ } //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Diagnose the health of the model by computing the Quality of Fit (QoF) metrics/measures, - * from the error/residual vector and the predicted & actual responses. - * For some models the instances may be weighted. Include interval measures. - * Note: `wis` should be computed separately. - * @see `Regression_WLS` - * @param y the actual response/output vector to use (test/full) - * @param yp the point prediction mean/median - * @param low the predicted lower bound - * @param up the predicted upper bound - * @param alpha the nominal level of uncertainty (alpha) (defaults to 0.9, 90%) - * @param w the weights on the instances (defaults to null) + /** Diagnose the health of the model by computing the Quality of Fit (QoF) + * metrics/measures, from the error/residual vector and the predicted & + * actual responses. For some models the instances may be weighted. + * This method also includes PREDICTION INTERVAL (PI) metrics/measures. + * @see otexts.com/fpp2/prediction-intervals.html + * Note: `wis` should be computed separately as the bounds are matrices. 
+ * @param y the actual response/output vector to use (test/full) + * @param yp the point prediction mean/median + * @param low_up the predicted (lower, upper) bounds vectors + * @param α the significance/nominal level of uncertainty (α) (defaults to 0.1, 10%) + * @param w the weights on the instances (defaults to null) */ - def diagnose_ (y: VectorD, yp: VectorD, low: VectorD, up: VectorD, alpha: Double = 0.1, + def diagnose_ (y: VectorD, yp: VectorD, low_up: (VectorD, VectorD), α: Double = 0.1, w: VectorD = null): VectorD = - diagnose (y, yp, w) - - picp = picp_ (y, low, up) // prediction interval coverage probability - pinc = 1 - alpha // prediction interval nominal coverage - ace = picp - pinc // average coverage error - pinaw = (up - low).mean / (y.max - y.min) // prediction interval normalized average width - pinad = pinad_ (y, low, up) // prediction interval normalized average deviation - iscore = iscore_ (y, low, up) // interval score + diagnose (y, yp, w) // call the main diagnose method for non-PI metrics + + picp = picp_ (y, low_up) // Prediction Interval empirical Coverage Probability + pinc = 1 - α/2 // Prediction Interval Nominal Coverage probability + ace = picp - pinc // Average Coverage Error (empirical - nominal) + pinaw = (low_up._2 - low_up._1).mean / (y.max - y.min) // Prediction Interval Normalized Average Width + // average PI width / range of y values + mis = mis_ (y, low_up) // Mean Interval Score fit end diagnose_ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Diagnose the health of the model by computing the Quality of Fit (QoF) measures, + * specifically for the weighted interval score that allows using custom α levels. 
* @param y the given time-series (must be aligned with the interval forecast) * @param yp the point prediction mean/median - * @param low the lower bounds for various alpha levels - * @param up the upper bounds for various alpha levels - * @param alphas the array of prediction levels + * @param low_up the predicted (lower, upper) bounds matrices for various α levels + * (column for each α level) + * @param α the vector of significance levels (defaults to the K = 11 prediction + * intervals used by the CDC Forecast Hub) */ - def diagnose_wis (y: VectorD, yp: VectorD, low: MatrixD, up: MatrixD, - alphas: Array [Double] = - Array (0.02, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)): Double = - wis = wis_ (y, yp, low, up, alphas) + def diagnose_wis (y: VectorD, yp: VectorD, low_up: (MatrixD, MatrixD), α: VectorD = α_): Double = + wis = wis_ (y, yp, low_up, α) wis end diagnose_wis + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Diagnose the health of the model by computing the Quality of Fit (QoF) measures + * for both POINT PREDICTIONS and PREDICTION INTERVALS. 
+ * @param y the given time-series (must be aligned with the interval forecast) + * @param yp the point prediction mean/median + * @param low_up the predicted (lower, upper) bounds matrices for various α levels + * (column for each α level) + * @param α the vector of significance levels (defaults to the K = 11 prediction + * intervals used by the CDC Forecast Hub) + * @param iα the index for the main significance level out of the vector α + */ + def diagnose_pi (y: VectorD, yp: VectorD, low_up: (MatrixD, MatrixD), α: VectorD = α_, + iα: Int = 2): (VectorD, Int) = + diagnose_ (y, yp, MatrixD.at (low_up, iα), α(iα)) + wis = wis_ (y, yp, low_up, α) + (fit, iα) + end diagnose_pi + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Diagnose the health of the model by computing the Quality of Fit (QoF) measures, + * from the predicted & actual matrix responses (output variable per column). + * For some models the instances may be weighted. + * @see `Regression_WLS` + * @param yy the actual response/output matrix to use (test/full) + * @param yyp the predicted response/output matrix (test/full) + * @param w the weights on the instances (defaults to null) + */ + def diagnose_mat (yy: MatrixD, yyp: MatrixD, w: VectorD = null): MatrixD = + MatrixD (for k <- yy.indices2 yield diagnose (yy(?, k), yyp(?, k), w)).ᵀ + end diagnose_mat + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The log-likelihood function times -2. Override as needed. * @see www.stat.cmu.edu/~cshalizi/mreg/15/lectures/06/lecture-06.pdf @@ -451,32 +519,14 @@ trait Fit (protected var dfm: Double, protected var df: Double) * Override to add more quality of fit measures. 
*/ override def fit: VectorD = VectorD (rSq, rSqBar, sst, sse, sde, mse0, rmse, mae, - smape, m, dfm, df, fStat, aic, bic, mape, mase, smapeIC, - picp, pinc, ace, pinaw, pinad, iscore, wis) + smape, m, dfr, df, fStat, aic, bic, mape, mase, smapeC, + picp, pinc, ace, pinaw, mis, wis) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the Quality of Fit (QoF) measures corresponding to the labels given. * Override to add more quality of fit measures. */ -// def fit_ : VectorD = fit ++ VectorD (picp, pinc, ace, pinaw, pinad, iscore, wis) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show the prediction interval forecasts and relevant QoF metrics/measures. - * @param yy the aligned actual response/output vector to use (test/full) - * @param yfh the forecasts for horizon h - * @param low the predicted lower bound - * @param up the predicted upper bound - * @param qof_all all the QoF metrics (for point and interval forecasts) - * @param h the forecasting horizon - */ - def show_interval_forecasts (yy: VectorD, yfh: VectorD, - low: VectorD, up: VectorD, - qof_all: VectorD, h: Int): Unit = - println (FitM.fitMap (qof_all, qoF_names)) // fully evaluate h-steps ahead forecasts - new PlotM (null, MatrixD (yy, yfh, low, up), // aligned actual, forecasted, lower, upper - Array ("yy", "yfh", "low", "up"), - "Plot Prediction Intervals for horizon $h", lines = true) - end show_interval_forecasts +// def fit_ : VectorD = fit ++ VectorD (picp, pinc, ace, pinaw, mis, wis) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the help string that describes the Quality of Fit (QoF) measures @@ -485,7 +535,60 @@ trait Fit (protected var dfm: Double, protected var df: Double) override def help: String = Fit.help //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a QoF summary for a model with diagnostics for each predictor x_j + /** Show 
the QoF metrics/measures in vector qof. + * @param qof the QoF metrics (e.g., for point and interval predictions/forecasts) + */ + def showQoF (qof: VectorD): Unit = println (FitM.fitMap (qof, qoF_names)) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Make the PREDICTION INTERVAL (PI) lower and upper bound vectors from + * the point predictions and the interval half widths. + * @param yp the vector of point predictions (y-hat) + * @param ihw the vector of interval half widths (one for each prediction) + */ + inline def PIbounds (yp: VectorD, ihw: VectorD): (VectorD, VectorD) = (yp - ihw, yp + ihw) + + inline def PIbounds (yp: VectorD, ihw_ : MatrixD): (MatrixD, MatrixD) = (-ihw_ + yp, ihw_ + yp) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Produce a PREDICTION INTERVAL half width for each prediction yp (y-hat). + * Note: `Fac_Cholesky is used to compute the inverse of xtx. + * @see `predictCInt` in `Predictor` + * @see stats.stackexchange.com/questions/585660/what-is-the-formula-for-prediction-interval-in-multivariate-case + * @see www.geeksforgeeks.org/data-analysis/confidence-and-prediction-intervals-with-statsmodels/ + * @param x_ the testing/full data/input matrix + * @param df_ the error/residual degrees of freedom + * @param α the significance level α = .1 for TWO TAILS: left tail .05 | 1 - α = .90 | .05 right tail + * e.g., for AutoMPG, t_crit (385, 0.90) = 1.6488210657096942 + * t_crit (385, 0.95) = 1.966 + */ + def predictInt (x_ : MatrixD, df_ : Double = df, α: Double = .1): VectorD = + val facCho = new Fac_Cholesky (x_.ᵀ * x_) // create a Cholesky factorization of xtx + val xtxInv = facCho.inverse // take inverse + val sig2 = mse_ + val p_ = 1 - α/2 // need p_-th quantile + val t_ = t_crit (df_.toInt, p_) // critical value from the t-distribution (two tails) + debug ("predictInt", s"t_crit (${df_.toInt}, $p_) = $t_") + val ihw = new VectorD (x_.dim) + for i <- x_.indices do 
+ val x_i = x_(i) // use i-th predictor vector for ihw(i) + ihw(i) = t_ * sqrt (sig2 * (1 + (x_i dot xtxInv * x_i))) + ihw // return vector of Interval Half Widths (IHWs) + end predictInt + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Produce a PREDICTION INTERVAL half width for each prediction yp (y-hat) and + * each significance level. + * @param x_ the testing/full data/input matrix + * @param df_ the error/residual degrees of freedom + * @param α the significance levels to be used (defaults to `Fit.α_`) + */ + def predictInt_ (x_ : MatrixD, df_ : Double = df, α: VectorD = α_): MatrixD = + MatrixD (α.map (predictInt (x_, df_, _))) + end predictInt_ + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Produce a QoF SUMMARY for a model with diagnostics for each predictor x_j * and the overall Quality of Fit (QoF). * Note: `Fac_Cholesky is used to compute the inverse of xtx. * @param x_ the testing/full data/input matrix @@ -493,11 +596,12 @@ trait Fit (protected var dfm: Double, protected var df: Double) * @param b the parameters/coefficients for the model * @param vifs the Variance Inflation Factors (VIFs) */ - override def summary (x_ : MatrixD, fname: Array [String], b: VectorD, vifs: VectorD = null): String = + override def summary (x_ : MatrixD = null, fname: Array [String] = null, b: VectorD = null, + vifs: VectorD = null): String = - val facCho = new Fac_Cholesky (x_.transpose * x_) // create a Cholesky factorization of xtx - val diag = facCho.inverse(?) // take inverse and get main diagonal - val stdErr = (diag * mse_).sqrt // standard error of coefficients + val facCho = new Fac_Cholesky (x_.ᵀ * x_) // create a Cholesky factorization of xtx + val diag = facCho.inverse(?) 
// take inverse and get main diagonal + val stdErr = (diag * mse_).sqrt // standard error of coefficients val stats = (sumCoeff (b, stdErr, vifs), fmt(rse), fmt(rSq), fmt(rSqBar)) debug ("summary", s"stats = $stats") @@ -511,7 +615,7 @@ SUMMARY ${stats._1} Residual standard error: ${stats._2} on $df degrees of freedom Multiple R-squared: ${stats._3}, Adjusted R-squared: ${stats._4} - F-statistic: $fStat on $dfm and $df DF, p-value: $p_fS + F-statistic: $fStat on $dfr and $df DF, p-value: $p_fS ---------------------------------------------------------------------------------- """ end summary @@ -526,10 +630,9 @@ ${stats._1} debug ("sumCoeff", s"stdErr = $stdErr") var t, p: VectorD = null if stdErr != null then - t = b / stdErr // Student's T statistic + t = b / stdErr // Student's T statistic p = if df > 0 then t.map ((x: Double) => 2.0 * studentTCDF (-abs (x), df)) // p value else -VectorD.one (b.dim) - end if val sb = new StringBuilder () for j <- b.indices do sb.append (" x" + j + "\t " + fmt(b(j)) + @@ -549,7 +652,7 @@ end Fit //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `TestFit` class can be used for comparing two vectors on the basis of QoF. - * The degrees of freedom (dfm) for the "model" is assumed to be 1. + * The degrees of freedom (dfr) for the "model" is assumed to be 1. * Can be used when the degrees of freedom are not known. * @param m the size of vectors to compare */ @@ -564,13 +667,14 @@ end TestFit //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `fitTest` main function is used to test the `Fit` trait on a simulated dataset. + * It test the `diagnose` method to get metrics on POINT PREDICTIONS. 
* > runMain scalation.modeling.fitTest */ @main def fitTest (): Unit = // import scalation.random.Normal - for sig2 <- 10 to 50 by 10 do + for sig2 <- 10 to 50 by 10 do // test for increasing noise // val rv = Normal (0, sig2) val rv = SimpleUniform (-sig2, sig2) val y = VectorD.range (1, 101) + 10.0 @@ -588,6 +692,7 @@ end fitTest //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `fitTest2` main function is used to test the `Fit` class on a simulated * time series. + * It test the `diagnose_` method to get metrics on PREDICTION INTERVALS. * @see `scalation.modeling.forecasting.randomWalkTest3` for another test case * > runMain scalation.modeling.fitTest2 */ @@ -595,20 +700,21 @@ end fitTest import scalation.random.Normal - for sig2 <- 10 to 50 by 10 do + for sig2 <- 10 to 50 by 10 do // test for increasing noise val rv = Normal (0, sig2) val w = math.sqrt (sig2) * 1.96 val yp = VectorD.range (1, 101) + 10.0 - val y = yp.map (_ + rv.gen) // simulated time series + val y = yp.map (_ + rv.gen) // simulated time series val low = yp.map (_ - w) val up = yp.map (_ + w) new PlotM (null, MatrixD (y, yp, low, up), Array ("y", "yp", "low", "up"), "plot y, low and up") object ft extends Fit (1, y.dim) - ft.diagnose_ (y, yp, low, up) - ft.diagnose_wis (y, yp, MatrixD (low), MatrixD (up), Array (0.1)) + ft.diagnose_ (y, yp, (low, up)) + ft.diagnose_wis (y, yp, (MatrixD (low), MatrixD (up)), VectorD (0.1)) // FIX - WIS needs multiple α levels val qof = ft.fit println (FitM.fitMap (qof, qoF_names)) end for end fitTest2 + diff --git a/src/main/scala/scalation/modeling/FitI.scala.bak b/src/main/scala/scalation/modeling/FitI.scala.bak index 27d9281d7..6d871bcac 100644 --- a/src/main/scala/scalation/modeling/FitI.scala.bak +++ b/src/main/scala/scalation/modeling/FitI.scala.bak @@ -10,6 +10,8 @@ * @see github.com/scikit-learn/scikit-learn/issues/20162 // used in scikit-learn * www.mdpi.com/1999-4893/13/6/132 // defines several metrics * 
arxiv.org/pdf/2005.12881.pdf // for IS and WIS + * https://www.sciencedirect.com/science/article/pii/S1364032120308005 + * www.datasciencewithmarco.com/blog/conformal-prediction-in-time-series-forecasting */ package scalation @@ -18,7 +20,8 @@ package modeling import scalation.mathstat._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QoFI` enum defines the Interval-based Quality of Fit (QoFI) measures/metrics. +/** The `QoFI` enum defines the Interval-based Quality of Fit (QoFI) measures/metrics, + * i.e., prediction interval metrics. * @param name the name of the parameter */ enum QoFI (val name: String): @@ -27,9 +30,11 @@ enum QoFI (val name: String): case pinc extends QoFI ("pinc") // index 1 case ace extends QoFI ("ace") // index 2 case pinaw extends QoFI ("pinaw") // index 3 - case pinad extends QoFI ("pinad") // index 4 - case iscore extends QoFI ("iscore") // index 5 - case wis extends QoFI ("wis") // index 7 + case iscore extends QoFI ("iscore") // index 4 + case wis extends QoFI ("wis") // index 5 + case pinad extends QoFI ("pinad") // index 6 + case cwc extends QoFI ("cwc") // index 7 + case crps extends QoFI ("crps") // index 8 end QoFI @@ -55,13 +60,15 @@ object FitI: def help: String = """ help: Interval-based Quality of Fit (QoFI) metrics/measures: - picp = prediction interval coverage probability + picp = prediction interval coverage probability (reliability) pinc = prediction interval nominal coverage ace = average coverage error - pinaw = prediction interval normalized average width - pinad = prediction interval normalized average deviation + pinaw = prediction interval normalized average width (sharpness) iscore = interval score wis = weighted interval score + pinad = prediction interval normalized average deviation (future use) + cwc = coverage width-based criterion (future use) + crps = continuous ranked probability score (future use) """ end help diff --git 
a/src/main/scala/scalation/modeling/FitM.scala b/src/main/scala/scalation/modeling/FitM.scala index 8d32fde37..4dc6ba398 100644 --- a/src/main/scala/scalation/modeling/FitM.scala +++ b/src/main/scala/scalation/modeling/FitM.scala @@ -36,16 +36,16 @@ trait FitM: protected var m = -1 // number of instances (# data points) - protected var sse = -1.0 // sum of squares for error (SSE or RSS) - protected var ssr = -1.0 // sum of squares regression/model (SSR) - protected var sst = -1.0 // sum of squares total (SST = SSR + SSE) - protected var sde = -1.0 // standard deviation of errors (standard error of estimate) + protected var sse = -1.0 // Sum of Squares for Error (SSE or RSS) + protected var ssr = -1.0 // Sum of Squares Regression/model (SSR) + protected var sst = -1.0 // Sum of Squares Total (SST = SSR + SSE) + protected var sde = -1.0 // Standard Deviation of Errors (standard error of estimate) // note sde uses sample vs. rmse uses population formulas protected var rSq = -1.0 // coefficient of determination R^2 using mean protected var rSq0 = -1.0 // coefficient of determination R^2 using 0 - protected var mse0 = -1.0 // raw/MLE mean squared error (MSE0) - protected var rmse = -1.0 // root mean squared error (RMSE) - protected var mae = -1.0 // mean absolute error (MAE or MAD) + protected var mse0 = -1.0 // raw/MLE Mean Squared Error (MSE0) + protected var rmse = -1.0 // Root Mean Squared Error (RMSE) + protected var mae = -1.0 // Mean Absolute Error (MAE or MAD) protected var smape = -1.0 // symmetric Mean Absolute Percentage Error (sMAPE) private val flaw = flawf ("FitM") // flaw function @@ -53,28 +53,13 @@ trait FitM: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the sum of the squares for error (sse). Must call diagnose first. */ - def sse_ : Double = sse + inline def sse_ : Double = sse //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the coefficient of determination (R^2). 
Must call diagnose first. */ - def rSq_ : Double = rSq // using mean - def rSq0_ : Double = rSq0 // using 0 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the symmetric Mean Absolute Percentage Error (sMAPE) score. - * Caveat: y_i = yp_i = 0 => no error => no percentage error - * @param y the given time-series (must be aligned with the forecast) - * @param yp the forecasted time-series - * @param e_ the error/residual vector (if null, recompute) - */ - inline def smapeF (y: VectorD, yp: VectorD, e_ : VectorD = null): Double = - val e = if e_ == null then y - yp else e_ - var s = 0.0 - for i <- e.indices if e(i) != 0.0 do - s += abs (e(i)) / (abs (y(i)) + abs (yp(i))) - 200 * s / e.dim - end smapeF + inline def rSq_ : Double = rSq // using mean + inline def rSq0_ : Double = rSq0 // using 0 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Diagnose and return the health of the model by computing the Quality of Fit (QoF) @@ -91,26 +76,24 @@ trait FitM: if m < 2 then flaw ("diagnose", s"requires at least 2 responses to evaluate m = $m") if yp.dim != m then flaw ("diagnose", s"yp.dim = ${yp.dim} != y.dim = $m") - val mu = y.mean // mean of y (may be zero) + val mu = y.mean // Mean of y (may be zero) val e = y - yp // residual/error vector - sse = e.normSq // sum of squares for error + sse = e.normSq // Sum of Squares for Error if w == null then - sst = (y - mu).normSq // sum of squares total (ssr + sse) - ssr = sst - sse // sum of squares regression/model -// println (s"ssr = $ssr") + sst = (y - mu).normSq // Sum of Squares Total (ssr + sse) + ssr = sst - sse // Sum of Squares Regression else - ssr = (w * (yp - (w * yp / w.sum).sum)~^2).sum // regression sum of squares + ssr = (w * (yp - (w * yp / w.sum).sum)~^2).sum sst = ssr + sse - end if - sde = e.stdev // standard deviation of error + sde = e.stdev // Standard Deviation of Error rSq = 1 - sse / sst // R^2 using mean rSq0 = 1 - sse / 
y.normSq // R^2 using 0 (used by R when no intercept) - mse0 = sse / m // raw/MLE mean squared error - rmse = sqrt (mse0) // root mean squared error (RMSE) - mae = e.norm1 / m // mean absolute error - smape = smapeF (y, yp, e) // symmetric Mean Absolute Percentage Error (sMAPE) + mse0 = sse / m // raw/MLE Mean Squared Error + rmse = sqrt (mse0) // Root Mean Squared Error + mae = e.norm1 / m // Mean Absolute Error + smape = FitM.smapeF (y, yp, e) // symmetric Mean Absolute Percentage Error (sMAPE) fit // returns QoF end diagnose @@ -150,6 +133,79 @@ object FitM: private val fitLabel = Array ("rSq", "sst", "sse", "sde", "mse0", "rmse", "mae", "smape", "m") + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the symmetric Mean Absolute Percentage Error (sMAPE) score. + * Caveat: y_i = yp_i = 0 => no error => no percentage error + * @param y the given time-series (must be aligned with the forecast) + * @param yp the forecasted time-series + * @param e_ the error/residual vector (if null, recompute) + */ + inline def smapeF (y: VectorD, yp: VectorD, e_ : VectorD = null): Double = + val e = if e_ == null then y - yp else e_ + var s = 0.0 + for i <- e.indices if e(i) != 0.0 do + s += abs (e(i)) / (abs (y(i)) + abs (yp(i))) + 200 * s / e.dim + end smapeF + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the Mean Absolute Error (MAE) score on the normalized/transformed data. 
+ * Caveat: y_i = yp_i = 0 => no error => no percentage error + * @param y the given time-series (must be aligned with the forecast) + * @param yp the forecasted time-series + * @param tForm the transformation to scale the data + */ + inline def n_maeF (y: VectorD, yp: VectorD, tForm: Transform = null): Double = + val m = y.dim // size of response vector (test/full) + val y_t = tForm.f(y) + val yp_t = tForm.f(yp) + val e = y_t - yp_t + e.norm1 / m + end n_maeF + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the Mean Squared Error (MSE) score on the normalized/transformed data. + * Caveat: y_i = yp_i = 0 => no error => no percentage error + * @param y the given time-series (must be aligned with the forecast) + * @param yp the forecasted time-series + * @param tForm the transformation to scale the data + */ + inline def n_mseF (y: VectorD, yp: VectorD, tForm: Transform = null): Double = + val m = y.dim // size of response vector (test/full) + val y_t = tForm.f(y) + val yp_t = tForm.f(yp) + val e = y_t - yp_t + e.normSq / m + end n_mseF + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return common Time Series (TS) results (sample size, sMAPEs, normalized MAEs and MSEs) + * for all horizons (0->h1, 1->h2, ..., hh-1->hh) using the forecast matrix and return + * averages. These results are commonly given in research papers. 
+ * @param yf the forecast matrix + * @param hh total numer of horizons + * @param tForm the transformation to scale the data + */ + def getTSResult (yf: MatrixD, hh: Int, tForm: Transform): Array [VectorD] = + val n_sample = new VectorD (hh) + val smapes = new VectorD (hh + 1) + val maes = new VectorD (hh + 1) + val mses = new VectorD (hh + 1) + val y = yf(?, 0) + val d = y.dim + for h <- 0 until hh do + val yp = yf(?, h + 1) + n_sample(h) = d - h + smapes(h) = FitM.smapeF (y(h until d), yp(0 until d-h)) + maes(h) = FitM.n_maeF (y(h until d), yp(0 until d-h), tForm) + mses(h) = FitM.n_mseF (y(h until d), yp(0 until d-h), tForm) + end for + smapes(hh) = smapes(0 until hh).mean + maes(hh) = maes(0 until hh).mean + mses(hh) = mses(0 until hh).mean + Array (n_sample, smapes, maes, mses) + end getTSResult + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a map of quality of fit measures (use of `LinkedHashMap` makes it ordered). * @param ftVec the vector of QoF values @@ -175,8 +231,8 @@ object FitM: end fitMap //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show the quality of fit measures for each response/output variable. - * @param ftMat the matrix of QoF values + /** Show the quality of fit measures/metrics for each response/output variable. + * @param ftMat the matrix of QoF values (qof x var) * @param ftLab the array of QoF labels */ def showFitMap (ftMat: MatrixD, ftLab: Array [String]): String = diff --git a/src/main/scala/scalation/modeling/Imputation.scala b/src/main/scala/scalation/modeling/Imputation.scala index db3674073..dd3218eee 100644 --- a/src/main/scala/scalation/modeling/Imputation.scala +++ b/src/main/scala/scalation/modeling/Imputation.scala @@ -6,21 +6,33 @@ * @see LICENSE (MIT style license file). 
* * @note Collection of Simple Imputation Techniques for Missing Values or Outliers + * + * Common Imputation Techniques: Multiple Imputation of Chained Equations (MICE), + * Regression Imputation (RI), kNN, Decision Trees, Random Forests (missForest), SoftImpute + * + * FIX -- implement Multiple Imputation of Chained Equations (MICE) + * @see https://pmc.ncbi.nlm.nih.gov/articles/PMC3074241/ + * @see https://www.machinelearningplus.com/machine-learning/mice-imputation */ package scalation package modeling +import scala.annotation.unused + import scalation.mathstat._ import scalation.random.Normal //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `Imputation` trait specifies an imputation operation called impute to be defined * by the objects implementing it, i.e., - * `ImputeRegression` - impute missing values using `SimpleRegression` + * `ImputeMICE` - impute missing values using MICE + * `ImputeMRegression` - impute missing values using `Regression` (RI) + * `ImputeSRegression` - impute missing values using `SimpleRegression` * `ImputeForward` - impute missing values using previous values and slopes * `ImputeBackward` - impute missing values using subsequent values and slopes - * `ImputeMean` - impute missing values usind the filtered mean + * `ImputeMean` - impute missing values using the filtered mean + * `ImputeMedian` - impute missing values using the filtered median * `ImputeNormal` - impute missing values using the median of Normal random variates * `ImputeMovingAvg` - impute missing values using the moving average * `ImputeNormalWin` - impute missing values using the median of Normal random variates for a window @@ -119,31 +131,21 @@ trait Imputation: */ protected def nextVal (x: VectorD, i: Int): Double = val j = x.indexWhere (_ != missVal, i) // find first non-missing from i + debug ("nextVal", s"unable to find any non-missing values from $i to ${x.dim -1}") if j >= 0 then x(j) else missVal end nextVal -/* - 
for j <- i until x.dim if x(j) != missVal do return x(j) // find first non-missing from i - println (s"nextVal: unable to find any non-missing values from $i to ${x.dim -1}") - missVal -*/ - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the previous non-missing value in vector x from index i. If none, return missVal. * @param x the vector to be searched (backwards) for a non-missing value * @param i the starting index to look for non-missing value */ protected def prevVal (x: VectorD, i: Int): Double = - val j = x.lastIndexWhere (_ != missVal, i) // find first non-missing from i backward + val j = x.lastIndexWhere (_ != missVal, i) // find first non-missing from i backward + debug ("prevVal", s"unable to find any non-missing values from $i downto 0") if j >= 0 then x(j) else missVal end prevVal -/* - for j <- i to 0 by -1 if x(j) != missVal do return x(j) // find first non-missing from i - println (s"prevVal: unable to find any non-missing values from $i downto 0") - missVal -*/ - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the index of first missing value in vector x from index i and the * new imputed value. 
@@ -151,19 +153,11 @@ trait Imputation: * @param i the starting index to look for missing value */ def findMissing (x: VectorD, i: Int = 0): (Int, Double) = - val j = x.indexOf (missVal, i) // find first missing from i - if j >= 0 then (j, imputeAt (x, j)) // return (index, imputed value) - else (-1, nextVal (x, i)) // return (not found, first value) + val j = x.indexOf (missVal, i) // find first missing from i + if j >= 0 then (j, imputeAt (x, j)) // return (index, imputed value) + else (-1, nextVal (x, i)) // return (not found, first value) end findMissing -/* - var value = nextVal (x, i) - for j <- i until x.dim if x(j) == missVal do // find first missing from i - value = imputeAt (x, j) - return (j, value) // return (index, imputed value) - end for -*/ - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the index of last missing value in vector x from index i and the * new imputed value. @@ -172,27 +166,113 @@ trait Imputation: */ def findLastMissing (x: VectorD, i_ : Int = -1): (Int, Double) = val i = if i_ < 0 then x.dim-1 else i_ - val j = x.lastIndexOf (missVal, i) // find last missing from i - if j >= 0 then (j, imputeAt (x, j)) // return (index, imputed value) - else (-1, prevVal (x, i)) // return (not found, last value) + val j = x.lastIndexOf (missVal, i) // find last missing from i + if j >= 0 then (j, imputeAt (x, j)) // return (index, imputed value) + else (-1, prevVal (x, i)) // return (not found, last value) end findLastMissing -/* - var value = prevVal (x, i) - for j <- i to 0 by -1 if x(j) == missVal do // find last missing from ii - val value = imputeAt (x, j) - return (j, value) // return (index, imputed value) +end Imputation + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ImputeMICE` object imputes missing values using MICE. + * Use the columns in matrix z to impute values for target vector x. 
+ */ +object ImputeMICE extends Imputation: + + private val max_iter = 5 // maximum number of impute steps + private var z: MatrixD = null // matrix holding the dataset + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Set the value for the z matrix containing the dataset (consisting of multiple + * columns/variables) where missing values are to be imputed. + * @param z_ the matrix to be assigned + */ + def setZ (z_ : MatrixD): Unit = z = z_ + + def imputeAt (x: VectorD, i: Int): Double = + throw new UnsupportedOperationException ("ImputeImputeMICE: 'imputeAt' not supported, use 'imputeAll'") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Make initial imputation by replacing missing values with the column means. + * Return the positions imputed and the new imputed matrix. + */ + def initialImpute (): (Array [IndexedSeq [Int]], MatrixD) = + val idx = Array.fill (z.dim2) (IndexedSeq [Int] ()) // hold the indices of missing values + val zz = z.copy // put imputed values in a copy of the z matrix + for j <- z.indices2 do + idx(j) = for i <- z.indices if z(i, j) != missVal yield i // find indices where z has missing values + ImputeMean.imputeAll (zz(?, j)) // replace missing values with column mean + (idx, zz) + end initialImpute + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform a MICE imputation step by fixing each column that has missing values. 
+ */ + def imputeStep (): Unit = + val (idx, zz) = initialImpute () + for j <- z.indices2 if idx(j).size < z.dim2 do // look for columns with missing values + val y = zz(?, j) // column to impute values + val x = zz.not(?, j) // other columns used to predict the impute column + val rg = new Regression (x, y, null) // create a multiple regression model + rg.train (x(idx(j)), y(idx(j))) // train using rows with no missing values in y + val pidx = IndexedSeq.range (0, x.dim) diff idx(j) // indices where values are to be predicted + for i <- pidx do zz(i, j) = rg.predict (zz(i)) // call predict to get values where y had missing values + // FIX - may add noise to prediction end for - (-1, value) // return (not found, last value) -*/ + end imputeStep -end Imputation + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Replace all missing values in vector x with imputed values. Will change + * the values in vector x. Make a copy to preserve values x.copy. + * @param x the vector with missing values (target column) + */ + override def imputeAll (@unused x: VectorD): VectorD = + for _ <- 1 to max_iter do imputeStep () + null + end imputeAll + +end ImputeMICE + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ImputeMRegression` object imputes missing values using multiple `Regression`. + * Use the columns in matrix z to impute values for target vector x. + */ +object ImputeMRegression extends Imputation: + + private var z: MatrixD = null // matrix holding the dataset + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Set the value for the z matrix containing the other columns used to predict + * values for target column x. 
+ * @param z_ the matrix to be assigned + */ + def setZ (z_ : MatrixD): Unit = z = z_ + + def imputeAt (x: VectorD, i: Int): Double = + throw new UnsupportedOperationException ("ImputeImputeMRegression: 'imputeAt' not supported, use 'imputeAll'") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Replace all missing values in vector x with imputed values. Will change + * the values in vector x. Make a copy to preserve values x.copy. + * @param x the vector with missing values (target column) + */ + override def imputeAll (x: VectorD): VectorD = + val idx = for i <- x.indices if x(i) != missVal yield i // find indices where x does not have missing values + val rg = new Regression (z, x, null) // create a multiple regression model + rg.train (z(idx), x(idx)) // train using rows with no missing values in x + val pidx = IndexedSeq.range (0, x.dim) diff idx // indices where values are to be predicted + for i <- pidx do x(i) = rg.predict (z(i)) // call predict to get values where x had missing values + x(pidx) // return the new predicted values for x + end imputeAll + +end ImputeMRegression //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ImputeRegression` object imputes missing values using `SimpleRegression`. +/** The `ImputeSRegression` object imputes missing values using `SimpleRegression`. 
*/ -object ImputeRegression extends Imputation: +object ImputeSRegression extends Imputation: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Impute a value for the first missing value in vector x from index i @@ -201,14 +281,14 @@ object ImputeRegression extends Imputation: * @param i the starting index to look for missing values */ def imputeAt (x: VectorD, i: Int): Double = - val xf = x.filter (_ != missVal) - val t = VectorD.range (0, xf.dim) - val rg = SimpleRegression (t, xf, null) - rg.train (rg.getX, xf) - rg.predict (VectorD (1, t(i))) + val xf = x.filter (_ != missVal) // x-vector with missing values removed + val t = VectorD.range (0, xf.dim) // vector = 0, 1, ..., xf.dim-1 + val rg = SimpleRegression (t, xf, null) // Simple Regression model of xf onto t + rg.train (rg.getX, xf) // train the model: X = [1, t], y = xf + rg.predict (VectorD (1, t(i))) // return the predicted value for the i-th element end imputeAt -end ImputeRegression +end ImputeSRegression //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -296,6 +376,25 @@ object ImputeMean extends Imputation: end ImputeMean +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ImputeMean` object imputes missing values using the filtered median. + */ +object ImputeMedian extends Imputation: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Impute a value for the first missing value in vector x from index i + * using the filtered mean. + * @param x the vector with missing values + * @param i the starting index to look for missing values (ignored) + */ + def imputeAt (x: VectorD, i: Int): Double = + val xf = x.filter (_ != missVal) + xf.median () + end imputeAt + +end ImputeMedian + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `ImputeNormal` object imputes missing values using the median Normal variates. 
*/ @@ -329,7 +428,7 @@ object ImputeMovingAvg extends Imputation: def imputeAt (x: VectorD, i: Int): Double = var (sum, cnt) = (0.0, 0) for k <- i - dist to i + dist do - if k >= 0 && k < x.dim && k != i && x(k) != missVal then { sum += x(k); cnt += 1 } + if k >= 0 && k < x.dim && k != i && x(k) != missVal then { sum += x(k); cnt += 1 } end for sum / cnt end imputeAt @@ -344,8 +443,7 @@ end ImputeMovingAvg object ImputeNormalWin extends Imputation: def imputeAt (x: VectorD, i: Int): Double = - throw new UnsupportedOperationException ("ImputeNormalWin: 'impute' not supported, use 'imputeAll'") - end imputeAt + throw new UnsupportedOperationException ("ImputeNormalWin: 'imputeAt' not supported, use 'imputeAll'") //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Impute all the missing values in vector x using Normal Distribution for @@ -353,9 +451,9 @@ object ImputeNormalWin extends Imputation: * @param q size of the sliding window */ override def imputeAll (x: VectorD): VectorD = - val z = new VectorD (x.dim) - val sumq = new SumSqQueue (q) - sumq += nextVal (x, 0) // prime with first non-missing value + val z = new VectorD (x.dim) + val sumq = new SumSqQueue (q) + sumq += nextVal (x, 0) // prime with first non-missing value for i <- x.indices do debug ("imputeAll", s"mu = ${sumq.mean}, sig2 = ${sumq.variance}") @@ -377,67 +475,67 @@ end ImputeNormalWin */ @main def imputationTest (): Unit = - val x = VectorD (1, 2, 3, 4, NO_DOUBLE, 6, 7, 8, 9) - val x2 = x.copy - val x3 = x.copy - var iv = (-1, NO_DOUBLE) + val x = VectorD (1, 2, 3, 4, NO_DOUBLE, 6, 7, 8, 9) + val x2 = x.copy + val x3 = x.copy + var iv = (-1, NO_DOUBLE) - banner ("ImputeRegression.impute") - iv = ImputeRegression.impute (x) - x2(iv._1) = iv._2 - println (s"x = $x") - println (s"x2 = $x2") + banner ("ImputeSRegression.impute") + iv = ImputeSRegression.impute (x) + x2(iv._1) = iv._2 + println (s"x = $x") + println (s"x2 = $x2") - banner ("ImputeForward.impute") - iv 
= ImputeForward.impute (x) - x2(iv._1) = iv._2 - println (s"x = $x") - println (s"x2 = $x2") + banner ("ImputeForward.impute") + iv = ImputeForward.impute (x) + x2(iv._1) = iv._2 + println (s"x = $x") + println (s"x2 = $x2") - banner ("ImputeBackward.impute") - iv = ImputeBackward.impute (x) - x2(iv._1) = iv._2 - println (s"x = $x") - println (s"x2 = $x2") + banner ("ImputeBackward.impute") + iv = ImputeBackward.impute (x) + x2(iv._1) = iv._2 + println (s"x = $x") + println (s"x2 = $x2") - banner ("ImputeMean.impute") - iv = ImputeMean.impute (x) - x2(iv._1) = iv._2 - println (s"x = $x") - println (s"x2 = $x2") + banner ("ImputeMean.impute") + iv = ImputeMean.impute (x) + x2(iv._1) = iv._2 + println (s"x = $x") + println (s"x2 = $x2") - banner ("ImputeNormal.impute") - iv = ImputeNormal.impute (x) - x2(iv._1) = iv._2 - println (s"x = $x") - println (s"x2 = $x2") + banner ("ImputeNormal.impute") + iv = ImputeNormal.impute (x) + x2(iv._1) = iv._2 + println (s"x = $x") + println (s"x2 = $x2") - banner ("ImputeMovingAvg.impute") - iv = ImputeMovingAvg.impute (x) - x2(iv._1) = iv._2 - println (s"x = $x") - println (s"x2 = $x2") + banner ("ImputeMovingAvg.impute") + iv = ImputeMovingAvg.impute (x) + x2(iv._1) = iv._2 + println (s"x = $x") + println (s"x2 = $x2") - banner ("ImputeRegression.imputeAll") - println ("x3 = " + ImputeRegression.imputeAll (x3.copy)) + banner ("ImputeSRegression.imputeAll") + println ("x3 = " + ImputeSRegression.imputeAll (x3.copy)) - banner ("ImputeForward.imputeAll") - println ("x3 = " + ImputeForward.imputeAll (x3.copy)) + banner ("ImputeForward.imputeAll") + println ("x3 = " + ImputeForward.imputeAll (x3.copy)) - banner ("ImputeBackward.imputeAll") - println ("x3 = " + ImputeBackward.imputeAll (x3.copy)) + banner ("ImputeBackward.imputeAll") + println ("x3 = " + ImputeBackward.imputeAll (x3.copy)) - banner ("ImputeMean.imputeAll") - println ("x3 = " + ImputeMean.imputeAll (x3.copy)) + banner ("ImputeMean.imputeAll") + println ("x3 = " + 
ImputeMean.imputeAll (x3.copy)) - banner ("ImputeNormal.imputeAll") - println ("x3 = " + ImputeNormal.imputeAll (x3.copy)) + banner ("ImputeNormal.imputeAll") + println ("x3 = " + ImputeNormal.imputeAll (x3.copy)) - banner ("ImputeMovingAvg.imputeAll") - println ("x3 = " + ImputeMovingAvg.imputeAll (x3.copy)) + banner ("ImputeMovingAvg.imputeAll") + println ("x3 = " + ImputeMovingAvg.imputeAll (x3.copy)) - banner ("ImputeNormalWin.imputeAll") - println ("x3 = " + ImputeNormalWin.imputeAll (x3.copy)) + banner ("ImputeNormalWin.imputeAll") + println ("x3 = " + ImputeNormalWin.imputeAll (x3.copy)) end imputationTest @@ -449,73 +547,124 @@ end imputationTest */ @main def imputationTest2 (): Unit = - val x = VectorD (NO_DOUBLE, NO_DOUBLE, 1, 2, 3, 4, 5, 6, 7, 8, 9) - var x2 = null.asInstanceOf [VectorD] - val x3 = x.copy - var iv = (-1, NO_DOUBLE) - - banner ("ImputeRegression.impute") - iv = ImputeRegression.impute (x) - x2 = x.copy - x2(iv._1) = iv._2 - println (s"x = $x") - println (s"x2 = $x2") - - banner ("ImputeForward.impute") - iv = ImputeForward.impute (x) - x2 = x.copy - x2(iv._1) = iv._2 - println (s"x = $x") - println (s"x2 = $x2") - - banner ("ImputeBackward.impute") - iv = ImputeBackward.impute (x) - x2 = x.copy - x2(iv._1) = iv._2 - println (s"x = $x") - println (s"x2 = $x2") - - banner ("ImputeMean.impute") - iv = ImputeMean.impute (x) - x2 = x.copy - x2(iv._1) = iv._2 - println (s"x = $x") - println (s"x2 = $x2") - - banner ("ImputeNormal.impute") - iv = ImputeNormal.impute (x) - x2 = x.copy - x2(iv._1) = iv._2 - println (s"x = $x") - println (s"x2 = $x2") - - banner ("ImputeMovingAvg.impute") - iv = ImputeMovingAvg.impute (x) - x2 = x.copy - x2(iv._1) = iv._2 - println (s"x = $x") - println (s"x2 = $x2") - - banner ("ImputeRegression.imputeAll") - println ("x3 = " + ImputeRegression.imputeAll (x3.copy)) - - banner ("ImputeForward.imputeAll") - println ("x3 = " + ImputeForward.imputeAll (x3.copy)) - - banner ("ImputeBackward.imputeAll") - println ("x3 = 
" + ImputeBackward.imputeAll (x3.copy)) - - banner ("ImputeMean.imputeAll") - println ("x3 = " + ImputeMean.imputeAll (x3.copy)) - - banner ("ImputeNormal.imputeAll") - println ("x3 = " + ImputeNormal.imputeAll (x3.copy)) - - banner ("ImputeMovingAvg.imputeAll") - println ("x3 = " + ImputeMovingAvg.imputeAll (x3.copy)) - - banner ("ImputeNormalWin.imputeAll") - println ("x3 = " + ImputeNormalWin.imputeAll (x3.copy)) + val x = VectorD (NO_DOUBLE, NO_DOUBLE, 1, 2, 3, 4, 5, 6, 7, 8, 9) + var x2 = null.asInstanceOf [VectorD] + val x3 = x.copy + var iv = (-1, NO_DOUBLE) + + banner ("ImputeSRegression.impute") + iv = ImputeSRegression.impute (x) + x2 = x.copy + x2(iv._1) = iv._2 + println (s"x = $x") + println (s"x2 = $x2") + + banner ("ImputeForward.impute") + iv = ImputeForward.impute (x) + x2 = x.copy + x2(iv._1) = iv._2 + println (s"x = $x") + println (s"x2 = $x2") + + banner ("ImputeBackward.impute") + iv = ImputeBackward.impute (x) + x2 = x.copy + x2(iv._1) = iv._2 + println (s"x = $x") + println (s"x2 = $x2") + + banner ("ImputeMean.impute") + iv = ImputeMean.impute (x) + x2 = x.copy + x2(iv._1) = iv._2 + println (s"x = $x") + println (s"x2 = $x2") + + banner ("ImputeNormal.impute") + iv = ImputeNormal.impute (x) + x2 = x.copy + x2(iv._1) = iv._2 + println (s"x = $x") + println (s"x2 = $x2") + + banner ("ImputeMovingAvg.impute") + iv = ImputeMovingAvg.impute (x) + x2 = x.copy + x2(iv._1) = iv._2 + println (s"x = $x") + println (s"x2 = $x2") + + banner ("ImputeSRegression.imputeAll") + println ("x3 = " + ImputeSRegression.imputeAll (x3.copy)) + + banner ("ImputeForward.imputeAll") + println ("x3 = " + ImputeForward.imputeAll (x3.copy)) + + banner ("ImputeBackward.imputeAll") + println ("x3 = " + ImputeBackward.imputeAll (x3.copy)) + + banner ("ImputeMean.imputeAll") + println ("x3 = " + ImputeMean.imputeAll (x3.copy)) + + banner ("ImputeNormal.imputeAll") + println ("x3 = " + ImputeNormal.imputeAll (x3.copy)) + + banner ("ImputeMovingAvg.imputeAll") + println ("x3 
= " + ImputeMovingAvg.imputeAll (x3.copy)) + + banner ("ImputeNormalWin.imputeAll") + println ("x3 = " + ImputeNormalWin.imputeAll (x3.copy)) end imputationTest2 + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `imputationTest3` main function imputes a missing value for the Texas Temperatures + * dataset that contains temperatures from counties in Texas where the variables/factors + * to consider are Latitude (x1), Elevation (x2) and Longitude (x3). The model equation + * is the following: + * y = b dot x = b0 + b1*x1 + b2*x2 + b3*x3 + * > runMain scalation.modeling.imputationTest3 + */ +@main def imputationTest3 (): Unit = + + // 16 data points: one x1 x2 x3 y + // Lat Elev Long Temp County + val xy = MatrixD ((16, 5), 1.0, 29.767, 41.0, 95.367, 56.0, // Harris + 1.0, 32.850, 440.0, 96.850, 48.0, // Dallas + 1.0, 26.933, 25.0, 97.800, 60.0, // Kennedy + 1.0, 31.950, 2851.0, 102.183, 46.0, // Midland + 1.0, 34.800, 3840.0, 102.467, 38.0, // Deaf Smith + 1.0, 33.450, 1461.0, 99.633, 46.0, // Knox + 1.0, 28.700, 815.0, 100.483, 53.0, // Maverick + 1.0, 32.450, 2380.0, 100.533, NO_DOUBLE, // Nolan (46.0) + 1.0, 31.800, 3918.0, 106.400, 44.0, // El Paso + 1.0, 34.850, 2040.0, 100.217, 41.0, // Collington + 1.0, 30.867, 3000.0, 102.900, 47.0, // Pecos + 1.0, 36.350, 3693.0, 102.083, 36.0, // Sherman + 1.0, 30.300, 597.0, 97.700, 52.0, // Travis + 1.0, 26.900, 315.0, 99.283, 60.0, // Zapata + 1.0, 28.450, 459.0, 99.217, 56.0, // Lasalle + 1.0, 25.900, 19.0, 97.433, 62.0) // Cameron + + println (s"xy = $xy") + + banner ("Texas Temperatures Regression before Imputation") + var mod = Regression (xy)() // create model with intercept (else pass x) + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + val x = xy.not (?, 4) + val y = xy(?, 4) + + banner ("ImputeMRegression.imputeAll") + ImputeMRegression.setZ (x) + println ("y = " + ImputeMRegression.imputeAll (y)) + + 
banner ("Texas Temperatures Regression after Imputation") + mod = new Regression (x, y) // create model with intercept (else pass x) + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + +end imputationTest3 + diff --git a/src/main/scala/scalation/modeling/Initialzer.scala b/src/main/scala/scalation/modeling/Initialzer.scala index 0ed6e6808..ac94fb60f 100644 --- a/src/main/scala/scalation/modeling/Initialzer.scala +++ b/src/main/scala/scalation/modeling/Initialzer.scala @@ -65,7 +65,7 @@ object Initializer: */ def weightVec2 (rows: Int, stream: Int = 0): VectorD = val normal = new Normal (stream = stream) - VectorD (for i <- 0 until rows yield normal.gen) + VectorD (for _ <- 0 until rows yield normal.gen) end weightVec2 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/modeling/KNN_Regression.scala b/src/main/scala/scalation/modeling/KNN_Regression.scala index 07f2bd052..565a670bf 100644 --- a/src/main/scala/scalation/modeling/KNN_Regression.scala +++ b/src/main/scala/scalation/modeling/KNN_Regression.scala @@ -34,17 +34,17 @@ import scalation.mathstat._ class KNN_Regression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, hparam: HyperParameter = KNN_Regression.hp) extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2): private val debug = debugf ("KNN_Regression", false) // debug function private val flaw = flawf ("KNN_Regression") // debug function private val MAX_DOUBLE = Double.PositiveInfinity // infinity - private val kappa = hparam ("kappa").toInt // the number of nearest neighbors to consider + private val kappa = hparam("kappa").toInt // the number of nearest neighbors to consider private val topK = Array.fill (kappa)(-1, MAX_DOUBLE) // top-kappa nearest points (in reserve order) private val d = new VectorD (x.dim) // vector to hold 
distances - modelName = "KNN_Regression" + _modelName = s"KNN_Regression_$kappa" //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute a distance metric between vectors/points x and z. @@ -142,31 +142,36 @@ class KNN_Regression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /* Use validation to compute test Quality of Fit (QoF) measures by dividing - * the full dataset into a TESTING set and a TRAINING set. - * The test set is defined by idx and the rest of the data is the training set. + * the full dataset into a TESTING-set and a TRAINING-set. + * The testing-set is defined by idx and the rest of the data is the training-set. + * @see `modeling.Predictor.validate` about the RANDOM, FIRST, and LAST options + * for selecting the testing-set. * @param rando flag indicating whether to use randomized or simple validation - * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) - * @param idx the prescribed TESTING set indices + * @param ratio the ratio of the TESTING-set to the full dataset (most common 70-30 (.3), 80-20 (.2)) + * @param idx the prescribed TESTING-set indices */ - override def validate (rando: Boolean = true, ratio: Double = 0.2) - (idx : IndexedSeq [Int] = testIndices ((ratio * y.dim).toInt, rando)): VectorD = + override def validate (rando: Boolean = true, ratio: Double = Model.TE_RATIO) +// (idx : IndexedSeq [Int] = testIndices ((ratio * y.dim).toInt, rando)): + (idx: IndexedSeq [Int] = testIndices (y.dim, (ratio * y.dim).toInt, rando)): + (VectorD, VectorD) = val x_e = x(idx) // test data/input matrices val y_e = y(idx) // test response/output vectors - val qof = testNoSpy (x_e, y_e, idx)._2 // test on test-set and get QoF measures + val (yp, qof) = testNoSpy (x_e, y_e, idx) // test on TESTING-set and get its yp and QoF measures if qof(QoF.sst.ordinal) <= 0.0 then // requires variation in 
test-set flaw ("validate", "chosen testing set has no variability") - end if println (FitM.fitMap (qof, QoF.values.map (_.toString))) - qof + (yp, qof) end validate //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - override def buildModel (x_cols: MatrixD): KNN_Regression = - new KNN_Regression (x, y, null, hparam) + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): KNN_Regression = + debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") + new KNN_Regression (x, y, fname2, hparam) end buildModel end KNN_Regression @@ -307,7 +312,7 @@ end kNN_RegressionTest2 rSq(k) = Fit.qofVector (mod.fit, mod.crossValidate ()) // use main model, knn end for - new PlotM (kr, rSq.transpose, lines = true) + new PlotM (kr, rSq.ᵀ, Regression.metrics, lines = true) end kNN_RegressionTest3 diff --git a/src/main/scala/scalation/modeling/LassoRegression.scala b/src/main/scala/scalation/modeling/LassoRegression.scala index 847bf4a4d..b8ef2e756 100644 --- a/src/main/scala/scalation/modeling/LassoRegression.scala +++ b/src/main/scala/scalation/modeling/LassoRegression.scala @@ -5,12 +5,16 @@ * @date Tue Apr 18 14:24:14 EDT 2017 * @see LICENSE (MIT style license file). * - * @note Model: Lasso Regression (L1 Shrinkage/Regularization) + * @note Model: Lasso Regression (L_1 Shrinkage/Regularization) + * + * Before calling the constructor, users should center their data; automatic by all factory methods. 
*/ package scalation package modeling +import scala.math.abs + import scalation.mathstat._ import scalation.optimization.LassoAdmm @@ -25,17 +29,24 @@ import scalation.optimization.LassoAdmm * @param y the response/output m-vector * @param fname_ the feature/variable names (defaults to null) * @param hparam the shrinkage hyper-parameter, lambda (0 => OLS) in the penalty term 'lambda * b dot b' + * @param xℱ the transformation applied to x (e.g., Center or Norm) + * @param yℱ the transformation applied to y (e.g., Center) */ class LassoRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, - hparam: HyperParameter = LassoRegression.hp) + hparam: HyperParameter = RidgeRegression.hp, + xℱ: Transform = null, yℱ: Transform = null) extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): + with Fit (dfr = x.dim2, df = x.dim - x.dim2 - 1): + // degrees of freedom: dfr = n, df = m - n - 1 as centered x matrix has 1 less column + // fix after training by moving a dof from error to model for each coefficient eliminated + // if not using an intercept df = (x.dim2, x.dim-x.dim2), correct by calling 'resetDF' method from `Fit` - private val flaw = flawf ("LassoRegression") // flaw function private val debug = debugf ("LassoRegression", true) // debug function - private val lambda = hparam ("lambda").toDouble // weight to put on regularization + private val flaw = flawf ("LassoRegression") // flaw function + private val lambda = hparam("lambda").toDouble // shrinkage parameter (weight to put on regularization) + private val sparse = hparam("sparse").toInt == 1 // whether to sparsify - modelName = "LassoRegression" + _modelName = "LassoRegression_${lambda}" //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the value of the shrinkage parameter 'lambda'. 
@@ -52,18 +63,18 @@ class LassoRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, var l = lambda var l_best = l var sse = Double.MaxValue - for i <- 0 to 20 do - LassoRegression.hp("lambda") = l + + cfor (0, 20) { _ => + RidgeRegression.hp("lambda") = l val rrg = new LassoRegression (x, y) val stats = rrg.crossValidate () val sse2 = stats(QoF.sse.ordinal).mean banner (s"LassoRegression with lambda = ${rrg.lambda_} has sse = $sse2") if sse2 < sse then sse = sse2; l_best = l - end if FitM.showQofStatTable (stats) l *= 2 - end for + } // cfor (l_best, sse) end findLambda @@ -81,7 +92,10 @@ class LassoRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, b = LassoAdmm.solve (x_, y_, lambda) // Alternating Direction Method of Multipliers if b(0).isNaN then flaw ("train", s"parameter b = $b") + if sparse then LassoRegression.sparsify (b) debug ("train", s"LassoAdmm estimates parameter b = $b") + val nz = b.countZero // count number of coefficients set to zero + if nz > 0 then resetDF (x.dim2 - nz, x.dim - x.dim2 - 1 + nz) end train //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -93,10 +107,42 @@ class LassoRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, * @param y_ the testing/full response/output vector (defaults to full y) */ def test (x_ : MatrixD = x, y_ : VectorD = y): (VectorD, VectorD) = - val yp = predict (x_) // make predictions + val yp = predict_ (x_) // make predictions (yp, diagnose (y_, yp)) // return predictions and QoF vector end test + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of y = f(z) by evaluating the formula y = b dot z. + * It works on transformed values. + * @param z the new vector to predict + */ + def predict_ (z: VectorD): Double = b dot z + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of vector y = f(x_, b). It works on transformed values. 
+ * @param x_ the matrix to use for making predictions, one for each row + */ + def predict_ (x_ : MatrixD): VectorD = x_ * b + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of y = f(z) by evaluating the formula y = b dot z. + * It is overridden to handle transformations. + * @param z the new vector to predict + */ + override def predict (z: VectorD): Double = + val zz = if xℱ == null then z else xℱ.f(MatrixD (z))(0) + if yℱ == null then b dot zz else yℱ.fi_(b dot zz) + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of vector y = f(x_, b). It is overridden to handle transformations. + * @param x_ the matrix to use for making predictions, one for each row + */ + override def predict (x_ : MatrixD): VectorD = + val xx = if xℱ == null then x_ else xℱ.f(x_) + if yℱ == null then xx * b else yℱ.fi(xx * b) + end predict + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Produce a QoF summary for a model with diagnostics for each predictor 'x_j' * and the overall Quality of Fit (QoF). @@ -113,10 +159,11 @@ class LassoRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. 
* @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - override def buildModel (x_cols: MatrixD): LassoRegression = + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): LassoRegression = debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") - new LassoRegression (x_cols, y, null, hparam) + new LassoRegression (x_cols, y, fname2, hparam) end buildModel end LassoRegression @@ -126,14 +173,13 @@ end LassoRegression /** The `LassoRegression` companion object provides factory methods for the * `LassoRegression` class. */ -object LassoRegression: +object LassoRegression extends Regularized: - /** Base hyper-parameter specification for `LassoRegression` - */ - val hp = new HyperParameter; hp += ("lambda", 0.01, 0.01) // L1 regularization/shrinkage parameter + val hp = RidgeRegression.hp //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `LassoRegression` object from a combined data matrix. + /** Create a Lasso Regression object from a combined data matrix. + * This function centers the data. * @param xy the combined data matrix * @param fname the feature/variable names (defaults to null) * @param hparam the hyper-parameters (defaults to hp) @@ -141,12 +187,30 @@ object LassoRegression: */ def apply (xy: MatrixD, fname: Array [String] = null, hparam: HyperParameter = hp)(col: Int = xy.dim2 - 1): LassoRegression = - new LassoRegression (xy.not(?, col), xy(?, col), fname, hparam) + val (x, y) = (xy.not(?, col), xy(?, col)) + val xℱ = CenterForm (x) + val yℱ = CenterForm (y) + new LassoRegression (xℱ.f(x), yℱ.f(y), fname, hparam, xℱ, yℱ) end apply + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a Lasso Regression from a data matrix and response vector. + * This function centers the data. 
+ * @param x the un-centered data/input m-by-n matrix, NOT augmented with a first column of ones + * @param y the un-centered response/output vector + * @param fname the feature/variable names (defaults to null) + * @param hparam the shrinkage hyper-parameter (0 => OLS) in the penalty term 'lambda * |b|' + */ + def center (x: MatrixD, y: VectorD, fname: Array [String] = null, + hparam: HyperParameter = RidgeRegression.hp): LassoRegression = + val xℱ = CenterForm (x) + val yℱ = CenterForm (y) + new LassoRegression (xℱ.f(x), yℱ.f(y), fname, hparam, xℱ, yℱ) + end center + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `LassoRegression` object from a data matrix and a response vector. - * This method provides data rescaling. + /** Create a Lasso Regression object from a data matrix and a response vector. + * This method provides data rescaling on x and centering on y. * @param x the data/input m-by-n matrix * (augment with a first column of ones to include intercept in model) * @param y the response/output m-vector @@ -155,59 +219,84 @@ object LassoRegression: */ def rescale (x: MatrixD, y: VectorD, fname: Array [String] = null, hparam: HyperParameter = hp): LassoRegression = - val xn = normalize ((x.mean, x.stdev)) (x) - new LassoRegression (xn, y, fname, hparam) + val xℱ = NormForm (x) + val yℱ = CenterForm (y) + new LassoRegression (xℱ.f(x), yℱ.f(y), fname, hparam, xℱ, yℱ) end rescale + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Zero out small parameters/coefficients in the model that are below a threshold. + * Intended for use by any regularized regression, but especially Lasso and Bridge. 
+ * @param b the parameters/coefficients to sparsify + * @param relThresh the relative (to the max) threshold below which parameter is set to zero + * @return a sparse version of the parameter vector + */ + def sparsify (b: VectorD, relThresh: Double = 1e-3): Unit = + val thresh = b.max * relThresh + for i <- b.indices do if abs (b(i)) < thresh then b(i) = 0.0 + end sparsify + end LassoRegression //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `lassoRegressionTest` main function tests `LassoRegression` class using * the following regression equation. - * y = b dot x = b_0 + b_1*x_1 + b_2*x_2. + * y = b dot x = b_1*x_1 + b_2*x_2. * It comapres `LassoRegression` to `Regression`. * @see statmaster.sdu.dk/courses/st111/module03/index.html * > runMain scalation.modeling.lassoRegressionTest */ @main def lassoRegressionTest (): Unit = - // 5 data points: constant term, x_1 coordinate, x_2 coordinate - val x = MatrixD ((5, 3), 1.0, 36.0, 66.0, // 5-by-3 matrix - 1.0, 37.0, 68.0, - 1.0, 47.0, 64.0, - 1.0, 32.0, 53.0, - 1.0, 1.0, 101.0) - val y = VectorD (745.0, 895.0, 442.0, 440.0, 1598.0) - val z = VectorD (1.0, 20.0, 80.0) + // 5 data points: x_0 x_1 + val x = MatrixD ((5, 2), 36.0, 66.0, // 5-by-2 data matrix + 37.0, 68.0, + 47.0, 64.0, + 32.0, 53.0, + 1.0, 101.0) + val y = VectorD (745.0, 895.0, 442.0, 440.0, 1598.0) // 5-dim response vector // println ("model: y = b_0 + b_1*x_1 + b_2*x_2") - println ("model: y = b₀ + b₁*x₁ + b₂*x₂") - println ("x = " + x) - println ("y = " + y) + println ("model: y = b₀ + b₁*x₁ + b₂*x₂") // for Regression, remove b₀ for Lasso + println (s"x = $x") + println (s"y = $y") banner ("Regression") - val reg = new Regression (x, y) // create a regression model - reg.trainNtest ()() // train and test the model - println (reg.summary ()) // parameter/coefficient statistics - println (s"predict ($z) = ${reg.predict (z)}") // make an out-of-sample prediction - - val yp = reg.predict (x) // predict y for 
several points - println (s"predict (x) = $yp") - - banner ("LassoRegression") - val mod = new LassoRegression (x, y) // create a Lasso regression model - mod.trainNtest ()() // train and test the model - println (mod.summary ()) // parameter/coefficient statistics - println (s"predict ($z) = ${mod.predict (z)}") // make an out-of-sample prediction - - val yyp = mod.predict (x) // predict y for several points - println (s"predict (x) = $yyp") - - new Plot (null, y, yp, "Regression y vs. yp") - new Plot (null, y, yyp, "Lasso Regression y vs. yyp") - new Plot (x(?, 1), y, yyp, "y, yyp vs. col 1") - new Plot (x(?, 2), y, yyp, "y, yyp vs. col 2") + val ox = VectorD.one (y.dim) +^: x // prepend a column of all 1's + val reg = new Regression (ox, y) // create a Regression model + reg.trainNtest ()() // train and test the model + + banner ("LassoRegression with manual centering") + val mu_x = x.mean // column-wise mean of x + val mu_y = y.mean // mean of y + val x_c = x - mu_x // centered x (column-wise) + val y_c = y - mu_y // centered y + val mod = new LassoRegression (x_c, y_c) // create a Lasso Regression model + mod.trainNtest ()() // train and test the model + + banner ("LassoRegression with Auto-centering") + val amod = LassoRegression.center (x, y) // create an auto-centered Lasso Regression model + amod.trainNtest ()() // train and test the model + + banner ("LasoRegression with Rescaling") + val rmod = LassoRegression.rescale (x, y) // create a rescaled Lasso Regression model + rmod.trainNtest ()() // train and test the model + + banner ("Make one OOS Predictions") + val z = VectorD (20.0, 80.0) // new instance to predict + val _1z = 1.0 +: z // prepend 1 to z + val z_c = z - mu_x // center z + println (s"reg.predict ($z) = ${reg.predict (_1z)}") // predict using _1z + println (s"mod.predict ($z) = ${mod.predict (z_c) + mu_y}") // predict using z_c and add y's mean + println (s"amod.predict ($z) = ${amod.predict (z)}") // predict using z with auto-centering + 
println (s"rmod.predict ($z) = ${rmod.predict (z)}") // predict using z with rescaling + + banner ("Compare Summaries") + println (reg.summary ()) + println (mod.summary ()) + println (amod.summary ()) + println (rmod.summary ()) end lassoRegressionTest @@ -221,7 +310,7 @@ end lassoRegressionTest */ @main def lassoRegressionTest2 (): Unit = - import LassoRegression.hp + import RidgeRegression.hp println (s"hp = $hp") val hp2 = hp.updateReturn ("lambda", 1.0) // try different values @@ -274,79 +363,152 @@ end lassoRegressionTest2 println (s"predict ($z) = ${mod.predict (z)}") // make an out-of-sample prediction banner ("Forward Selection Test") - mod.forwardSelAll (cross = false) + mod.forwardSelAll (cross = "none") banner ("Backward Elimination Test") - mod.backwardElimAll (cross = false) + mod.backwardElimAll (cross = "none") end lassoRegressionTest3 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `lassoRegressionTest4` main function tests the `LassoRegression` class using - * the AutoMPG dataset. It illustrates using the `Relation` class for reading the - * data from a .csv file "auto-mpg.csv". Assumes no missing values. + * the Covid-19 dataset. It illustrates using the `Relation`/Table class for reading + * the data from a .csv file "covid_19_weekly.csv". Assumes no missing values. * It also combines feature selection with cross-validation and plots - * R^2, R^2 Bar and R^2 cv vs. the instance index. + * R^2, R^2 bar, sMAPE, and R^2 cv vs. the instance index. 
* > runMain scalation.modeling.lassoRegressionTest4 - * + */ @main def lassoRegressionTest4 (): Unit = - import scalation.database.relation.Relation +// import scalation.database.relation.Relation + import scalation.database.table.Table - banner ("AutoMPG relation") - val auto_tab = Relation (DATE_DIR + "auto-mpg.csv", "auto_mpg", null, -1) - auto_tab.show () + banner ("covid_19_weekly Table") +// val data = Relation (DATE_DIR + "auto-mpg.csv", "auto_mpg", null, -1) + val data = Table.load ("covid_19_weekly.csv", "covid_19_weekly", 17, null) + data.show () - banner ("AutoMPG (x, y) dataset") - val (x, y) = auto_tab.toMatrixDD (ArrayBuffer.range (1, 7), 0) - println (s"x = $x") + banner ("covid_19_weekly dataset") +// val (x, y) = data.toMatrixDD (ArrayBuffer.range (1, 7), 0) + val xcols = Array (1, 3, 4, 5, 6, 7, 8, 9, 10) + val (x, y) = data.toMatrixV (xcols, 2) + val fname = xcols.map (data.schema (_)) + println (s"fname = $fname") + println (s"y = $y") println (s"y = $y") - banner ("LassoRegression for AutoMPG") - val mod = new LassoRegression (x, y) // create a Lasso regression model + banner ("LassoRegression for covid_19_weekly") + val mod = new LassoRegression (x, y, fname) // create a Lasso regression model mod.trainNtest ()() // train and test the model println (mod.summary ()) // parameter/coefficient statistics banner ("Forward Selection Test") - val (cols, rSq) = mod.forwardSelAll () // R^2, R^2 Bar, R^2 cv + val (cols, rSq) = mod.forwardSelAll () // R^2, R^2 bar, sMAPE, R^2 cv val k = cols.size val t = VectorD.range (1, k) // instance index - new PlotM (t, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - "R^2 vs n for LassoRegression", lines = true) + new PlotM (t, rSq.ᵀ, Regression.metrics, "R^2 vs n for LassoRegression", lines = true) println (s"rSq = $rSq") end lassoRegressionTest4 - */ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `lassoRegressionTest5` main function tests the `LassoRegression` class 
using - * the AutoMPG dataset. It illustrates using the `Relation` class for reading the' + * the COVID dataset. It illustrates using the `Table` class for reading the' * data from a .csv file "auto-mpg.csv". Assumes no missing values. * It also uses the 'findLambda' method to search for a shrinkage parameter * that roughly mininizes 'sse_cv'. * > runMain scalation.modeling.lassoRegressionTest5 - * + */ @main def lassoRegressionTest5 (): Unit = - import scalation.database.relation.Relation + import scalation.database.table.Table - banner ("AutoMPG relation") - val auto_tab = Relation (DATA_DIR + "auto-mpg.csv", "auto_mpg", null, -1) - auto_tab.show () + banner ("auto-mpg Table") + val ncols = 8 + val data = Table.load ("auto_mpg.csv", "auto_mpg", ncols, null) + data.show () - banner ("AutoMPG (x, y) dataset") - val (x, y) = auto_tab.toMatrixDD (ArrayBuffer.range (1, 7), 0) + banner ("auto-mpg dataset") + val xcols = Array.range (0, ncols-1) + val (x, y) = data.toMatrixV (xcols, ncols-1) + val fname = xcols.map (data.schema (_)) + println (s"y = $y") - banner ("LassoRegression for AutoMPG") - val mod = new LassoRegression (x, y) // create a Lasso regression model + banner ("LassoRegression for auto-mpg") + val mod = new LassoRegression (x, y, fname) // create a Lasso regression model mod.trainNtest ()() // train and test the model println (mod.summary ()) // parameter/coefficient statistics println (s"best (lambda, sse) = ${mod.findLambda}") end lassoRegressionTest5 - */ +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `lassoRegressionTest6` main function tests the multi-collinearity method in + * the `LassoRegression` class using the following regression equation. 
+ * y = b dot x = b_1*x_1 + b_2*x_2 + * Contour Plots for see, L2 penalty, see + L2 penalty, L1 penalty, sse + L1 penalty + * > runMain scalation.modeling.lassoRegressionTest6 + */ +@main def lassoRegressionTest6 (): Unit = + + val rvg = random.RandomVecD (100) + val nrm = random.NormalVec_c (100, 0, 50) + val x_1 = rvg.gen + val x_2 = rvg.gen + val x = MatrixD (x_1, x_2).ᵀ + + val b_ = VectorD (4, 5) + val y = x * b_ + nrm.gen + val xy = x :^+ y + println (s"Correlation matrix for xy: rho = ${xy.corr}") + + val x_c = x - x.mean + val y_c = y - y.mean + + banner ("Regression Model") + val mod = new Regression (x_c, y_c) + mod.trainNtest ()() + println (mod.summary ()) + FitM.showQofStatTable (mod.crossValidate ()) + var lambda = 0.0 + + banner ("Ridge Regression Model") + for i <- 1 to 10 do + lambda = 200.0 * i + RidgeRegression.hp("lambda") = lambda + val mod2 = new RidgeRegression (x_c, y_c) + mod2.trainNtest ()() + println (mod2.summary ()) + FitM.showQofStatTable (mod2.crossValidate ()) + end for + + banner ("Lasso Regression Model") + for i <- 1 to 10 do + lambda = 2000.0 * i + RidgeRegression.hp("lambda") = lambda + val mod2 = new LassoRegression (x_c, y_c) + mod2.trainNtest ()() + println (mod2.summary ()) + FitM.showQofStatTable (mod2.crossValidate ()) + end for + + def f(b: VectorD): Double = (y - x * b).normSq + def f2(b: VectorD): Double = b.normSq * 2000.0 + def f3(b: VectorD): Double = f(b) + f2(b) + def f4(b: VectorD): Double = b.norm1 * 20000.0 + def f5(b: VectorD): Double = f(b) + f4(b) + + val lb = VectorD (3, 4) + val ub = VectorD (5, 6) + new PlotC (f, lb, ub, title = "Contour plot of sse") + new PlotC (f2, lb, ub, title = "Contour plot of L2 penalty") + new PlotC (f3, lb, ub, title = "Contour Plot of sse + L2 penalty") + new PlotC (f4, lb, ub, title = "Contour Plot of L1 penalty") + new PlotC (f5, lb, ub, title = "Contour Plot of sse + L1 penalty") + +end lassoRegressionTest6 + diff --git 
a/src/main/scala/scalation/modeling/MatrixTransform.scala b/src/main/scala/scalation/modeling/MatrixTransform.scala index 1e8efcc39..feeb78953 100644 --- a/src/main/scala/scalation/modeling/MatrixTransform.scala +++ b/src/main/scala/scalation/modeling/MatrixTransform.scala @@ -87,6 +87,15 @@ def denormalizeV (mu_sig: (Double, Double)) (x_n: VectorD): VectorD = if sig_x =~ 0.0 then x_n else x_n * sig_x + mu_x end denormalizeV +// ADDED FOR COVID TESTING WITH RNNs -- @author Praveen -- FIX merge with mathstat.Transform +def logTransformV (offset: Double = 1e-8)(x: VectorD): VectorD = + x.map (v => math.log (v + offset)) +end logTransformV + +def expTransformV (offset: Double = 1e-8)(x_log: VectorD): VectorD = + x_log.map (v => math.exp(v) - offset) +end expTransformV + // Matrix Transformations -------------------------------------------------- //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -99,23 +108,25 @@ def extreme (x: MatrixD): (VectorD, VectorD) = (x.min, x.max) /** Center matrix x to zero mean, column-wise, by subtracting the mean. * @param x the matrix to center * @param mu_x the vector of column means of matrix x - */ + * def center (x: MatrixD, mu_x: VectorD): MatrixD = val x_c = new MatrixD (x.dim, x.dim2) for j <- x.indices2 do x_c(?, j) = x(?, j) - mu_x(j) // subtract column means x_c end center + */ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Uncenter matrix x_c from zero mean, column-wise, by adding the mean. * @param x_c the matrix to uncenter * @param mu_x the vector of column means of matrix x_c - */ + * def uncenter (x_c: MatrixD, mu_x: VectorD): MatrixD = val x = new MatrixD (x_c.dim, x_c.dim2) for j <- x.indices2 do x(?, j) = x_c(?, j) + mu_x(j) // add column means x end uncenter + */ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Scale matrix x to the range lb to ub, column-wise: x -> x_s. 
@@ -136,7 +147,6 @@ def scale (extremes: (VectorD, VectorD), bounds: (Double, Double) = (0, 1)) (x: x_s(?, j) = (x(?, j) - min_x(j)) * scale + lb // shift and scale else x_s(?, j) = x(?, j) // no change - end if end for x_s end scale @@ -160,7 +170,6 @@ def unscale (extremes: (VectorD, VectorD), bounds: (Double, Double) = (0, 1)) (x x(?, j) = (x_s(?, j) - lb) / scale + min_x(j) // scale and shift else x(?, j) = x_s(?, j) // no change - end if end for x end unscale @@ -223,14 +232,14 @@ end denormalize println (s"(min_x, max_x) = ($min_x, $max_x)") println (s"(mu_x, sig_x) = ($mu_x, $sig_x)") - val x_c = center (x, mu_x) +// val x_c = center (x, mu_x) val x_s = scale ((min_x, max_x), (0, 1)) (x) // e.g., used for sigmoid activation function val x_s2 = scale ((min_x, max_x), (-1, 1)) (x) // e.g., used by tanh activation function val x_n = normalize ((mu_x, sig_x)) (x) // e.g., used for unbounded activation function println ("x = " + x) - banner ("Center at 0") - println ("x_c = " + x_c) +// banner ("Center at 0") +// println ("x_c = " + x_c) banner ("Scale to (0, 1)") println ("x_s = " + x_s) banner ("Scale to (-1, 1)") @@ -238,7 +247,7 @@ end denormalize banner ("Normalize to (mu = 0, sig = 1)") println ("x_n = " + x_n) - assert (uncenter (x_c, mu_x) == x, "uncenter") +// assert (uncenter (x_c, mu_x) == x, "uncenter") assert (unscale ((min_x, max_x), (0, 1)) (x_s) == x, "unscale") assert (unscale ((min_x, max_x), (-1, 1)) (x_s2) == x, "unscale") assert (denormalize ((mu_x, sig_x)) (x_n) == x, "denormalize") diff --git a/src/main/scala/scalation/modeling/Model.scala b/src/main/scala/scalation/modeling/Model.scala index bdc82e874..316b8fe48 100644 --- a/src/main/scala/scalation/modeling/Model.scala +++ b/src/main/scala/scalation/modeling/Model.scala @@ -13,14 +13,31 @@ package modeling import java.net.URI +import scala.collection.mutable.{ArrayBuffer, IndexedSeq} +import scala.math.{abs, round} import scala.runtime.ScalaRunTime.stringOf import scalation.mathstat._ 
+//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `TaskType` enum specifies the types of tasks supported by ScalaTion. + * @param name the name of task type + * @param base the base trait/abstract class for the given type of task + */ +enum TaskType (val name: String, val base: String): + + case Predict extends TaskType ("Predict", "Predictor") // FIX -- make compiler checked + case Forecast extends TaskType ("Forecast", "forecasting.Forecaster") + case Classify extends TaskType ("Classify", "classifying.Classifier") + case Cluster extends TaskType ("Cluster", "clustering.Clusterer") + +end TaskType + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Order vectors y_ and yp_ according to the ascending order of y_. - * This can be used for graphical comparison or true and predicted values. - * @param y_ the vector to order by (e.g., true response values) + * This can be used for graphical comparison or actual and predicted values. + * @param y_ the vector to order by (e.g., actual response values) * @param yp_ the vector to be order by y_ (e.g., predicted response values) */ def orderByY (y_ : VectorD, yp_ : VectorD): (VectorD, VectorD) = @@ -28,11 +45,25 @@ def orderByY (y_ : VectorD, yp_ : VectorD): (VectorD, VectorD) = (y_.reorder (rank), yp_.reorder (rank)) // (y_ in ascending order, yp_ ordered by y_) end orderByY +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Order matrix ys according to the ascending order of ys(0) (row vector by row vector). + * This can be used for graphical comparison or actual and predicted values and intervals. 
+ * @param ys the matrix of vectors to reorder based the first row (typically the y-actual value) + */ +def orderByY (ys: MatrixD): MatrixD = + val oys = new MatrixD (ys.dim, ys.dim2) + val rank = ys(0).iqsort // rank order for vector y_ + oys(0) = ys(0).reorder (rank) // ys(0) in ascending order + for i <- 1 until ys.dim do + oys(i) = ys(i).reorder (rank) // row i of ys ordered by ys(0) + oys +end orderByY + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Order matrices y_ and yp_ according to the ascending order of y_ * (column vector by column vector). - * This can be used for graphical comparison or true and predicted values. - * @param y_ the matrix to order by (e.g., true response values) + * This can be used for graphical comparison or actual and predicted values. + * @param y_ the matrix to order by (e.g., actual response values) * @param yp_ the matrix to be order by y_ (e.g., predicted response values) */ def orderByYY (y_ : MatrixD, yp_ : MatrixD): (MatrixD, MatrixD) = @@ -42,7 +73,6 @@ def orderByYY (y_ : MatrixD, yp_ : MatrixD): (MatrixD, MatrixD) = val rank = yj.iqsort // rank order for vector yj oy(?, j) = yj.reorder (rank) // yj in ascending order oyp(?, j) = yp_(?, j).reorder (rank) // column j of yp_ ordered by yj - end for (oy, oyp) end orderByYY @@ -57,13 +87,31 @@ end orderByYY */ trait Model: + /** The flaw function used for writing errors and warnings + */ + private val flaw = flawf ("Model") + /** The optional reference to an ontological concept */ var modelConcept: URI = null - /** The name for the model (or modeling technique). + /** The name for the model (or modeling technique). Each model should reassign. + */ + protected var _modelName: String = "Model" + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Get the model name. + */ + inline def modelName: String = _modelName + + /** The type of task the model performs. Base traits for other tasks for reassign. 
*/ - var modelName: String = "Model" + protected var _taskType: TaskType = TaskType.Predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Get the type of the task performed by model. + */ + inline def taskType: TaskType = _taskType //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the used data matrix x. Mainly for derived classes where x is expanded @@ -71,6 +119,11 @@ trait Model: */ def getX: MatrixD + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the data matrix x concatenated with response vector y. + */ + def getXy: MatrixD = getX :^+ getY + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the used response vector y. Mainly for derived classes where y is * transformed, e.g., `TranRegression`, `ARX`. @@ -101,10 +154,10 @@ trait Model: * and QoF vectors. * This may include the importance of its parameters (e.g., if 0 is in a parameter's * confidence interval, it is a candidate for removal from the model). - * Extending traits and classess should implement various diagnostics for + * Extending traits and classes should implement various diagnostics for * the test and full (training + test) datasets. - * @param x_ the testiing/full data/input matrix (impl. classes may default to x) - * @param y_ the testiing/full response/output vector (impl. classes may default to y) + * @param x_ the testing/full data/input matrix (impl. classes may default to x) + * @param y_ the testing/full response/output vector (impl. classes may default to y) */ def test (x_ : MatrixD, y_ : VectorD): (VectorD, VectorD) @@ -127,11 +180,27 @@ trait Model: */ def parameter: VectorD | MatrixD + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Print the model prediction equation in readable form. + * Override per model. 
+ */ + def equation: String = s"model prediction equation $modelName: ŷ = f(${parameter})" + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Print the model prediction equation in LaTex form. + * Override per model. + */ + def equationLaTeX: String = ??? + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return a basic report on a trained and tested model. * @param ftVec the vector of qof values produced by the `Fit` trait */ def report (ftVec: VectorD): String = + val (fn, b) = (getFname, parameter.asInstanceOf [VectorD]) + println (s"b = $b, fn = $fn") + if fn == null then flaw ("report", "no feature names given fn = null") + else if fn.size != b.dim then flaw ("report", s"# featuers = ${fn.size} != # parameters = ${b.dim}") s""" REPORT ---------------------------------------------------------------------------- @@ -139,9 +208,9 @@ REPORT ---------------------------------------------------------------------------- hparameter hp = $hparameter ---------------------------------------------------------------------------- - features fn = ${stringOf (getFname)} + features fn = ${stringOf (fn)} ---------------------------------------------------------------------------- - parameter b = $parameter + parameter b = $b ---------------------------------------------------------------------------- fitMap qof = ${FitM.fitMap (ftVec, QoF.values.map (_.toString))} ---------------------------------------------------------------------------- @@ -153,6 +222,8 @@ REPORT * @param ftMat the matrix of qof values produced by the `Fit` trait */ def report (ftMat: MatrixD): String = + val (fn, b) = (getFname, parameter.asInstanceOf [MatrixD]) + if fn.size != b.dim then flaw ("report", s"# featuers = ${fn.size} != # parameters = ${b.dim}") s""" REPORT ---------------------------------------------------------------------------- @@ -160,19 +231,91 @@ REPORT 
---------------------------------------------------------------------------- hparameter hp = $hparameter ---------------------------------------------------------------------------- - features fn = ${stringOf (getFname)} + features fn = ${stringOf (fn)} ---------------------------------------------------------------------------- - parameter b = $parameter + parameter b = $b ---------------------------------------------------------------------------- fitMap qof = ${FitM.fitMap (ftMat, QoF.values.map (_.toString))} ---------------------------------------------------------------------------- """ end report + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Screen the x-columns of matrix xy based on the two thresholds, returning + * the reduced matrix and the column indices/predictor variables selected. + * @param xy the [ x, y ] combined data-response matrix + * @param thr1 the threshold used to compare the predictor x-columns to the y-column + * only want variables above some minimal dependency level + * @param thr2 the threshold used to compare the predictor x-columns with each other + * only want variables below some cut-off dependency/collinearity level + * @param dep the variable/column dependency measure (defaults to correlation) + */ + def screen (xy: MatrixD, thr1: Double = 0.05, thr2: Double = 0.95) + (dep: MatrixD = xy.corr): (MatrixD, VectorI) = + + val lst = dep.dim2 - 1 // the index of last column (holds y) + val depY = dep(?, lst) // the dependency sub-matrix for xy vs. y (last column) + val depX = dep(0 until lst, 0 until lst) // the dependency sub-matrix for x vs. 
x + val indices = for i <- 0 until lst if abs (depY(i)) > thr1 + yield i // row indices that match (> thr1) + val sIndices = indices.sortBy (i => -abs (depY(i))) // sort indices from highest dep to lowest + + // only add index i if its dependency with all selected columns < thr2 + val selected = ArrayBuffer [Int] () + for i <- sIndices do + if selected.forall (k => abs (depX(i, k)) < thr2) then + selected += i // row indices that also match (< thr2) + val selected_ = selected.sorted + + (xy(?, selected_), new VectorI (selected_.size, selected_.toArray)) + end screen + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the best model found from feature selection. + */ + def getBest: BestStep + +// T E S T I N G S C E N A R I O S + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform In-Sample Testing, i.e., train and test on the same FULL data set. + * Good for initial testing and understanding variable relationships. + * @note May lead to over-fitting for complex models. + * @param skip the number of initial data points to skip (due to insufficient information) + * @param showYp whether to show the prediction vector + */ + def inSample_Test (skip: Int = 0, showYp: Boolean = false): Unit + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /* Use validation to compute test Quality of Fit (QoF) measures by dividing + * the full dataset into a TESTING-set and a TRAINING-set, returning qof and yp. + * The testing-set is defined by idx and the rest of the data is the training-set. + * @note: Results depend on which testing-set is chosen. + * @see `modeling.Predictor.validate` for how to choose (1) RANDOM, (2) FIRST, or (3) LAST. 
+ * @param rando flag indicating whether to use randomized or simple validation + * @param ratio the ratio of the TESTING-set to the full dataset (most common 70-30 (.3), 80-20 (.2)) + * @param idx the prescribed TESTING-set indices + */ + def validate (rando: Boolean = true, ratio: Double = Model.TE_RATIO) + (idx: IndexedSeq [Int] = null): (VectorD | MatrixD, VectorD | MatrixD) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert QoF results into an array (of size 1) of `Statistic` for compatibility + * with the `crossValidate` method. + * @param qof the Quality of Fit (QoF) results + */ + def qof2Stat (qof: VectorD): Array [Statistic] = + val stats = Fit.qofStatTable // create table for QoF measures + if qof(QoF.sst.ordinal) > 0.0 then // requires variation in test-set + for q <- qof.indices do stats(q).tally (qof(q)) // tally these QoF measures + stats + end qof2Stat + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /* Use k-fold cross-validation to compute test Quality of Fit (QoF) measures * by iteratively dividing the full dataset into a TRAINING and a TESTING set. * Each test set is defined by idx and the rest of the data is the training set. + * @note: Replace with `rollValidate` for forecasting tasks. * @see showQofStatTable in `Fit` object for printing the returned stats. * @param k the number of cross-validation iterations/folds (defaults to 5x). * @param rando flag indicating whether to use randomized or simple cross-validation @@ -181,3 +324,41 @@ REPORT end Model + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Model` companion object provides methods useful for classes extending the `Model` trait. 
+ */ +object Model: + + private val flaw = flawf ("Model") // flaw function + + private var _TE_RATIO = 0.2 // ratio of TESTING-set to FULL dataset + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Get the TE ratio = ratio of the size of the TESTING-set to size of the FULL dataset. + */ + inline def TE_RATIO: Double = _TE_RATIO + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Set the TE ratio = ratio of the size of the TESTING-set to size of the FULL dataset. + * @param ratio the new ratio of the size of the TESTING-set to size of the FULL dataset + */ + def TE_RATIO_= (ratio: Double): Unit = + if ratio out (0.05, 0.95) then flaw ("init", s"testing ratio = $ratio should be in (0.05, 0.95)") + _TE_RATIO = ratio + end TE_RATIO_= + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Calculate the size (number of instances) for a testing set (round up). + * @param m the size of the full dataset + */ + inline def teSize (m: Int): Int = (round (m * TE_RATIO + 0.5)).toInt + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Calculate the size (number of instances) for a training set. + * @param m the size of the full dataset + */ + inline def trSize (m: Int): Int = m - teSize (m) + +end Model + diff --git a/src/main/scala/scalation/modeling/MonitorLoss.scala b/src/main/scala/scalation/modeling/MonitorLoss.scala index 1047089ba..a3bd6f631 100644 --- a/src/main/scala/scalation/modeling/MonitorLoss.scala +++ b/src/main/scala/scalation/modeling/MonitorLoss.scala @@ -41,5 +41,10 @@ trait MonitorLoss: new Plot (epoch, loss, null, s"loss vs epoch $optName") end plotLoss + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the best/minumum loss seen. 
+ */ + def getBestLoss: Double = losses.min + end MonitorLoss diff --git a/src/main/scala/scalation/modeling/NoBuildModel.scala b/src/main/scala/scalation/modeling/NoBuildModel.scala index 24f6f27ef..543b8aba0 100644 --- a/src/main/scala/scalation/modeling/NoBuildModel.scala +++ b/src/main/scala/scalation/modeling/NoBuildModel.scala @@ -27,11 +27,20 @@ trait NoSubModels: * Must be implemented for models that support feature selection. * NOT SUPPORTED for this model, so throw an EXCEPTION. * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - def buildModel (x_cols: MatrixD): Predictor & Fit = + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): Predictor & Fit = throw new UnsupportedOperationException ("buildModel: this model does not support building sub-models for feature selection") end buildModel + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the best model found from feature selection. 
+ */ + def getBest: BestStep = + throw new UnsupportedOperationException + ("getBest: this model does not support building sub-models for feature selection") + end getBest + end NoSubModels diff --git a/src/main/scala/scalation/modeling/NonlinearRegression.scala b/src/main/scala/scalation/modeling/NonlinearRegression.scala index c2812ec34..1af6d43ca 100644 --- a/src/main/scala/scalation/modeling/NonlinearRegression.scala +++ b/src/main/scala/scalation/modeling/NonlinearRegression.scala @@ -38,15 +38,17 @@ class NonlinearRegression (x: MatrixD, y: VectorD, f: FunctionP2S, b_init: VectorD, fname_ : Array [String] = null, hparam: HyperParameter = null) extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2) - with NoSubModels: + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2) + with NoSubModels: // FIX -- need efficient feature selection private val debug = debugf ("Nonlinear", true) // debug function private val flaw = flawf ("Nonlinear") // flaw function if y != null && x.dim != y.dim then flaw ("init", "dimensions of x and y are incompatible") - modelName = "NonlinearRegression" + _modelName = "NonlinearRegression" + + override def getBest: BestStep = super [NoSubModels].getBest //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Train the predictor by fitting the parameter vector (b-vector) in the @@ -106,8 +108,6 @@ end NonlinearRegression */ object NonlinearRegression: - private val debug = debugf ("NonlinearRegression", true) // debug function - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a `NonlinearRegression` with automatic rescaling from a combined data matrix. 
* @param xy the combined data/input and response/output matrix @@ -120,46 +120,10 @@ object NonlinearRegression: def apply (xy: MatrixD, f: FunctionP2S, b_init: VectorD, fname: Array [String] = null, hparam: HyperParameter = null) (col: Int = xy.dim2 - 1): NonlinearRegression = -// var itran: FunctionV2V = null // inverse transform -> original scale val (x, y) = (xy.not(?, col), xy(?, col)) // assumes the last column is the response - -/* // FIX - function needs bounds - val x_s = if rescale then rescaleX (x, f0) - else x - val y_s = if f0.bounds != null then { val y_i = rescaleY (y, f0); itran = y_i._2; y_i._1 } - else y - -*/ - val (x_s, y_s) = (x, y) - debug ("apply", s" scaled: x = $x_s \n scaled y = $y_s") - new NonlinearRegression (x_s, y_s, f, b_init, fname, hparam) + new NonlinearRegression (x, y, f, b_init, fname, hparam) end apply - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `NonlinearRegression` with automatic rescaling from a data matrix and response vector. 
- * @param x the data/input matrix - * @param y the response/output vector - * @param f the nonlinear function f(x, b) to fit - * @param b_init the initial guess for the parameter vector b - * @param fname the feature/variable names (defaults to null) - * @param hparam the hyper-parameters (currently has none) - */ - def rescale (x: MatrixD, y: VectorD, f: FunctionP2S, b_init: VectorD, - fname: Array [String] = null, - hparam: HyperParameter = null): NonlinearRegression = -// var itran: FunctionV2V = null // inverse transform -> original scale - -/* // FIX - function needs bounds - val x_s = if rescale then rescaleX (x, f0) - else x - val y_s = if f0.bounds != null then { val y_i = rescaleY (y, f0); itran = y_i._2; y_i._1 } - else y -*/ - val (x_s, y_s) = (x, y) - debug ("rescale", s" scaled: x = $x_s \n scaled y = $y_s") - new NonlinearRegression (x_s, y_s, f, b_init, fname, hparam) - end rescale - end NonlinearRegression diff --git a/src/main/scala/scalation/modeling/NullModel.scala b/src/main/scala/scalation/modeling/NullModel.scala index 6fdc09bfa..7f61783b5 100644 --- a/src/main/scala/scalation/modeling/NullModel.scala +++ b/src/main/scala/scalation/modeling/NullModel.scala @@ -25,10 +25,12 @@ import scalation.mathstat._ */ class NullModel (y: VectorD) extends Predictor (MatrixD.one (y.dim), y, Array ("one"), null) - with Fit (dfm = 0, df = y.dim-1) + with Fit (dfr = 0, df = y.dim-1) with NoSubModels: - modelName = "NullModel" + _modelName = "NullModel" + + override def getBest: BestStep = super [NoSubModels].getBest //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Train the predictor by fitting the parameter vector (b-vector) in the diff --git a/src/main/scala/scalation/modeling/Outlier.scala b/src/main/scala/scalation/modeling/Outlier.scala index 3423e5292..73afca142 100644 --- a/src/main/scala/scalation/modeling/Outlier.scala +++ b/src/main/scala/scalation/modeling/Outlier.scala @@ -243,7 +243,7 @@ end outlierTest2 import 
scalation.random.Normal val normal = Normal () - val y = VectorD (for i <- 0 until 10000 yield normal.gen) + val y = VectorD (for _ <- 0 until 10000 yield normal.gen) banner ("Standard Deviation Method: DistanceOutlier") var bounds = DistanceOutlier.calcBounds (y) diff --git a/src/main/scala/scalation/modeling/Perceptron.scala b/src/main/scala/scalation/modeling/Perceptron.scala index c46d59906..0af310526 100644 --- a/src/main/scala/scalation/modeling/Perceptron.scala +++ b/src/main/scala/scalation/modeling/Perceptron.scala @@ -40,16 +40,16 @@ class Perceptron (x: MatrixD, y: VectorD, fname_ : Array [String] = null, hparam: HyperParameter = Perceptron.hp, f: AFF = f_sigmoid, val itran: FunctionV2V = null) extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2) + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2) with MonitorLoss: private val debug = debugf ("Perceptron", false) // debug function private val flaw = flawf ("Perceptron") // flaw function private val (m, n) = x.dims // input data matrix dimensions - private val η = hparam ("eta").toDouble // the learning/convergence rate (requires adjustment) - private val maxEpochs = hparam ("maxEpochs").toInt // the maximum number of training epcochs/iterations + private val η = hparam("eta").toDouble // the learning/convergence rate (requires adjustment) + private val maxEpochs = hparam("maxEpochs").toInt // the maximum number of training epcochs/iterations - modelName = "Perceptron_" + f.name + _modelName = s"Perceptron_${f.name}" if y.dim != m then flaw ("init", "dimensions of x and y are incompatible") @@ -82,7 +82,7 @@ class Perceptron (x: MatrixD, y: VectorD, fname_ : Array [String] = null, val yp = f.f_ (x_ * b) // predicted output vector yp = f(Xb) val e = y_ - yp // error vector for y (protected var from `Predictor) val δ = -f.d (yp) * e // delta vector for y (protected var from `Predictor) - b -= x_.𝐓 * δ * η // update the parameters/weights (𝐓 for transpose) + b -= x_.ᵀ * 
δ * η // update the parameters/weights (ᵀ for transpose) val sse = (y_ - f.f_ (x_ * b)).normSq // recompute sum of squared errors collectLoss (sse) // collect loss per epoch @@ -121,11 +121,7 @@ class Perceptron (x: MatrixD, y: VectorD, fname_ : Array [String] = null, debug ("trainNTest", s"b = $b") val (yp, qof) = test (xx, yy) println (report (qof)) - if DO_PLOT then - val yy_ = if itran == null then yy else itran (yy) // undo scaling, if used - val (ryy, ryp) = orderByY (yy_, yp) // order by yy - new Plot (null, ryy, ryp, s"$modelName: y actual, predicted") - end if + Predictor.plotPrediction (if itran == null then yy else itran (yy), yp, modelName) // undo scaling, if used (yp, qof) end trainNtest @@ -150,9 +146,11 @@ class Perceptron (x: MatrixD, y: VectorD, fname_ : Array [String] = null, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - override def buildModel (x_cols: MatrixD): Perceptron = - new Perceptron (x_cols, y, null, hparam, f, itran) + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): Perceptron = + debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") + new Perceptron (x_cols, y, fname2, hparam, f, itran) end buildModel end Perceptron @@ -229,7 +227,7 @@ import Perceptron.hp @main def perceptronTest (): Unit = /* - // 9 data points: Constant x1 x2 y + // 9 data points: One x1 x2 y val xy = MatrixD ((9, 4), 1.0, 1.0, 1.0, 0.04, // dataset 1 1.0, 2.0, 1.0, 0.05, 1.0, 3.0, 1.0, 0.06, @@ -246,7 +244,7 @@ import Perceptron.hp // val b = VectorD (-5.0, -0.5, 1.5) // initial weights/parameters, better */ - // 9 data points: Constant x1 x2 y + // 9 data points: One x1 x2 y val xy = MatrixD ((9, 4), 1.0, 0.0, 0.0, 0.5, // dataset 2 1.0, 0.0, 0.5, 0.3, 1.0, 0.0, 1.0, 0.2, @@ -269,46 
+267,46 @@ import Perceptron.hp val sst = (y - y.mean).normSq // sum of squares total println (s"sst = $sst") - val η = 0.5 + val η = 2.0 hp("eta") = η // try several values for eta // val nn = new Perceptron (x, y, null, hp, f_reLU) // create a perceptron, user control - val nn = new Perceptron (x, y, null, hp) // create a perceptron, user control +// val nn = new Perceptron (x, y, null, hp) // create a perceptron, user control // val nn = Perceptron (xy, null, hp) // create a perceptron, automatic scaling banner ("initialize") - nn.setWeights (b) // set the parameters/weights +// nn.setWeights (b) // set the parameters/weights + + var u, yp, e, fp, d, g: VectorD = null - for epoch <- 1 to 5 do + for epoch <- 1 to 10 do banner (s"improvement step $epoch") - val u = x * b // pre-activation value - val yp = nn.predict () // predicted response from nn - val yp2 = sigmoid_ (u) // predicted response from calculation for sigmoid -// val yp2 = reLU_ (u) // predicted response from calculation for reLU - assert (yp == yp2) - val e = y - yp // error - val fp = yp * (_1 - yp) // derivative (f') for sigmoid -// val fp = u.map (z => is_ (z >= 0.0)) // derivative (f') for reLU - val d = - e * fp // delta - val g = x.transpose * d // gradient - val bup = g * η // parameter update - b -= bup // new parameter vector + u = x * b // pre-activation value + yp = sigmoid_ (u) // predicted response from calculation for sigmoid + e = y - yp // error + fp = yp * (_1 - yp) // derivative (f') for sigmoid + d = - e * fp // delta + g = x.ᵀ * d // gradient + b -= g * η // new parameter vector val sse = e.normSq // sum of squared errors - println (s"b = $b") +// val yp2 = nn.predict () // predicted response from nn +// val yp = reLU_ (u) // predicted response from calculation for reLU +// assert (yp == yp2) +// val fp = u.map (z => is_ (z >= 0.0)) // derivative (f') for reLU + println (s"u = $u") println (s"y = $y") println (s"yp = $yp") - println (s"yp2 = $yp2") +// println (s"yp2 = $yp2") 
println (s"e = $e") println (s"fp = $fp") println (s"d = $d") println (s"g = $g") - println (s"bup = $bup") println (s"b = $b") println (s"sse = $sse") println (s"R^2 = ${1 - sse/sst}") - nn.setWeights (b) +// nn.setWeights (b) end for end perceptronTest @@ -423,8 +421,7 @@ end perceptronTest3 // val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - "R^2 vs n for Perceptron", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, "R^2 vs n for Perceptron", lines = true) println (s"rSq = $rSq") end perceptronTest4 @@ -456,8 +453,7 @@ end perceptronTest4 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Perceptron with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Perceptron with $tech", lines = true) println (s"$tech: rSq = $rSq") end for diff --git a/src/main/scala/scalation/modeling/PoissonRegression.scala b/src/main/scala/scalation/modeling/PoissonRegression.scala index 812cac885..e52cb9eb7 100644 --- a/src/main/scala/scalation/modeling/PoissonRegression.scala +++ b/src/main/scala/scalation/modeling/PoissonRegression.scala @@ -38,19 +38,20 @@ import scalation.optimization.quasi_newton.BFGS class PoissonRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, hparam: HyperParameter = null) extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2): /* - private val k = x.dim2 - 1 // number of variables - private val n = x.dim.toDouble // number of data points (rows) - private val r_df = (n-1.0) / (n-k-1.0) // ratio of degrees of freedom + private val k = x.dim2 - 1 // number of variables + private val n = x.dim.toDouble // number of data 
points (rows) + private val r_df = (n-1.0) / (n-k-1.0) // ratio of degrees of freedom */ - private var aic = -1.0 // Akaike’s Information Criterion - private var n_dev = -1.0 // null dev: -LL, for null model (intercept only) - private var r_dev = -1.0 // residual dev: -LL, for full model - private var pseudo_rSq = -1.0 // McFaffen's pseudo R-squared + private val debug = debugf ("PoissonRegression", true) // debug function + private var aic = -1.0 // Akaike’s Information Criterion + private var n_dev = -1.0 // null dev: -LL, for null model (intercept only) + private var r_dev = -1.0 // residual dev: -LL, for full model + private var pseudo_rSq = -1.0 // McFaffen's pseudo R-squared - modelName = "PoissonRegression" + _modelName = "PoissonRegression" //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** For a given parameter vector 'b', compute '-Log-Likelihood' (-LL). @@ -62,9 +63,9 @@ class PoissonRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, var sum = 0.0 for i <- x.indices do val bx = b dot x(i) - sum += y(i) * bx - exp (bx) // last term not needed [ - log (fac (y(i))) ] + sum += y(i) * bx - exp (bx) // last term not needed [ - log (fac (y(i))) ] end for - -sum // set up for minimization + -sum // set up for minimization end ll //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -76,10 +77,10 @@ class PoissonRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, def ll_null (b: VectorD): Double = var sum = 0.0 for i <- x.indices do - val bx = b(0) // only use the intercept - sum += y(i) * bx - exp (bx) // last term not needed [ - log (fac (y(i))) ] + val bx = b(0) // only use the intercept + sum += y(i) * bx - exp (bx) // last term not needed [ - log (fac (y(i))) ] end for - - sum // set up for minimization + - sum // set up for minimization end ll_null //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -91,11 +92,11 @@ class PoissonRegression 
(x: MatrixD, y: VectorD, fname_ : Array [String] = null, def train (x_ : MatrixD = x, y_ : VectorD = y.toDouble): Unit = // FIX - currently only works for x_ = x and y_ = y train_null () - val b0 = new VectorD (x_.dim2) // use b_0 = 0 for starting guess for parameters - val bfgs = new BFGS (ll) // minimizer for -2LL + val b0 = new VectorD (x_.dim2) // use b_0 = 0 for starting guess for parameters + val bfgs = new BFGS (ll) // minimizer for -2LL - b = bfgs.solve (b0)._2 // find optimal solution for parameters - r_dev = ll (b) // measure of fitness for full model + b = bfgs.solve (b0)._2 // find optimal solution for parameters + r_dev = ll (b) // measure of fitness for full model aic = r_dev + 2.0 * x_.dim2 end train @@ -105,11 +106,11 @@ class PoissonRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, * Do this by minimizing '-2LL'. */ def train_null (): Unit = - val b0 = new VectorD (x.dim2) // use b0 = 0 for starting guess for parameters - val bfgs = new BFGS (ll_null) // minimizer for -2LL + val b0 = new VectorD (x.dim2) // use b0 = 0 for starting guess for parameters + val bfgs = new BFGS (ll_null) // minimizer for -2LL - val b_n = bfgs.solve (b0)._2 // find optimal solution for parameters - n_dev = ll_null (b_n) // measure of fitness for null nodel + val b_n = bfgs.solve (b0)._2 // find optimal solution for parameters + n_dev = ll_null (b_n) // measure of fitness for null nodel end train_null //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -121,8 +122,8 @@ class PoissonRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, * @param y_ the testing/full response/output vector (defaults to full y) */ def test (x_ : MatrixD = x, y_ : VectorD = y): (VectorD, VectorD) = - val yp = predict (x_) // make predictions - (yp, diagnose (y_, yp)) // return predictions and QoF vector + val yp = predict (x_) // make predictions + (yp, diagnose (y_, yp)) // return predictions and QoF vector end test 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -149,9 +150,11 @@ class PoissonRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - def buildModel (x_cols: MatrixD): PoissonRegression = - new PoissonRegression (x_cols, y) + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): PoissonRegression = + debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") + new PoissonRegression (x_cols, y, fname2) end buildModel end PoissonRegression diff --git a/src/main/scala/scalation/modeling/PolyORegression.scala b/src/main/scala/scalation/modeling/PolyORegression.scala index e1fc2fba5..1a788787f 100644 --- a/src/main/scala/scalation/modeling/PolyORegression.scala +++ b/src/main/scala/scalation/modeling/PolyORegression.scala @@ -40,11 +40,10 @@ class PolyORegression (t: MatrixD, y: VectorD, ord: Int, fname_ : Array [String] private val debug = debugf ("PolyORegression", false) // debug function private val flaw = flawf ("PolyORegression") // flaw function - private val n0 = 1 // number of terms/columns originally private val nt = PolyORegression.numTerms (ord) // number of terms/columns after expansion private val a = PolyORegression.getA // get the multipliers for orthogonal polynomials - modelName = "PolyORegression" + _modelName = s"PolyORegression_$ord" if t.dim2 != 1 then flaw ("init", "matrix t must have 1 column") @@ -53,7 +52,7 @@ class PolyORegression (t: MatrixD, y: VectorD, ord: Int, fname_ : Array [String] * i.e., add polynomial terms. 
* @param z the un-expanded vector */ - def expand (z: VectorD): VectorD = PolyORegression.forms (z, n0, nt) + def expand (z: VectorD): VectorD = PolyORegression.forms (z, nt) //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Follow the same transformations used to orthogonalize the data/input matrix 'x', @@ -123,7 +122,7 @@ object PolyORegression: */ def apply (t: VectorD, y: VectorD, ord: Int, fname: Array [String], hparam: HyperParameter): PolyORegression = - new PolyORegression (MatrixD (t).transpose, y, ord, fname, hparam) + new PolyORegression (MatrixD (t).ᵀ, y, ord, fname, hparam) end apply //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -151,10 +150,9 @@ object PolyORegression: /** Given a 1-vector/point 'v', compute the values for all of its polynomial * forms/terms, returning them as a vector. * @param v the vector/point (i-th row of t) for creating forms/terms - * @param k number of features/predictor variables (not counting intercept) = 1 * @param nt the number of terms */ - def forms (v: VectorD, k: Int, nt: Int): VectorD = + def forms (v: VectorD, nt: Int): VectorD = val t = v(0) VectorD (for j <- 0 until nt yield t~^j) end forms @@ -169,7 +167,7 @@ object PolyORegression: val nt = numTerms (ord) println (s"allForms: create expanded data matrix with nt = $nt columns from k = $k columns") val xe = new MatrixD (x.dim, nt) - for i <- x.indices do xe(i) = forms (x(i), k, nt) // vector with values for all forms/terms + for i <- x.indices do xe(i) = forms (x(i), nt) // vector with values for all forms/terms val za = orthogonalize (xe) a = za._2 // save multipliers debug ("allForms", s"expanded data matrix za._1 = ${za._1}") diff --git a/src/main/scala/scalation/modeling/PolyRegression.scala b/src/main/scala/scalation/modeling/PolyRegression.scala index e9c6ec2d1..2a21d4ebb 100644 --- a/src/main/scala/scalation/modeling/PolyRegression.scala +++ 
b/src/main/scala/scalation/modeling/PolyRegression.scala @@ -35,10 +35,9 @@ class PolyRegression (t: MatrixD, y: VectorD, ord: Int, fname_ : Array [String] extends Regression (PolyRegression.allForms (t, ord), y, fname_, hparam): private val flaw = flawf ("PolyRegression") // flaw function - private val n0 = 1 // number of terms/columns originally private val nt = PolyRegression.numTerms (ord) // number of terms/columns after expansion - modelName = "PolyRegression" + _modelName = s"PolyRegression_$ord" if t.dim2 != 1 then flaw ("init", "matrix t must have 1 column") @@ -47,7 +46,7 @@ class PolyRegression (t: MatrixD, y: VectorD, ord: Int, fname_ : Array [String] * i.e., add polynomial terms. * @param z the un-expanded vector */ - def expand (z: VectorD): VectorD = PolyRegression.forms (z, n0, nt) + def expand (z: VectorD): VectorD = PolyRegression.forms (z, nt) //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Given the scalar z, expand it and predict the response value. @@ -97,7 +96,7 @@ object PolyRegression: */ def apply (t: VectorD, y: VectorD, ord: Int, fname: Array [String], hparam: HyperParameter): PolyRegression = - new PolyRegression (MatrixD (t).transpose, y, ord, fname, hparam) + new PolyRegression (MatrixD (t).ᵀ, y, ord, fname, hparam) end apply //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -125,10 +124,9 @@ object PolyRegression: /** Given a 1-vector/point v, compute the values for all of its polynomial * forms/terms, returning them as a vector. 
* @param v the 1-vector (e.g., i-th row of t) for creating forms/terms - * @param k number of features/predictor variables (not counting intercept) = 1 * @param nt the number of terms */ - def forms (v: VectorD, k: Int, nt: Int): VectorD = + def forms (v: VectorD, nt: Int): VectorD = val t = v(0) VectorD (for j <- 0 until nt yield t~^j) end forms @@ -143,7 +141,7 @@ object PolyRegression: val nt = numTerms (ord) println (s"allForms: create expanded data matrix with nt = $nt columns from k = $k columns") val xe = new MatrixD (x.dim, nt) - for i <- x.indices do xe(i) = forms (x(i), k, nt) // vector with values for all forms/terms + for i <- x.indices do xe(i) = forms (x(i), nt) // vector with values for all forms/terms xe // expanded matrix end allForms @@ -234,3 +232,39 @@ end polyRegressionTest end polyRegressionTest2 + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `polyRegressionTest3` main function tests the collinearity/correlation of + * x1, x1^2, x1^3 under centering, standardizing, and min-max normalization, + * Picking an min-max interval like [-2, 2] or [-3, 3] may make the effect somewhat + * similar to standardization. 
+ * > runMain scalation.modeling.polyRegressionTest3 + */ +@main def polyRegressionTest3 (): Unit = + + import scalation.mathstat.VectorDOps._ + + val x1 = VectorD (1, 2, 3, 4, 5, 6, 7, 8, 9, 10) + val x = MatrixD (x1, x1~^2, x1~^3).ᵀ + banner (s"original: x = $x") + println (s"original: x.corr = ${x.corr}") + + var z1 = x1 - x1.mean + var z = MatrixD (z1, z1~^2, z1~^3).ᵀ + banner (s"centered: z = $z") + println (s"centered: z.corr = ${z.corr}") + + z1 = (x1 - x1.mean) / x1.stdev + z = MatrixD (z1, z1~^2, z1~^3).ᵀ + banner (s"standardized: z = $z") + println (s"standardized: z.corr = ${z.corr}") + + banner ("minmax [a, b]: z = z") + for a <- -2 to 2; b <- a+1 to a+4 do + z1 = (b - a).toDouble * (x1 - x1.min) / (x1.max - x1.min) + a + z = MatrixD (z1, z1~^2, z1~^3).ᵀ +// banner (s"minmax [$a, $b]: z = $z") + println (s"minmax [$a, $b]: z.corr = ${z.corr}") + +end polyRegressionTest3 + diff --git a/src/main/scala/scalation/modeling/Predictor.scala b/src/main/scala/scalation/modeling/Predictor.scala index 48dfa4d64..3acf5069b 100644 --- a/src/main/scala/scalation/modeling/Predictor.scala +++ b/src/main/scala/scalation/modeling/Predictor.scala @@ -11,16 +11,27 @@ package scalation package modeling -import scala.collection.mutable.{ArrayBuffer, IndexedSeq, LinkedHashSet => LSET, Set} -import scala.math.{cbrt, min, sqrt} +import scala.collection.mutable.{ArrayBuffer, IndexedSeq, LinkedHashSet => LSET} +import scala.math.{max, min} import scala.util.control.Breaks.{break, breakable} import scalation.mathstat._ +//import scalation.random.RandomVecSample + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Shifted Rectified Linear Unit (Shifted ReLU) for a scalar. 
+ * @param x the scalar to be rectified + * @param a the slope parameter: a = 1 => __/ , a = -1 => \__ + * @param b the shift (intercept analog) parameter: b = 0 => __/ , b = 2 => __/ + * 0 0 + */ +inline def srelu (x: Double, a: Double = 1.0, b: Double = 0.0): Double = max (0, a * x - b) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Rectify the prediction/forecast when they are required to be non-negative, by * setting negative values to zero. - * @param yp the predictived/forecasted value + * @param yp the predicted/forecasted value * @param nneg whether the values are required to be non-negative (e.g., counts) */ inline def rectify (yp: Double, nneg: Boolean = true): Double = @@ -31,7 +42,7 @@ end rectify //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Rectify the prediction/forecast when they are required to be non-negative, by * setting negative values in the vector to zero. - * @param yp the predictived/forecasted vector + * @param yp the predicted/forecasted vector * @param nneg whether the values are required to be non-negative (e.g., counts) */ inline def rectify (yp: VectorD, nneg: Boolean): VectorD = @@ -39,6 +50,16 @@ inline def rectify (yp: VectorD, nneg: Boolean): VectorD = end rectify +// G I V E N S + +// Change as needed the default (given instance) whether to display plots + +//given DO_PLOT: Boolean = false +given DO_PLOT: Boolean = true + +//given DO_REPORT: Boolean = false +given DO_REPORT: Boolean = true + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `Predictor` trait provides a framwork for multiple predictive analytics * techniques, e.g., `Regression`. x is multi-dimensional [1, x_1, ... x_k]. 
@@ -54,26 +75,28 @@ trait Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp extends Model with FeatureSelection: - protected val DO_PLOT = true // whether to plot y vs yp - protected val LIMIT = 5000 // do not plot more than 5000 points private val debug = debugf ("Predictor", true) // debug function private val flaw = flawf ("Predictor") // flaw function if x != null then if x.dim != y.dim then flaw ("init", "row dimensions of x and y are incompatible") + if x.dim2 < 1 then flaw ("init", s"dim2 = ${x.dim2} of the x matrix must be at least 1") if x.dim <= x.dim2 then flaw ("init", s"Predictor requires more rows ${x.dim} than columns ${x.dim2}") - end if private val MIN_FOLDS = 3 // minimum number of folds for cross validation private val stream = 0 // random number stream to use private val permGen = TnT_Split.makePermGen (y.dim, stream) // permutation generator protected var b: VectorD = null // parameter/coefficient vector [b_0, b_1, ... b_k] -// protected var e: VectorD = null // residual/error vector [e_0, e_1, ... e_m-1] if x != null && fname == null then fname = x.indices2.map ("x" + _).toArray // default feature/variable names + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the set of columns (numbers) for the features in this model. + */ + def mcols: LSET [Int] = LSET.range (0, getX.dim2) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the used data matrix x. Mainly for derived classes where x is expanded * from the given columns in x_, e.g., `SymbolicRegression.quadratic` adds squared columns. 
@@ -139,17 +162,12 @@ trait Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp */ def trainNtest (x_ : MatrixD = x, y_ : VectorD = y) (xx: MatrixD = x, yy: VectorD = y): (VectorD, VectorD) = - train (x_, y_) // train the model on training set + train (x_, y_) // train the model on training set debug ("trainNTest", s"b = $b") - val (yp, qof) = test (xx, yy) // test the model on testing set - println (report (qof)) // report on Quality of Fit (QoF) - if DO_PLOT then - val lim = min (yy.dim, LIMIT) - val (qyy, qyp) = (yy(0 until lim), yp(0 until lim)) // slice to LIMIT - val (ryy, ryp) = orderByY (qyy, qyp) // order by yy -// new Plot (null, ryy, ryp, s"$modelName: y black/actual vs. yp red/predicted") - new Plot (null, ryy, ryp, s"$modelName: y black/actual vs. yp red/predicted", lines = true) - end if + val (yp, qof) = test (xx, yy) // test the model on testing set + println (report (qof)) // report on Quality of Fit (QoF) + Predictor.plotPrediction (yy, yp, modelName, doPlot = false) // plot actual and predicted for test-set + Predictor.plotPrediction (yy, yp, modelName, doPlot = DO_PLOT) // plot actual and predicted for test-set (reordered) (yp, qof) end trainNtest @@ -170,6 +188,29 @@ trait Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp VectorD (for i <- x_.indices yield predict (x_(i))) end predict + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Produce a conformal PREDICTION INTERVAL half width for each prediction yp (y-hat). 
+ * Implements Algorithm 2 Split Conformal Prediction from + * @see www.stat.cmu.edu/~ryantibs/papers/conformal.pdf + * @param x_ the testing/full data/input matrix + * @param y_ the testing/full response/output vector + * @param α the significance level (1 - p_) + */ + def predictCInt (x_ : MatrixD, y_ : VectorD, α: Double = .1): VectorD = + val n = y_.dim // number of instances + var n_by2 = n / 2 // number of instances for half + val idx = testIndices (n_by2, true) // randomly split into equal-size subsets idx, rest + val (x_e, x_t, y_e, y_t) = TnT_Split (x_, y_, idx) // Test-n-Train Split: test _e, train _t + train (x_t, y_t) // train model on the TRAINING-set (rest) + val yp = predict (x_e) // predict values for TESTING-set (idx) + + val r = y_e - yp // compute the residuals/errors for TESTING-set + n_by2 = r.dim / 2 // number of residuals for half + val k = math.ceil ((n_by2 + 1) * (1.0 - α)).toInt // determine k based on significance level + val d = r.median (k) // the k-th smallest residual from TESTING-set + VectorD.fill (n)(d) // return as a vector (general form) + end predictCInt + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the hyper-parameters. */ @@ -180,22 +221,22 @@ trait Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp */ def parameter: VectorD = b - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the vector of residuals/errors. - */ -// def residual: VectorD = e - // F E A T U R E S E L E C T I O N + // @see givens in `modeling.FeatureSelection` + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. * Must be implemented for models that support feature selection. 
* Otherwise, use @see `NoBuildModel * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - def buildModel (x_cols: MatrixD): Predictor & Fit + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): Predictor & Fit private var theBest = BestStep ()() // record the best model from feature selection + private val t_rng = if fullset_FS then 0 until y.dim // use full dataset for Feature Selection (FS) + else 0 until Model.trSize (y.dim) // use training set for Feature Selection (FS) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Reset the best-step to default @@ -217,22 +258,6 @@ trait Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp if theBest.qof == null || (best gt theBest.qof(qk)) then theBest = best end updateBest - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the rSq-based QoF results for the l-th iteration. - * @param rSq the matrix contain information about r-Sq-based QoF measures - * @param l the l-th iteration - * @param cross indicator of whether cross-validation are to be included - * @param fit_l the fit vector for the l-th iteration - * @param mod_l the predictive model for the l-th iteration - private def updateQoF (rSq: MatrixD, l: Int, cross: Boolean, best: BestStep): Unit = - rSq(l) = - if cross then - Fit.qofVector (best.qof, best.mod.crossValidate ()) // results for model mod_l, with cross-validation - else - Fit.qofVector (best.qof, null) // results for model mod_l, no cross-validation - end updateQoF - */ - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Perform forward selection to find the most predictive variable to add the * existing model, returning the variable to add and the new model. 
@@ -247,9 +272,11 @@ trait Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp for j <- x.indices2 if ! (cols contains j) do val cols_j = cols union LSET (j) // try adding variable/column x_j val x_cols = x(?, cols_j) // x projected onto cols_j columns - val mod_j = buildModel (x_cols) // regress with x_j added - mod_j.train () // train model - best = best.better (j, mod_j.test ()._2, mod_j) // which is better + val mod_j = buildModel (x_cols, newFname (fname, cols_j)) // regress with x_j added + + val (x_tr, y_tr) = (x_cols(t_rng), y(t_rng)) // get full/training data + mod_j.train (x_tr, y_tr) // train model + best = best.better (j, mod_j.test (x_tr, y_tr)._2, mod_j, cols_j) // which is better end for if best.col == -1 then @@ -263,21 +290,21 @@ trait Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp */ def select0 (qk: Int): BestStep = val x_cols = x(?, LSET (0)) // x projected onto columns {0} - val mod_0 = buildModel (x_cols) // regress with x_0 added + val mod_0 = buildModel (x_cols, newFname (fname, LSET (0))) // regress with x_0 added mod_0.train () // train model val qof_0 = mod_0.test ()._2 BestStep (0, qof_0, mod_0)(qof_0(qk)) // result for intercept only end select0 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform forward selection to find the most predictive variables to have + /** Perform FORWARD SELECTION to find the MOST predictive variables to have * in the model, returning the variables added and the new Quality of Fit (QoF) * measures for all steps. * @see `Fit` for index of QoF measures. 
- * @param cross whether to include the cross-validation QoF measure + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") * @param qk index of Quality of Fit (QoF) to use for comparing quality */ - def forwardSelAll (cross: Boolean = true)(using qk: Int): (LSET [Int], MatrixD) = + def forwardSelAll (cross: String = "many")(using qk: Int): (LSET [Int], MatrixD) = resetBest () val rSq = new MatrixD (x.dim2, Fit.qofVectorSize) // QoF: R^2, R^2 Bar, sMAPE, R^2 cv val cols = LSET (0) // start with x_0 in model (e.g., intercept) @@ -307,12 +334,12 @@ trait Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp * @param rSq the matrix R^2 values (stand in for sse) */ def importance (cols: Array [Int], rSq: MatrixD): Array [(Int, Double)] = - val r2 = rSq(?, 0) // use column 0 for R^2 - val imp = Array.ofDim [(Int, Double)] (r2.dim) // for variables, except intercept - val sf = 1.0 / (r2(1) - r2(0)) // scale factor, so most important = 1 + val r2 = rSq(?, 0) // use column 0 for R^2 + val imp = Array.ofDim [(Int, Double)] (r2.dim) // for variables, except intercept + val sf = 1.0 / (r2(1) - r2(0)) // scale factor, so most important = 1 imp(0) = (cols(0), -0.0) for j <- 1 until imp.size do imp(j) = (cols(j), sf * (r2(j) - r2(j-1))) // scaled improvement in R^2 (2 => cv) - imp // return the importance + imp // return the importance end importance //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -331,9 +358,11 @@ trait Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp for j <- first until x.dim2 if cols contains j do val cols_j = cols diff LSET (j) // try removing variable/column x_j val x_cols = x(?, cols_j) // x projected onto cols_j columns - val mod_j = buildModel (x_cols) // regress with x_j added - mod_j.train () // train model - best = best.better (j, mod_j.test ()._2, mod_j) // which is better + val mod_j = buildModel (x_cols, newFname (fname, 
cols_j)) // regress with x_j added + + val (x_tr, y_tr) = (x_cols(t_rng), y(t_rng)) // get full/training data + mod_j.train (x_tr, y_tr) // train model + best = best.better (j, mod_j.test (x_tr, y_tr)._2, mod_j, cols_j) // which is better end for if best.col == -1 then @@ -347,32 +376,35 @@ trait Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp * @param qk index of Quality of Fit (QoF) to use for comparing quality */ def fullModel (qk: Int): BestStep = - val mod_a = buildModel (x) // regress with all variables x_j - mod_a.train () // train model - val qof_a = mod_a.test ()._2 - BestStep (-1, qof_a, mod_a)(qof_a(qk)) // result for full only + val mod_a = buildModel (x, fname) // regress with all variables x_j + + val (x_tr, y_tr) = (x(t_rng), y(t_rng)) // get full/training data + mod_a.train (x_tr, y_tr) // train model + val qof_a = mod_a.test (x_tr, y_tr)._2 // get test qof for mod_a + BestStep (-1, qof_a, mod_a, mcols)(qof_a(qk)) // result for full only end fullModel //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform backward elimination to find the least predictive variables to remove + /** Perform BACKWARD ELIMINATION to find the LEAST predictive variables to remove * from the full model, returning the variables left and the new Quality of Fit (QoF) * measures for all steps. * @see `Fit` for index of QoF measures. 
* @param first first variable to consider for elimination - * @param cross whether to include the cross-validation QoF measure + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") * @param qk index of Quality of Fit (QoF) to use for comparing quality */ - def backwardElimAll (first: Int = 1, cross: Boolean = true)(using qk: Int): + def backwardElimAll (first: Int = 1, cross: String = "many")(using qk: Int): (LSET [Int], MatrixD) = resetBest () val rSq = new MatrixD (x.dim2, Fit.qofVectorSize) // R^2, R^2 Bar, sMAPE, R^2 cv - val cols = LSET.range (0, x.dim2) // start with all x_j in model + val cols = mcols // start with all x_j in model val rem = ArrayBuffer [Int] () // start with no columns removed - val best0 = fullModel (qk) + val best0 = fullModel (qk) // start with all columns + updateBest (best0) updateQoF (rSq, 0, cross, best0) // update QoF results for full model val jj_qof = best0.qof(qk) - banner (s"backwardElimAll: (l = 0) INITIAL variables (all) => cols = $cols @ $jj_qof") + debug ("backwardElimAll", s"(l = 0) INITIAL variables (all) => cols = $cols @ $jj_qof") breakable { for l <- 1 until x.dim2 - 1 do // l indicates number of variables eliminated @@ -383,7 +415,7 @@ trait Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp rem += best.col // keep track of removed columns updateQoF (rSq, l, cross, best) // update QoF results val (jj, jj_qof) = (best.col, best.qof(qk)) - banner (s"backwardElimAll: (l = $l) REMOVE variable ($jj, ${fname(jj)}) => cols = $cols @ $jj_qof") + debug ("backwardElimAll", s"(l = $l) REMOVE variable ($jj, ${fname(jj)}) => cols = $cols @ $jj_qof") end for } // breakable @@ -395,16 +427,16 @@ trait Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp end backwardElimAll //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform stepwise regression to find the most predictive variables to have - * in the 
model, returning the variables left and the new Quality of Fit (QoF) + /** Perform STEPWISE SELECTION to find a GOOD COMBINATION of predictive variables to have + * in the model, returning the variables selected and the new Quality of Fit (QoF) * measures for all steps. At each step it calls forwardSel and backwardElim * and takes the best of the two actions. Stops when neither action yields improvement. * @see `Fit` for index of QoF measures. - * @param cross whether to include the cross-validation QoF measure + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") * @param swap whether to allow a swap step (swap out a feature for a new feature in one step) * @param qk index of Quality of Fit (QoF) to use for comparing quality */ - def stepwiseSelAll (cross: Boolean = true, swap: Boolean = true)(using qk: Int): + def stepwiseSelAll (cross: String = "many", swap: Boolean = true)(using qk: Int): (LSET [Int], MatrixD) = resetBest () val rSq = new MatrixD (x.dim2 - 1, Fit.qofVectorSize) // QoF: R^2, R^2 Bar, sMAPE, R^2 cv @@ -420,8 +452,13 @@ trait Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp val bestb = backwardElim (cols, 1) // remove least predictive variable debug ("stepwiseSelAll", s"bestf = $bestf, bestb = $bestb") - if (bestb.col == -1 || (bestf ge bestb.qof(qk))) && // forward as good as backward - (bestf.col != -1 && (bestf gt last_q)) then // a better model has been found + val slack = 25.0 / l~^2 // increase slack to include more features + // slack => likely to ADD features at the beginning + +// FIX +/- slack depends metric, e.g., rSq requires -, smape requires + [ need general solution ] + + if (bestb.col == -1 || (bestf ge bestb.qof(qk) - slack)) && // forward as good as backward + (bestf.col != -1 && (bestf gt last_q - slack)) then // a better model has been found updateBest (bestf) vars += bestf.col cols += bestf.col // ADD variable bestf.col @@ -455,12 +492,12 @@ trait 
Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp updateQoF (rSq, l, cross, bestfb) // update QoF results println (s"\nstepwiseSelAll: (l = $l) SWAP variable $bestb with $bestf") else + println (s"\nstepwiseSelAll: (l = $l) last_q = $last_q better ($bestb, $bestf)") break () // can't find a better model -> quit - end if end if val x_cols = x(?, cols) // x projected onto cols columns - val mod_ = buildModel (x_cols) // regress on this x + val mod_ = buildModel (x_cols, newFname (fname, cols)) // regress on this x mod_.train () // train model println (mod_.report (mod_.test ()._2)) // test and report end for @@ -492,6 +529,25 @@ trait Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp BestStep (in, qof_in, mod_j)(qof_in(qk)) // candidate step end swapVars + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform BEAM SEARCH SELECTION to find a GOOD COMBINATION of predictive features/variables to + * have in the model, returning the top k sets of features/variables selected and the new Quality of + * Fit (QoF) measures/metrics for all steps. At each step, iterate over the models in the beam + * (top k) and create candidates by adding features (phase 1) and then removing features (phase 2). + * From all the candidates, keep the best k and start a new iteration. Stops when there is + * no improvement in any of top k or the maximum number of features is reached. + * @see `Fit` for index of QoF measures/metrics. 
+ * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") + * @param bk the beam width holding the top k models (defaults to 3) + * @param qk index of Quality of Fit (QoF) to use for comparing quality + */ + def beamSelAll (cross: String = "many", bk: Int = 3)(using qk: Int): (LSET [Int], MatrixD) = + + // FIX - to be implemented + + null + end beamSelAll + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the Variance Inflation Factor (VIF) for each variable to test * for multi-collinearity by regressing x_j against the rest of the variables. @@ -516,8 +572,22 @@ trait Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp vifV end vif +// T E S T I N G S C E N A R I O S + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform In-Sample Testing, i.e., train and test on the full data set. + * @param skip the number of initial data points to skip (due to insufficient information) + * @param showYp whether to show the prediction vector + */ + def inSample_Test (skip: Int = 0, showYp: Boolean = false): Unit = + val (x_, y_) = (x.drop (skip), y.drop (skip)) + val yp = trainNtest (x_, y_)(x_, y_)._1 + if showYp then + println (s"Final In-Sample Prediction Vector yp = $yp") + end inSample_Test + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the indices for the test-set. + /** Return the indices for the test-set for (1) RANDONLY or (2) FIRST. 
* @see `scalation.mathstat.TnT_Split` * @param n_test the size of test-set * @param rando whether to select indices randomly or in blocks @@ -526,25 +596,46 @@ trait Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp TnT_Split.testIndices (permGen, n_test, rando) end testIndices + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the indices for the test-set for (1) RANDONLY or (3) LAST + * @see `scalation.mathstat.TnT_Split` + * @param n_total the size of full dataset + * @param n_test the size of test-set + * @param rando whether to select indices randomly or in blocks + */ + inline def testIndices (n_total: Int, n_test: Int, rando: Boolean): IndexedSeq [Int] = + TnT_Split.testIndices (permGen, n_total, n_test, rando) + end testIndices + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /* Use validation to compute test Quality of Fit (QoF) measures by dividing - * the full dataset into a TESTING set and a TRAINING set. - * The test set is defined by idx and the rest of the data is the training set. + * the full dataset into a TESTING-set and a TRAINING-set, returning qof and yp. + * The testing-set is defined by idx and the rest of the data is the training-set. + * Select the TESTING-set to be (@see `mathstat.TnT_Split`) + * 1. RANDOM pass rando = true + * 2. FIRST pass rando = false and + * idx = testIndices ((ratio * y.dim).toInt, rando) + * 3. LAST pass rando = false and + * idx = testIndices (y.dim, (ratio * y.dim).toInt, rando) + * 4. 
CUSTOM pass rando = false and + * idx = indices specified by user * @param rando flag indicating whether to use randomized or simple validation - * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) - * @param idx the prescribed TESTING set indices (default => generate) - */ - def validate (rando: Boolean = true, ratio: Double = 0.2) - (idx: IndexedSeq [Int] = testIndices ((ratio * y.dim).toInt, rando)): VectorD = + * @param ratio the ratio of the TESTING-set to the full dataset (most common 70-30 (.3), 80-20 (.2)) + * @param idx the prescribed TESTING-set indices (default => generate) + */ + def validate (rando: Boolean = true, ratio: Double = Model.TE_RATIO) +// (idx: IndexedSeq [Int] = testIndices ((ratio * y.dim).toInt, rando)): + (idx: IndexedSeq [Int] = testIndices (y.dim, (ratio * y.dim).toInt, rando)): + (VectorD, VectorD) = + debug ("validate", s"n_test = ${(ratio * y.dim).toInt}, rando = $rando") val (x_e, x_, y_e, y_) = TnT_Split (x, y, idx) // Test-n-Train Split - train (x_, y_) // train model on the training set - val qof = test (x_e, y_e)._2 // test on test-set and get QoF measures - if qof(QoF.sst.ordinal) <= 0.0 then // requires variation in test-set + train (x_, y_) // train model on the TRAINING-set + val (yp, qof) = test (x_e, y_e) // test on TESTING-set and get its yp and QoF measures + if qof(QoF.sst.ordinal) <= 0.0 then // requires variation in TESTING-set flaw ("validate", "chosen testing set has no variability") - end if println (FitM.fitMap (qof, QoF.values.map (_.toString))) - qof + (yp, qof) end validate //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -566,11 +657,10 @@ trait Predictor (x: MatrixD, y: VectorD, protected var fname: Array [String], hp for fold <- 0 until k do banner (s"crossValidate: fold $fold: train-test splits sizes = (${y.dim - sz}, $sz)") val idx = fullIdx (fold * sz until (fold+1) * sz).toMuIndexedSeq // instance indices for this fold - val qof = 
validate (rando, ratio)(idx) + val qof = validate (rando, ratio)(idx)._2 debug ("crossValidate", s"fold $fold: qof = $qof") if qof(QoF.sst.ordinal) > 0.0 then // requires variation in test-set for q <- qof.indices do stats(q).tally (qof(q)) // tally these QoF measures - end if end for stats @@ -580,11 +670,50 @@ end Predictor //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Predictor` companion object provides a method for testing predictive - * models. +/** The `Predictor` companion object provides a method for testing predictive models. */ object Predictor: + private val LIMIT = 5000 // do not plot more than 5000 points + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Plot the actual and predicted values/vectors both ordered by increasing yy values. + * @param yy the aligned actual response/output vector to use (test/full) + * @param yp the corresponding vector of predicted values + * @param mName the model name + * @param order whether to order all vectors by y-actual + * @param doPlot whether to plot y-actual vs. predictions + */ + def plotPrediction (yy: VectorD, yp: VectorD, mName: String, + order: Boolean = true, doPlot: Boolean = true): Unit = + if doPlot then + val r = 0 until min (yy.dim, LIMIT) // limited index range + var ys = (yy(r), yp(r)) // slice to LIMIT + if order then ys = orderByY (ys._1, ys._2) // order all by yy + new Plot (null, ys._1, ys._2, // plot ordered actual, predicted + s"Plot $mName predictions: yy black/actual vs. yp red/predicted", lines = true) + end plotPrediction + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Plot the PREDICTION INTERVALS with all vectors ordered by increasing yp values. 
+ * @param yy the aligned actual response/output vector to use (test/full) + * @param yp the corresponding vector of predicted values + * @param low_up the predicted (lower, upper) bound vectors + * @param mName the model name + * @param order whether to order all vectors by y-actual + * @param doPlot whether to plot y-actual vs. predictions as well as prediction intervals + */ + def plotPredictionInt (yy: VectorD, yp: VectorD, + low_up: (VectorD, VectorD), mName: String, + order: Boolean = true, doPlot: Boolean = true): Unit = + if doPlot then + val r = 0 until min (yy.dim, LIMIT) // limited index range + var ys = MatrixD (yp(r), yy(r), low_up._1(r), low_up._2(r)) // slice to LIMIT and order all by yp + if order then ys = orderByY (ys) // order all by yy + new PlotM (null, ys, Array ("yp", "yy", "low", "up"), // plot ordered actual, predicted, lower, upper + s"Plot $mName prediction intervals [low, up]", lines = true) + end plotPredictionInt + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Test (in-sample) by training and testing on the FULL dataset. * Test (out-of-sample) by training on the TRAINING set and testing on the TESTING set. 
@@ -596,12 +725,12 @@ object Predictor: def test (mod: Predictor, ext: String = "", check: Boolean = true): Unit = val iq = QoF.rSq.ordinal banner (s"Test ${mod.modelName} $ext") - val (yp, qof) = mod.trainNtest ()() // train and test the model on full dataset (in-sample) + val qof = mod.trainNtest ()()._2 // train and test the model on full dataset (in-sample) println ("Validate: Out-of-Sample Testing") - val qof2 = mod.validate ()() // train on training set, test on testing set + val qof2 = mod.validate ()()._2 // train on training-set, test on testing-set if check then assert (rel_diff (qof(iq), qof2(iq)) < 0.2) // check agreement of in-sample and out-of-sample results - println (FitM.fitMap (mod.validate ()(), QoF.values.map (_.toString))) + println (FitM.fitMap (qof2, QoF.values.map (_.toString))) end test end Predictor @@ -625,30 +754,32 @@ end Predictor val fname_6 = Array ("modelyear") // modelyear has highest positive correlation val fname_4 = Array ("weight") // weight has highest correlation magnitude val fname_04 = Array ("intercept", "weight") - val hp2 = Regression.hp // the hyper-parameters of Regression +// val hp2 = Regression.hp // the hyper-parameters of Regression test (new NullModel (y), check = false) // 1 - test (new SimplerRegression (ox(?, Set (6)), y, fname_6)) // 2 - test (new SimpleRegression (ox(?, Set (0, 4)), y, fname_04)) // 3 + test (new SimplerRegression (ox(?, LSET (6)), y, fname_6)) // 2 + test (new SimpleRegression (ox(?, LSET (0, 4)), y, fname_04)) // 3 test (new Regression (x, y, x_fname)) // 4 - no intercept test (new Regression (ox, y, ox_fname)) // 5 test (RidgeRegression.center (x, y, x_fname)) // 6 - no intercept test (new LassoRegression (x, y, x_fname)) // 7 - no intercept test (new LassoRegression (ox, y, ox_fname)) // 8 test (new RegressionWLS (ox, y, ox_fname)) // 9 +/* test (new TranRegression (ox, y, ox_fname, hp2, id, id), "id") // 10 - id test (new TranRegression (ox, y, ox_fname, hp2, sqrt, sq), "sqrt") // 11 - 
sqrt test (new TranRegression (ox, y, ox_fname, hp2, cbrt, cb), "cbrt") // 12 - cbrt test (new TranRegression (ox, y, ox_fname), "log") // 13 - log test (TranRegression (ox, y, ox_fname), "box-cox") // 14 - box-cox +*/ test (SymbolicRegression.quadratic (x, y, x_fname)) // 15 test (SymbolicRegression.quadratic (x, y, x_fname, true)) // 16 test (SymbolicRegression.cubic (x, y, x_fname)) // 17 test (SymbolicRegression.cubic (x, y, x_fname, true)) // 18 - test (SymbolicRegression (x, y, x_fname, Set (-2.0, -1, 2, 3, 4))) // 19 - test (new PolyRegression (ox(?, Set (4)), y, 4, fname_4)) // 20 - test (new PolyORegression (ox(?, Set (4)), y, 4, fname_4)) // 21 - test (new TrigRegression (ox(?, Set (4)), y, 8, fname_4)) // 22 + test (SymbolicRegression (x, y, x_fname, LSET (-2.0, -1, 2, 3, 4))) // 19 + test (new PolyRegression (ox(?, LSET (4)), y, 4, fname_4)) // 20 + test (new PolyORegression (ox(?, LSET (4)), y, 4, fname_4)) // 21 + test (new TrigRegression (ox(?, LSET (4)), y, 8, fname_4)) // 22 test (new ExpRegression (ox, y, ox_fname)) // 23 test (new KNN_Regression (x, y, x_fname), "k=3") // 25 KNN_Regression.hp("kappa") = 5 diff --git a/src/main/scala/scalation/modeling/QuantileReg.scala b/src/main/scala/scalation/modeling/QuantileReg.scala new file mode 100644 index 000000000..be9698372 --- /dev/null +++ b/src/main/scala/scalation/modeling/QuantileReg.scala @@ -0,0 +1,313 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Wed Dec 24 13:59:45 EST 2025 + * @see LICENSE (MIT style license file). 
+ * + * @note Model: Quantile Regression + */ + +package scalation +package modeling + +import scala.math.{abs, max} +import scala.util.control.Breaks.{break, breakable} + +import scalation.mathstat._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Return the Pin Ball Loss based on the difference between y and yp = e + * @param e the error/residual vector + * @param q the quantile of interest + */ +def pinball (e: VectorD, q: Double = 0.5): Double = + var sum = 0.0 + cfor (0, e.dim) { i => + val e_i = e(i) + sum += (if e_i >= 0.0 then q * abs (e_i) else (1.0 - q) * abs (e_i)) + } // cfor + sum +end pinball + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Return the Pin Ball Loss based on the difference between y and yp. + * @param x the data/input m-by-n matrix + * @param y the actual response/target variable m-vector + * @param q the quantile of interest + * @param b the parameter vector + */ +def pinball (x: MatrixD, y: VectorD, q: Double)(b: VectorD): Double = + val yp = x * b + val e = y - yp + pinball (e, q) +end pinball + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `QuantileReg` class supports quantile regression. 
+ * @param x the data/input m-by-n matrix + * (augment with a first column of ones to include intercept in model) + * @param y the response/output m-vector + * @param fname_ the feature/variable names (defaults to null) + * @param hparam the hyper-parameters (defaults to QuantileReg.hp) + */ +class QuantileReg (x: MatrixD, y: VectorD, fname_ : Array [String] = null, + hparam: HyperParameter = QuantileReg.hp) + extends Predictor (x, y, fname_, hparam) + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2): + // if not using an intercept df = (x.dim2, x.dim-x.dim2), correct by calling 'resetDF' method from `Fit` + + private val debug = debugf ("QuantileReg", true) // debug function + private val flaw = flawf ("QuantileReg") // flaw function + private val n = x.dim2 // number of columns + private val q = hparam("q").toDouble // the quantile sought + private val maxIter = hparam("maxIter").toInt // maximum number of iterations + + _modelName = s"QuantileReg_$dfr" + + if n < 1 then flaw ("init", s"dim2 = $n of the 'x' matrix must be at least 1") + if q out (0.01, 0.99) then flaw ("init", s"quantile q = $q must be in [.01, .99]") + + debug ("init", s"_modelName with q = $q") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Train the predictor by fitting the parameter vector (b-vector) in the + * multiple regression equation + * y = b dot x + e = [b_0, ... b_k] dot [1, x_1 , ... x_k] + e + * using the Iteratively Reweighted Least Squares 'IRLS' method. 
+ * @param x_ the training/full data/input matrix + * @param y_ the training/full response/output vector + */ + def train (x_ : MatrixD, y_ : VectorD): Unit = + b = VectorD.one (x.dim2) // initial guess (e.g., all ones or OLS) + val m = x.dim + val eps = 1e-8 + + breakable { + cfor (0, maxIter) { _ => + val r = y - x * b // calculate residuals + + // Calculate weight vector (w) based on asymmetric pinball loss + val w = new VectorD (m) + cfor (0, m) { i => + val weight = if r(i) >= 0 then q else 1.0 - q + w(i) = weight / (max (abs (r(i)), eps)) + } // cfor + + // Solve using ScalaTion's Weighted Regression (WLS) + val wr = new RegressionWLS (x_, y_, fname, w) + wr.train () + + val b_new = wr.parameter + if (b_new - b).norm < 1e-6 then break () // convergence check + b = b_new + } // cfor + } // breakable + b + end train + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Test a predictive model y_ = f(x_) + e and return its QoF vector. + * Testing may be be in-sample (on the training set) or out-of-sample + * (on the testing set) as determined by the parameters passed in. + * Note: must call train before test. + * @param x_ the testing/full data/input matrix (defaults to full x) + * @param y_ the testing/full response/output vector (defaults to full y) + */ + def test (x_ : MatrixD, y_ : VectorD): (VectorD, VectorD) = + val yp = predict (x_) // make predictions + (yp, diagnose (y_, yp)) // return predictions and QoF vector + end test + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of vector y = f(x_, b). It is overridden for speed. + * @param x_ the matrix to use for making predictions, one for each row + */ + override def predict (x_ : MatrixD): VectorD = x_ * b + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a sub-model that is restricted to the given columns of the data matrix. 
+ *  @param x_cols  the columns that the new model is restricted to
+ *  @param fname2  the variable/feature names for the new model (defaults to null)
+ */
+    def buildModel (x_cols: MatrixD, fname2: Array [String] = null): QuantileReg = new QuantileReg (x_cols, y, fname2, hparam)
+
+end QuantileReg
+
+
+object QuantileReg:
+
+    /** Base hyper-parameter specification for `QuantileReg`
+     */
+    val hp = new HyperParameter
+    hp += ("q", 0.5, 0.5)                                                  // the target quantile (0.5 => median)
+    hp += ("maxIter", 50, 50)                                              // maximum number of IRLS iterations
+
+    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+    /** Create a `QuantileReg` object from a combined data-response matrix.
+     *  @param xy      the combined data-response matrix (predictors and response)
+     *  @param fname   the feature/variable names (defaults to null)
+     *  @param hparam  the hyper-parameters (defaults to hp)
+     *  @param col     the designated response column (defaults to the last column)
+     */
+    def apply (xy: MatrixD, fname: Array [String] = null,
+               hparam: HyperParameter = hp)(col: Int = xy.dim2 - 1): QuantileReg =
+        new QuantileReg (xy.not(?, col), xy(?, col), fname, hparam)
+    end apply
+
+    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+    /** Create two `QuantileReg` objects from a combined data-response matrix, at
+     *  lower and upper quantiles to enable prediction intervals.
+ *  @param xy      the combined data-response matrix (predictors and response)
+ *  @param fname   the feature/variable names (defaults to null)
+ *  @param α       the significance level (e.g., .1 => 2 .05 tails: .05 [ .9 ] .05)
+ *  @param hparam  the hyper-parameters (defaults to hp)
+ *  @param col     the designated response column (defaults to the last column)
+ */
+    def predInterval (xy: MatrixD, fname: Array [String] = null, α: Double = 0.1,
+                      hparam: HyperParameter = hp)(col: Int = xy.dim2 - 1): (QuantileReg, QuantileReg) =
+        hparam("q") = α / 2                                                // FIX: set q on the passed hparam (was global hp)
+        val mod1 = new QuantileReg (xy.not(?, col), xy(?, col), fname, hparam)
+        hparam("q") = 1 - α / 2                                            // upper quantile (q is read at construction)
+        val mod2 = new QuantileReg (xy.not(?, col), xy(?, col), fname, hparam)
+        (mod1, mod2)
+    end predInterval
+
+end QuantileReg
+
+
+//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+/** The `quantileRegTest` main function tests the `QuantileReg` class.
+ *  It tests the Pin Ball Loss Function.
+ *  > runMain scalation.modeling.quantileRegTest
+ */
+@main def quantileRegTest (): Unit =
+
+    val y    = VectorD.range (0, 20)
+    val yp   = VectorD.fill (y.dim)(y.mean)
+    val loss = pinball (y - yp)
+    println (s"pinball = $loss")
+
+end quantileRegTest
+
+
+//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+/** The `quantileRegTest2` main function tests a quantile regression model on a small
+ *  dataset of temperatures from counties in Texas where the variables/factors to consider
+ *  are Latitude (x1), Elevation (x2) and Longitude (x3).  The model equation
+ *  is the following:
+ *      y  =  b dot x  =  b0 + b1*x1 + b2*x2 + b3*x3
+ *  It compares POINT PREDICTIONS of `Regression` (mean) and `QuantileReg` (median).
+ * > runMain scalation.modeling.quantileRegTest2 + */ +@main def quantileRegTest2 (): Unit = + + // 16 data points: one x1 x2 x3 y + // Lat Elev Long Temp County + val xy = MatrixD ((16, 5), 1.0, 29.767, 41.0, 95.367, 56.0, // Harris + 1.0, 32.850, 440.0, 96.850, 48.0, // Dallas + 1.0, 26.933, 25.0, 97.800, 60.0, // Kennedy + 1.0, 31.950, 2851.0, 102.183, 46.0, // Midland + 1.0, 34.800, 3840.0, 102.467, 38.0, // Deaf Smith + 1.0, 33.450, 1461.0, 99.633, 46.0, // Knox + 1.0, 28.700, 815.0, 100.483, 53.0, // Maverick + 1.0, 32.450, 2380.0, 100.533, 46.0, // Nolan + 1.0, 31.800, 3918.0, 106.400, 44.0, // El Paso + 1.0, 34.850, 2040.0, 100.217, 41.0, // Collington + 1.0, 30.867, 3000.0, 102.900, 47.0, // Pecos + 1.0, 36.350, 3693.0, 102.083, 36.0, // Sherman + 1.0, 30.300, 597.0, 97.700, 52.0, // Travis + 1.0, 26.900, 315.0, 99.283, 60.0, // Zapata + 1.0, 28.450, 459.0, 99.217, 56.0, // Lasalle + 1.0, 25.900, 19.0, 97.433, 62.0) // Cameron + + banner ("Texas Temperatures Regression") + val mod = Regression (xy)() // create `Regression` model + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + banner ("Texas Temperatures Quantile (Median) Regression") + val qmod = QuantileReg (xy)() // create `QuantileReg` model + qmod.trainNtest ()() // train and test the model +// println (qmod.summary ()) // parameter/coefficient statistics + +end quantileRegTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `quantileRegTest3` main function tests a quantile regression model on a small + * dataset of temperatures from counties in Texas where the variables/factors to consider + * are Latitude (x1), Elevation (x2) and Longitude (x3). The model equation + * is the following: + * y = b dot x = b0 + b1*x1 + b2*x2 + b3*x3 + * It compare PREDICTION INTERVALS from `Regression` and `QuantileReg`. 
+ * > runMain scalation.modeling.quantileRegTest3 + */ +@main def quantileRegTest3 (): Unit = + + import MatrixD.at + + // 16 data points: one x1 x2 x3 y + // Lat Elev Long Temp County + val xy = MatrixD ((16, 5), 1.0, 29.767, 41.0, 95.367, 56.0, // Harris + 1.0, 32.850, 440.0, 96.850, 48.0, // Dallas + 1.0, 26.933, 25.0, 97.800, 60.0, // Kennedy + 1.0, 31.950, 2851.0, 102.183, 46.0, // Midland + 1.0, 34.800, 3840.0, 102.467, 38.0, // Deaf Smith + 1.0, 33.450, 1461.0, 99.633, 46.0, // Knox + 1.0, 28.700, 815.0, 100.483, 53.0, // Maverick + 1.0, 32.450, 2380.0, 100.533, 46.0, // Nolan + 1.0, 31.800, 3918.0, 106.400, 44.0, // El Paso + 1.0, 34.850, 2040.0, 100.217, 41.0, // Collington + 1.0, 30.867, 3000.0, 102.900, 47.0, // Pecos + 1.0, 36.350, 3693.0, 102.083, 36.0, // Sherman + 1.0, 30.300, 597.0, 97.700, 52.0, // Travis + 1.0, 26.900, 315.0, 99.283, 60.0, // Zapata + 1.0, 28.450, 459.0, 99.217, 56.0, // Lasalle + 1.0, 25.900, 19.0, 97.433, 62.0) // Cameron + + val x = xy.not(?, 4) + val y = xy(?, 4) + + banner ("Texas Temperatures Regression") + val mod = Regression (xy)() // create Regression model with intercept (else pass x) + val (yp, _) = mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + var mName = mod.modelName + + // PREDICTION INTERVAL assuming Gaussian errors and using predictInt from `Fit` + + banner ("Texas Temperatures Prediction Intervals") + val l_u = mod.PIbounds (yp, mod.predictInt_ (x)) // make PI lower and upper bound vectors from yp and ihw + val (qof_all, iα) = mod.diagnose_pi (y, yp, l_u) // compute metrics for both point and interval predictions + mod.showQoF (qof_all) // show all the QoF metrics + Predictor.plotPredictionInt (y, yp, at (l_u, iα), mName) // plot ordered actual, predicted, lower, upper + + // PREDICTION INTERVAL using Split Conformal Predictions (SCP) `predictCInt` from `Predictor` + + banner ("Texas Temperatures Conformal Prediction Intervals") + var l_u_ = 
mod.PIbounds (yp, mod.predictCInt (x, y)) // make PI lower and upper bound vectors from yp and ihw + var qof_all_ = mod.diagnose_ (y, yp, l_u_) // compute metrics for both point and interval predictions + mod.showQoF (qof_all_) // show all the QoF metrics + Predictor.plotPredictionInt (y, yp, l_u_, mName) // plot ordered actual, predicted, lower, upper + + // PREDICTION INTERVAL using Quantile Regression + + banner ("Texas Temperatures Quantile (PI) Regression") + val mods = QuantileReg.predInterval (xy)() // create two QuantileReg models for prediction intervals + val (yp1, _) = mods._1.trainNtest ()() // train and test the model (low) + val (yp2, _) = mods._2.trainNtest ()() // train and test the model (up) +// println (mods.summary ()) // parameter/coefficient statistics + + mName = mods._1.modelName + l_u_ = (yp1, yp2) + qof_all_ = mods._1.diagnose_ (y, yp, l_u_) // compute metrics for both point and interval predictions + mod.showQoF (qof_all_) // show all the QoF metrics + Predictor.plotPredictionInt (y, yp, l_u_, mName) // plot ordered actual, predicted, lower, upper + new PlotM (null, MatrixD (y, yp1, yp2), Array ("y", "yp1 (low)", "yp2 (up)"), + "Plot Quantile (PI) Regression", lines = true) + +end quantileRegTest3 + diff --git a/src/main/scala/scalation/modeling/Regression.scala b/src/main/scala/scalation/modeling/Regression.scala index 86a6027e9..33c561c56 100644 --- a/src/main/scala/scalation/modeling/Regression.scala +++ b/src/main/scala/scalation/modeling/Regression.scala @@ -6,6 +6,8 @@ * @see LICENSE (MIT style license file). 
* * @note Model: Multiple Linear Regression (linear terms, no cross-terms) + * + * @see math.stackexchange.com/questions/617735/multiple-regression-degrees-of-freedom-f-test */ package scalation @@ -44,15 +46,16 @@ import scalation.mathstat._ class Regression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, hparam: HyperParameter = Regression.hp) extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2): + // degrees of freedom: dfr = n - 1, df = m - n // if not using an intercept df = (x.dim2, x.dim-x.dim2), correct by calling 'resetDF' method from `Fit` - private val debug = debugf ("Regression", false) // debug function + private val debug = debugf ("Regression", true) // debug function private val flaw = flawf ("Regression") // flaw function private val algorithm = hparam("factorization") // factorization algorithm private val n = x.dim2 // number of columns - modelName = s"Regression @dfm = $dfm" + _modelName = s"Regression_$dfr" if n < 1 then flaw ("init", s"dim2 = $n of the 'x' matrix must be at least 1") @@ -62,9 +65,9 @@ class Regression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, */ private def solver (x_ : MatrixD): Factorization = algorithm match // select factorization algorithm - case "Fac_Cholesky" => new Fac_Cholesky (x_.transpose * x_) // Cholesky Factorization - case "Fac_LU" => new Fac_LU (x_.transpose * x_) // LU Factorization - case "Fac_Inverse" => new Fac_Inverse (x_.transpose * x_) // Inverse Factorization + case "Fac_Cholesky" => new Fac_Cholesky (x_.ᵀ * x_) // Cholesky Factorization + case "Fac_LU" => new Fac_LU (x_.ᵀ * x_) // LU Factorization + case "Fac_Inverse" => new Fac_Inverse (x_.ᵀ * x_) // Inverse Factorization case "Fac_SVD" => new Fac_SVD (x_) // Singular Value Decomposition case _ => Fac_QR (x_) // QR/LQ Factorization (default) end match @@ -85,7 +88,7 @@ class Regression (x: MatrixD, y: VectorD, fname_ : Array [String] = 
null, b = fac match // RECORD the parameters/coefficients (@see `Predictor`) case fac: Fac_QR => fac.solve (y_) case fac: Fac_SVD => fac.solve (y_) - case _ => fac.solve (x_.transpose * y_) + case _ => fac.solve (x_.ᵀ * y_) if b(0).isNaN then flaw ("train", s"parameter b = $b") debug ("train", s"$fac estimates parameter b = $b") @@ -101,7 +104,6 @@ class Regression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, */ def test (x_ : MatrixD = x, y_ : VectorD = y): (VectorD, VectorD) = val yp = predict (x_) // make predictions -// e = y_ - yp // RECORD the residuals/errors (@see `Predictor`) (yp, diagnose (y_, yp)) // return predictions and QoF vector end test @@ -127,10 +129,11 @@ class Regression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - override def buildModel (x_cols: MatrixD): Regression = + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): Regression = debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") - new Regression (x_cols, y, null, hparam) + new Regression (x_cols, y, fname2, hparam) end buildModel end Regression @@ -146,6 +149,10 @@ object Regression: */ val hp = new HyperParameter; hp += ("factorization", "Fac_QR", "Fac_QR") + /** Main metrics for regression type problems, e.g., used in `PlotM` + */ + val metrics = Array ("R^2", "R^2 bar", "sMAPE", "R^2 cv") + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a `Regression` object from a combined data-response matrix. 
* @param xy the combined data-response matrix (predictors and response) @@ -160,7 +167,8 @@ object Regression: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a `Regression` object from a data matrix and a response vector. - * This method provides data rescaling. + * This method provides data rescaling of x. However, rescaling of y may be + * needed for Regularized Regression and Neural Networks. * @param x the data/input m-by-n matrix * (augment with a first column of ones to include intercept in model) * @param y the response/output m-vector @@ -254,13 +262,13 @@ end regressionTest val y = VectorD (105.0, 115.0, 116.0, 117.0, 112.0, 121.0, 121.0, 110.0, 110.0, 114.0, 114.0, 115.0, 114.0, 106.0, 125.0, 114.0, 106.0, 113.0, 110.0, 122.0) - println ("model: y = b_0 + b_1*x1 + b_2*x_ + b3*x3 + b4*x42") + println ("model: y = b_0 + b_1*x_1 + b_2*x_2 + b_3*x_3 + b_4*x_4") // println ("model: y = b₀ + b₁∙x₁ + b₂∙x₂ + b₃∙x₃ + b₄∙x₄") println (s"x = $x") println (s"y = $y") - val xtx = x.transpose * x - val xty = x.transpose * y + val xtx = x.ᵀ * x + val xty = x.ᵀ * y var fac: Factorization = null // factorization algorithm var mod: Regression = null // regression model @@ -341,21 +349,43 @@ import Example_AutoMPG._ */ @main def regressionTest3 (): Unit = -// println (s"ox = $ox") -// println (s"y = $y") + import MatrixD.at + +// println (s"ox = $ox") // data/input matrix +// println (s"y = $y") // response/output vector println (s"ox_fname = ${stringOf (ox_fname)}") banner ("AutoMPG Regression") - val mod = new Regression (ox, y, ox_fname) // create model with intercept (else pass x) - mod.trainNtest ()() // train and test the model - println (mod.summary ()) // parameter/coefficient statistics + val mod = new Regression (ox, y, ox_fname) // create model with intercept (else pass x) + val yp = mod.trainNtest ()()._1 // train and test the model and save predictions + println (mod.summary ()) // parameter/coefficient statistics + + 
val mName = mod.modelName + + // PREDICTION INTERVAL assuming Gaussian errors and using predictInt from `Fit` + + banner ("AutoMPG Prediction Intervals") + val l_u = mod.PIbounds (yp, mod.predictInt_ (ox)) // make PI lower and upper bound matrices from yp and ihw + val (qof_all, iα) = mod.diagnose_pi (y, yp, l_u) // compute metrics for both point and interval predictions + mod.showQoF (qof_all) // show all the QoF metrics + Predictor.plotPredictionInt (y, yp, at (l_u, iα), mName) // plot ordered actual, predicted, lower, upper + // PREDICTION INTERVAL using Split Conformal Predictions (SCP) `predictCInt` from `Predictor` + + banner ("AutoMPG Conformal Prediction Intervals") + val l_u_ = mod.PIbounds (yp, mod.predictCInt (ox, y)) // make PI lower and upper bound vectors from yp and ihw + val qof_all_ = mod.diagnose_ (y, yp, l_u_) // compute metrics for both point and interval predictions + mod.showQoF (qof_all_) // show all the QoF metrics + Predictor.plotPredictionInt (y, yp, l_u_, mName) // plot ordered actual, predicted, lower, upper + +/* banner ("AutoMPG Validation Test") mod.validate ()() banner ("AutoMPG Cross-Validation Test") val stats = mod.crossValidate () FitM.showQofStatTable (stats) +*/ end regressionTest3 @@ -381,8 +411,7 @@ end regressionTest3 // val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, sMAPE, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "R^2 cv"), - "R^2 vs n for Regression", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, "R^2 vs n for Regression", lines = true) println (s"rSq = $rSq") end regressionTest4 @@ -413,8 +442,7 @@ end regressionTest4 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, sMAPE, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "R^2 cv"), - s"R^2 vs n for Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, 
Regression.metrics, s"R^2 vs n for Regression with $tech", lines = true) banner ("Feature Importance") println (s"$tech: rSq = $rSq") val imp = mod.importance (cols.toArray, rSq) @@ -427,7 +455,7 @@ end regressionTest5 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `regressionTest6` main function tests the `Regression` class using the following * regression equation. - * y = b dot x = b_0 + b_1*x1 + b_2*x_2. + * y = b dot x = b_0 + b_1*x_1 + b_2*x_2. * Show effects of increasing collinearity. * > runMain scalation.modeling.regressionTest6 */ @@ -446,7 +474,7 @@ end regressionTest5 mod.trainNtest ()() println (mod.summary ()) - for i <- 0 to 8 do + cfor (0, 9) { _ => banner (s"Test Increasing Collinearity: x_32 = ${x(3, 2)}") println (s"x = $x") println (s"x.corr = ${x.corr}") @@ -454,7 +482,7 @@ end regressionTest5 mod.trainNtest ()() println (mod.summary ()) x(3, 2) += 0.5 - end for + } // cfor end regressionTest6 @@ -462,15 +490,15 @@ end regressionTest6 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `regressionTest7` main function trains a regression model on a small dataset of * temperatures from counties in Texas where the variables/factors to consider - * are Latitude (x1), Elevation (x2) and Longitude (x3). The model equation + * are Latitude (x_1), Elevation (x_2) and Longitude (x_3). 
The model equation * is the following: - * y = b dot x = b0 + b1*x1 + b2*x2 + b3*x3 + * y = b dot x = b_0 + b_1*x_1 + b_2*x_2 + b_3*x_3 * > runMain scalation.modeling.regressionTest7 */ @main def regressionTest7 (): Unit = // 16 data points: one x1 x2 x3 y - // Lat Elev Long Temp County + // Const Lat Elev Long Temp County val xy = MatrixD ((16, 5), 1.0, 29.767, 41.0, 95.367, 56.0, // Harris 1.0, 32.850, 440.0, 96.850, 48.0, // Dallas 1.0, 26.933, 25.0, 97.800, 60.0, // Kennedy @@ -498,7 +526,8 @@ end regressionTest7 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `regressionTest8` main function trains a regression model on the Boston House Prices - * dataset. + * dataset. It illustrates use of the `load` method in the `MatrixD` object. + * @see `scalation.mathstat.MatrixD` * > runMain scalation.modeling.regressionTest8 */ @main def regressionTest8 (): Unit = @@ -535,7 +564,7 @@ end regressionTest8 val yp = x * b val e = y - yp val sse = e.normSq - val grad = -x.transpose * e + val grad = -x.ᵀ * e println (s"epoch = $epoch, sse = $sse, rSq = ${1 - sse/sst}, b = $b, yp = $yp, grad = $grad") b -= grad * eta end for @@ -553,7 +582,7 @@ end regressionTest9 */ @main def regressionTest10 (): Unit = - // 5 data points: constant term, x_1 coordinate, x_2 coordinate + // 6 data points: constant term, x_1 coordinate, x_2 coordinate val x = MatrixD ((6, 3), 1.0, 1.0, 1.0, // 6-by-3 matrix 1.0, 2.0, 4.0, @@ -569,3 +598,67 @@ end regressionTest9 end regressionTest10 + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `regressionTest11` main function trains a regression model small dataset. 
+ * val x2 = VectorD (1, 4, 9, 16, 25) + * > runMain scalation.modeling.regressionTest11 + */ +@main def regressionTest11 (): Unit = + + // 5 data points: constant term, x_1 coordinate, x_2 coordinate + + val _1 = VectorD.one (5) + val x1 = VectorD (1, 2, 3, 4, 5) + val y = VectorD (2, 3, 8, 18, 48) + +// val x = MatrixD (_1, x1).transpose + val x = MatrixD (_1, x1, x1~^2).transpose + + val mod = new Regression (x, y) // create model with intercept + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + println (s"xtx = ${x.transpose * x}") + println (s"xty = ${x.transpose * y}") + +end regressionTest11 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `regressionTest12` main function tests the `Regression` class using + * the AutoMPG dataset. It illustrates using the `Table` class for reading + * the data from a .csv file "auto_mpg.csv". Assumes no missing values. + * It also combines feature selection with cross-validation and plots + * R^2, R^2 bar, sMAPE, and R^2 cv vs. the instance index. 
+ * > runMain scalation.modeling.regressionTest12 + */ +@main def regressionTest12 (): Unit = + + import scalation.database.table.Table + + banner ("auto_mpg Table") + val ncols = 8 + val data = Table.load ("auto_mpg.csv", "auto_mpg", ncols, null) + data.show () + + banner ("AutoMPG dataset") + val xcols = Array.range (0, ncols-1) + val (x, y) = data.toMatrixV (xcols, ncols-1) + val fname = xcols.map (data.schema (_)) + println (s"y = $y") + + banner ("Regression for AutoMPG") + val mod = new Regression (x, y, fname) // create a regression model + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + banner ("Forward Selection Test") + val (cols, rSq) = mod.forwardSelAll () // R^2, R^2 bar, sMAPE, R^2 cv + val k = cols.size + val t = VectorD.range (1, k) // instance index + new PlotM (t, rSq.ᵀ, Regression.metrics, "R^2 vs n for Regression", lines = true) + println (s"rSq = $rSq") + +end regressionTest12 + diff --git a/src/main/scala/scalation/modeling/RegressionCat.scala b/src/main/scala/scalation/modeling/RegressionCat.scala index 2828af74f..dbccdfaed 100644 --- a/src/main/scala/scalation/modeling/RegressionCat.scala +++ b/src/main/scala/scalation/modeling/RegressionCat.scala @@ -16,6 +16,7 @@ package modeling import scala.runtime.ScalaRunTime.stringOf import scalation.mathstat._ +import scalation.theory.Variable type MatrixI = MatrixD // MatrixI object exists, MatrixI class uses MatrixD @@ -76,7 +77,7 @@ class RegressionCat (x_ : MatrixD, t: MatrixI, y: VectorD, fname_ : Array [Strin if x_.dim != m then flaw ("init", s"dimensions of x_ = ${x_.dim} and y = $m are incompatible") if t.dim != m then flaw ("init", s"dimensions of t = ${t.dim} and y = $m are incompatible") - modelName = s"RegressionCat_${t.dim2}" + _modelName = s"RegressionCat_${t.dim2}" debug ("init", s"$modelName on x_t = $getX, y = $y") @@ -174,15 +175,15 @@ object RegressionCat: * @see `Variable` * Note: To maintain consistency `Variable` 
is the only place where values for * dummy variables should be set. - * @param t the categorical/treatment vector - * @param sht the amount to shift the vector - * @param tmx the maximum vector categorical/treatment after shifting + * @param t the categorical/treatment vector + * @param shft the amount to shift the vector + * @param tmx the maximum vector categorical/treatment after shifting */ - def dummyVar (t: VectorI, shf: VectorI = shift, tmx: VectorI = tmax): VectorD = + def dummyVar (t: VectorI, shft: VectorI = shift, tmx: VectorI = tmax): VectorD = val xd = new VectorD (tmx.sum) var col = 0 for j <- t.indices do - val td = Variable.dummyVar (t(j), shift(j), tmax(j)) + val td = Variable.dummyVar (t(j), shft(j), tmax(j)) for k <- td.indices do xd(col) = td(k); col += 1 end for @@ -197,7 +198,7 @@ object RegressionCat: def stringVec2Dummy (svec: VectorS): MatrixD = val ivec = VectorS (svec).map2Int._1 // VectorS to VectorI debug ("stringVec2Dummy", s"svec = $svec -> ivec = $ivec") - val imat = MatrixI (ivec).transpose // VectorI as column in MatrixI + val imat = MatrixI (ivec).ᵀ // VectorI as column in MatrixI dummyVars (imat) // MatrixD of dummy columns end stringVec2Dummy @@ -313,7 +314,8 @@ end regressionCatTest2 val x1 = VectorS ("English", "French", "German", "Spanish") val (xe, map) = x1.map2Int // map strings to integers - val xm = MatrixI (xe).transpose // form a matrix from vector + println (s"map = $map") + val xm = MatrixI (xe).ᵀ // form a matrix from vector val xd = RegressionCat.dummyVars (xm) // make dummy variable columns println (s"encoded xe = $xe") // encoded @@ -350,8 +352,7 @@ import Example_AutoMPG.{oxr, y, oxr_fname} val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${oxr.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Regression with 
$tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -383,8 +384,7 @@ end regressionCatTest4 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${oxr.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for RegressionCat with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RegressionCat with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -424,7 +424,7 @@ end regressionCatTest5 banner ("Encode Dummy Variables") val x_enc = VectorI (0, 0, 1, 1, 0) // encoded vector - val dm = MatrixI (x_enc).transpose // form a dummy matrix (dummy columns) from encoded vector + val dm = MatrixI (x_enc).ᵀ // form a dummy matrix (dummy columns) from encoded vector val t = RegressionCat.dummyVars (dm) println (s"encoded x_enc = $x_enc") // encoded vector println (s"matrix encoded dm = $dm") // matrix encoded column @@ -468,7 +468,7 @@ end regressionCatTest6 println (s"xs = ${stringOf (xs)}") // data containing strings println (s"y = $y") // response vector - val x = MatrixD (for j <- 0 to 1 yield VectorD.fromValueTypes (xs(j))).transpose + val x = MatrixD (for j <- 0 to 1 yield VectorD.fromValueTypes (xs(j))).ᵀ val xs2 = VectorS.fromValueTypes (xs(2)) val dm = RegressionCat.stringVec2Dummy (xs2) diff --git a/src/main/scala/scalation/modeling/RegressionTree.scala b/src/main/scala/scalation/modeling/RegressionTree.scala index d64302128..3458da765 100644 --- a/src/main/scala/scalation/modeling/RegressionTree.scala +++ b/src/main/scala/scalation/modeling/RegressionTree.scala @@ -125,7 +125,7 @@ object RegressionTree: * @param ssy the sum of squared y-values * @param sse_t the sum of squared errors total (left + right) */ - def check (d: Int, j: Int, xj: VectorD, y: VectorD, thr: Double, ssy: Double, sse_t: Double): Boolean = + def check (d: Int, j: Int, xj: VectorD, y: VectorD, thr: Double, ssy: Double, sse_t: Double): Boolean = val 
(xj_lo, xj_hi) = (xj.min, xj.max) if thr < xj_lo || xj_hi < thr then flaw ("check", s"thr = $thr outside range of x$j: [$xj_lo, $xj_hi]") val sse_t_ = sse_LR (xj, y, thr, ssy) @@ -207,18 +207,18 @@ class RegressionTree (x: MatrixD, y: VectorD, fname_ : Array [String] = null, feature: Int = -1, use_r_fb: Boolean = false, leaves: Counter = Counter ()) extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): // call resetDF once tree is built + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2): // call resetDF once tree is built private val debug = debugf ("RegressionTree", false) // debug function - private val depth = hparam ("maxDepth").toInt // the depth limit for tree - private val thres = hparam ("threshold").toDouble // the threshold for the tree's parent node, @see buildTree - private val fbRatio = hparam ("fbRatio").toDouble // the feature bagging ratio + private val depth = hparam("maxDepth").toInt // the depth limit for tree + private val thres = hparam("threshold").toDouble // the threshold for the tree's parent node, @see buildTree + private val fbRatio = hparam("fbRatio").toDouble // the feature bagging ratio private val threshold = new VectorD (x.dim2) // store best splitting threshold for each feature private val score = new VectorD (x.dim2) // store best splitting score for each feature private var root: Node = null // root node - modelName = s"RegressionTree ($depth)" + _modelName = s"RegressionTree_$depth" debug ("init", s"Construct a Regression Tree: curDepth = $curDepth") @@ -380,11 +380,11 @@ class RegressionTree (x: MatrixD, y: VectorD, fname_ : Array [String] = null, while ! 
queue.isEmpty do val size = queue.size level += 1 - for i <- 0 until size do + cfor (0, size) { _ => val nod = queue.dequeue () println ("\t" * level + "[ " + nod + " ]") for cnode <- nod.child do queue += cnode - end for + } // cfor println () end while end printTree2 @@ -412,9 +412,11 @@ class RegressionTree (x: MatrixD, y: VectorD, fname_ : Array [String] = null, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - override def buildModel (x_cols: MatrixD): RegressionTree = - new RegressionTree (x_cols, y, null, hparam) + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): RegressionTree = + debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") + new RegressionTree (x_cols, y, fname2, hparam) end buildModel end RegressionTree @@ -492,12 +494,12 @@ end regressionTreeTest // println (mod.summary ()) // parameter/coefficient statistics banner (s"AutoMPG Regression Tree with d = $d Validation") - val qof2 = mod.validate ()() // out-of-sampling testing + val qof2 = mod.validate ()()._2 // out-of-sampling testing val iq = QoF.rSq.ordinal // index for rSq qual (d-1) = VectorD (qof(iq), qof(iq+1), qof2(iq)) // R^2, R^2 bar, R^2 os end for - new PlotM (VectorD.range (1, dmax+1), qual.transpose, Array ("R^2", "R^2 bar", "R^2 os"), + new PlotM (VectorD.range (1, dmax+1), qual.ᵀ, Array ("R^2", "R^2 bar", "R^2 os"), "RegressionTree in-sample, out-of-sample QoF vs. 
depth", lines = true) println (s"RegressionTree: qual = $qual") @@ -530,8 +532,7 @@ end regressionTreeTest2 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Regression Tree with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Regression Tree with $tech", lines = true) println (s"$tech: rSq = $rSq") end for diff --git a/src/main/scala/scalation/modeling/RegressionTreeGB.scala b/src/main/scala/scalation/modeling/RegressionTreeGB.scala index a392cdabb..dcd214634 100644 --- a/src/main/scala/scalation/modeling/RegressionTreeGB.scala +++ b/src/main/scala/scalation/modeling/RegressionTreeGB.scala @@ -28,7 +28,7 @@ import modeling.{RegressionTree => REG_TREE} // class RegressionTreeGB (x: MatrixD, y: VectorD, fname_ : Array [String] = null, hparam: HyperParameter = RegressionTree.hp) extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): // call resetDF once tree is built + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2): // call resetDF once tree is built private val debug = debugf ("RegressionTreeGB", false) // debug function private val depth = hparam("maxDepth").toInt // the max depth for the base regression trees @@ -36,7 +36,7 @@ class RegressionTreeGB (x: MatrixD, y: VectorD, fname_ : Array [String] = null, private val eta = hparam("eta").toDouble // the learning rate private val forest = new ArrayBuffer [REG_TREE] () // forest is a list of regression trees - modelName = s"RegressionTreeGB ($depth, $iter)" + _modelName = s"RegressionTreeGB_${depth}_$iter" //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Use Gradient Boosting for Training. 
For every iteration, evaluate the residual @@ -100,9 +100,11 @@ class RegressionTreeGB (x: MatrixD, y: VectorD, fname_ : Array [String] = null, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - override def buildModel (x_cols: MatrixD): RegressionTreeGB = - new RegressionTreeGB (x_cols, y, null, hparam) + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): RegressionTreeGB = + debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") + new RegressionTreeGB (x_cols, y, fname2, hparam) end buildModel end RegressionTreeGB @@ -134,7 +136,6 @@ object RegressionTreeGB: else val (x, y) = (xy.not (?, col), xy(?, col)) new RegressionTreeGB (x, y, fname, hparam) - end if end apply //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -154,7 +155,6 @@ object RegressionTreeGB: else // FIX - add rescale new RegressionTreeGB (x, y, fname, hparam) - end if end rescale end RegressionTreeGB @@ -229,12 +229,12 @@ end regressionTreeGBTest // println (mod.summary ()) // parameter/coefficient statistics banner (s"AutoMPG Regression Tree GB with d = $d Validation") - val qof2 = mod.validate ()() // out-of-sampling testing + val qof2 = mod.validate ()()._2 // out-of-sampling testing val iq = QoF.rSq.ordinal // index for rSq qual (d-1) = VectorD (qof(iq), qof(iq+1), qof2(iq)) // R^2, R^2 bar, R^2 os end for - new PlotM (VectorD.range (1, dmax+1), qual.transpose, Array ("R^2", "R^2 bar", "R^2 os"), + new PlotM (VectorD.range (1, dmax+1), qual.ᵀ, Array ("R^2", "R^2 bar", "R^2 os"), "RegressionTreeGB in-sample, out-of-sample QoF vs. 
depth", lines = true) println (s"RegressionTreeGB: qual = $qual") @@ -267,8 +267,7 @@ end regressionTreeGBTest2 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Regression Tree GB with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Regression Tree GB with $tech", lines = true) println (s"$tech: rSq = $rSq") end for diff --git a/src/main/scala/scalation/modeling/RegressionTreeMT.scala b/src/main/scala/scalation/modeling/RegressionTreeMT.scala index 87fef855b..74c8bc07f 100644 --- a/src/main/scala/scalation/modeling/RegressionTreeMT.scala +++ b/src/main/scala/scalation/modeling/RegressionTreeMT.scala @@ -78,17 +78,17 @@ class RegressionTreeMT (x: MatrixD, y: VectorD, fname_ : Array [String] = null, branchValue: Int = -1, feature: Int = -1, use_r_fb: Boolean = false, leaves: Counter = Counter ()) extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): // call resetDF once tree is built + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2): // call resetDF once tree is built private val debug = debugf ("RegressionTreeMT", true) // debug function - private val depth = hparam ("maxDepth").toInt // the depth limit for tree - private val thres = hparam ("threshold").toDouble // the threshold for the tree's parent node, @see buildTree + private val depth = hparam("maxDepth").toInt // the depth limit for tree + private val thres = hparam("threshold").toDouble // the threshold for the tree's parent node, @see buildTree private val threshold = new VectorD (x.dim2) // store best splitting threshold for each feature private val score = new VectorD (x.dim2) // store best splitting score for each feature private var root: Node = null // root node - modelName = s"RegressionTreeMT ($depth)" + _modelName = s"RegressionTreeMT_$depth" debug ("init", s"Construct 
a Regression Tree (MT): curDepth = $curDepth") @@ -215,11 +215,11 @@ class RegressionTreeMT (x: MatrixD, y: VectorD, fname_ : Array [String] = null, while ! queue.isEmpty do val size = queue.size level += 1 - for i <- 0 until size do + cfor (0, size) { _ => val nod = queue.dequeue () println ("\t" * level + "[ " + nod + " ]") for cnode <- nod.child do queue += cnode - end for + } // cfor println () end while end printTree2 @@ -248,9 +248,11 @@ class RegressionTreeMT (x: MatrixD, y: VectorD, fname_ : Array [String] = null, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - override def buildModel (x_cols: MatrixD): RegressionTreeMT = - new RegressionTreeMT (x_cols, y, null, hparam) + def buildModel (x_cols: MatrixD, fname2: Array [String]): RegressionTreeMT = + debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") + new RegressionTreeMT (x_cols, y, fname2, hparam) end buildModel end RegressionTreeMT @@ -324,12 +326,12 @@ end regressionTreeMTTest // println (mod.summary ()) // parameter/coefficient statistics banner (s"AutoMPG Regression Tree MT with d = $d Validation") - val qof2 = mod.validate ()() // out-of-sampling testing + val qof2 = mod.validate ()()._2 // out-of-sampling testing val iq = QoF.rSq.ordinal // index for rSq qual (d-1) = VectorD (qof(iq), qof(iq+1), qof2(iq)) // R^2, R^2 bar, R^2 os end for - new PlotM (VectorD.range (1, dmax+1), qual.transpose, Array ("R^2", "R^2 bar", "R^2 os"), + new PlotM (VectorD.range (1, dmax+1), qual.ᵀ, Array ("R^2", "R^2 bar", "R^2 os"), "RegressionTreeMT in-sample, out-of-sample QoF vs. 
depth", lines = true) println (s"RegressionTreeMT: qual = $qual") @@ -362,8 +364,7 @@ end regressionTreeMTTest2 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Regression Tree with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Regression Tree with $tech", lines = true) println (s"$tech: rSq = $rSq") end for diff --git a/src/main/scala/scalation/modeling/RegressionTreeRF.scala b/src/main/scala/scalation/modeling/RegressionTreeRF.scala index bd02e8e8d..34f0290bc 100644 --- a/src/main/scala/scalation/modeling/RegressionTreeRF.scala +++ b/src/main/scala/scalation/modeling/RegressionTreeRF.scala @@ -33,9 +33,9 @@ import modeling.{RegressionTree => REG_TREE} // class RegressionTreeRF (x: MatrixD, y: VectorD, fname_ : Array [String] = null, use_fb: Boolean = false, hparam: HyperParameter = RegressionTree.hp) extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): // call resetDF once tree is built + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2): // call resetDF once tree is built -// private val debug = debugf ("RegressionTreeRF", false) // debug function + private val debug = debugf ("RegressionTreeRF", false) // debug function private val flaw = flawf ("RegressionTreeRF") // flaw function private val depth = hparam("maxDepth").toInt // the max depth for the base regression trees private val nTrees = hparam("nTrees").toInt // number of trees @@ -48,7 +48,7 @@ class RegressionTreeRF (x: MatrixD, y: VectorD, fname_ : Array [String] = null, if bRatio <= 0 || bRatio >= 1 then flaw ("init", "RF bagging ratio restricted to (0, 1)") if fbRatio <= 0 || fbRatio >= 1 then flaw ("init", "RF feature bagging ratio restricted to (0, 1)") - modelName = s"RegressionTreeRF ($depth, $nTrees, $use_fb)" + _modelName = 
s"RegressionTreeRF_${depth}_${nTrees}_$use_fb" //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Train the regression tree RF by selecting thresholds for the features/variables @@ -61,6 +61,7 @@ class RegressionTreeRF (x: MatrixD, y: VectorD, fname_ : Array [String] = null, for k <- 0 until nTrees do val (xx, yy, imap) = subSample (x_, y_, sampleSize, k) // select rows of data matrix // debug ("train", s"for tree$k, imap = ${stringOf (imap)}") + debug ("train", s"for tree$k, imap.size = $imap.size}") forest(k) = new REG_TREE (xx, yy, fname, hparam, use_r_fb = use_fb) // means/regression in leaves forest(k).train (xx, yy) @@ -98,9 +99,11 @@ class RegressionTreeRF (x: MatrixD, y: VectorD, fname_ : Array [String] = null, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - override def buildModel (x_cols: MatrixD): RegressionTreeRF = - new RegressionTreeRF (x_cols, y, fname, use_fb, hparam) + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): RegressionTreeRF = + debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") + new RegressionTreeRF (x_cols, y, fname2, use_fb, hparam) end buildModel end RegressionTreeRF @@ -175,12 +178,12 @@ end regressionTreeRFTest // println (mod.summary ()) // parameter/coefficient statistics banner (s"AutoMPG Regression Tree RF with d = $d Validation") - val qof2 = mod.validate ()() // out-of-sampling testing + val qof2 = mod.validate ()()._2 // out-of-sampling testing val iq = QoF.rSq.ordinal // index for rSq qual (d-1) = VectorD (qof(iq), qof(iq+1), qof2(iq)) // R^2, R^2 bar, R^2 os end for - new PlotM (VectorD.range (1, dmax+1), qual.transpose, Array ("R^2", "R^2 bar", "R^2 os"), + new PlotM (VectorD.range (1, dmax+1), qual.ᵀ, Array 
("R^2", "R^2 bar", "R^2 os"), "RegressionTreeRF in-sample, out-of-sample QoF vs. depth", lines = true) println (s"RegressionTreeRF: qual = $qual") @@ -213,8 +216,7 @@ end regressionTreeRFTest2 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Regression Tree RF with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Regression Tree RF with $tech", lines = true) println (s"$tech: rSq = $rSq") end for diff --git a/src/main/scala/scalation/modeling/RegressionTreeRF_MT.scala b/src/main/scala/scalation/modeling/RegressionTreeRF_MT.scala index 6f5f9f447..bc533fdee 100644 --- a/src/main/scala/scalation/modeling/RegressionTreeRF_MT.scala +++ b/src/main/scala/scalation/modeling/RegressionTreeRF_MT.scala @@ -33,14 +33,14 @@ import modeling.{RegressionTreeMT => REG_TREE} // class RegressionTreeRF_MT (x: MatrixD, y: VectorD, fname_ : Array [String] = null, use_fb: Boolean = false, hparam: HyperParameter = RegressionTree.hp) extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): // call resetDF once tree is built + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2): // call resetDF once tree is built private val debug = debugf ("RegressionTreeRF_MT", true) // debug function private val flaw = flawf ("RegressionTreeRF_MT") // flaw function private val depth = hparam("maxDepth").toInt // the max depth for the base regression trees - private val nTrees = hparam ("nTrees").toInt // number of trees - private val bRatio = hparam ("bRatio").toDouble // bagging ratio - private val fbRatio = hparam ("fbRatio").toDouble // feature bagging ratio + private val nTrees = hparam("nTrees").toInt // number of trees + private val bRatio = hparam("bRatio").toDouble // bagging ratio + private val fbRatio = hparam("fbRatio").toDouble // feature bagging ratio private val 
sampleSize = (bRatio * x.dim).toInt // size of matrix sub-samples private val forest = Array.ofDim [REG_TREE] (nTrees) // forest of regression trees @@ -48,7 +48,7 @@ class RegressionTreeRF_MT (x: MatrixD, y: VectorD, fname_ : Array [String] = nul if bRatio <= 0 || bRatio >= 1 then flaw ("init", "RF bagging ratio restricted to (0, 1)") if fbRatio <= 0 || fbRatio >= 1 then flaw ("init", "RF feature bagging ratio restricted to (0, 1)") - modelName = s"RegressionTreeRF_MT ($depth, $nTrees, $use_fb)" + _modelName = s"RegressionTreeRF_MT_${depth}_${nTrees}_$use_fb" //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Train the regression tree RF by selecting thresholds for the features/variables @@ -61,6 +61,7 @@ class RegressionTreeRF_MT (x: MatrixD, y: VectorD, fname_ : Array [String] = nul for k <- 0 until nTrees do val (xx, yy, imap) = subSample (x_, y_, sampleSize, k) // select rows of data matrix // debug ("train", s"for tree$k, imap = ${stringOf (imap)}") + debug ("train", s"for tree$k, imap.size = $imap.size}") forest(k) = new REG_TREE (xx, yy, fname, hparam, use_r_fb = use_fb) // means/regression in leaves forest(k).train (xx, yy) @@ -98,9 +99,11 @@ class RegressionTreeRF_MT (x: MatrixD, y: VectorD, fname_ : Array [String] = nul //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. 
* @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - override def buildModel (x_cols: MatrixD): RegressionTreeRF_MT = - new RegressionTreeRF_MT (x_cols, y, fname, use_fb, hparam) + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): RegressionTreeRF_MT = + debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") + new RegressionTreeRF_MT (x_cols, y, fname2, use_fb, hparam) end buildModel end RegressionTreeRF_MT @@ -169,18 +172,18 @@ end regressionTreeRF_MTTest banner ("AutoMPG Regression Tree RF with depth d = $d") RegressionTree.hp("maxDepth") = d RegressionTree.hp("nTrees") = 7 - val mod = new RegressionTreeRF_MT (x, y, x_fname) // create model with intercept (else pass x) + val mod = new RegressionTreeRF_MT (x, y, x_fname) // create model with intercept (else pass x) val qof = mod.trainNtest ()()._2 // train and test the model // mod.printTree () // print the regression tree // println (mod.summary ()) // parameter/coefficient statistics banner (s"AutoMPG Regression Tree RF with d = $d Validation") - val qof2 = mod.validate ()() // out-of-sampling testing + val qof2 = mod.validate ()()._2 // out-of-sampling testing val iq = QoF.rSq.ordinal // index for rSq qual (d-1) = VectorD (qof(iq), qof(iq+1), qof2(iq)) // R^2, R^2 bar, R^2 os end for - new PlotM (VectorD.range (1, dmax+1), qual.transpose, Array ("R^2", "R^2 bar", "R^2 os"), + new PlotM (VectorD.range (1, dmax+1), qual.ᵀ, Array ("R^2", "R^2 bar", "R^2 os"), "RegressionTreeRF_MT in-sample, out-of-sample QoF vs. 
depth", lines = true) println (s"RegressionTreeRF_MT: qual = $qual") @@ -213,8 +216,7 @@ end regressionTreeRF_MTTest2 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Regression Tree RF with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Regression Tree RF with $tech", lines = true) println (s"$tech: rSq = $rSq") end for diff --git a/src/main/scala/scalation/modeling/RegressionWLS.scala b/src/main/scala/scalation/modeling/RegressionWLS.scala index ce0fe48b5..c6dfd2624 100644 --- a/src/main/scala/scalation/modeling/RegressionWLS.scala +++ b/src/main/scala/scalation/modeling/RegressionWLS.scala @@ -55,7 +55,7 @@ class RegressionWLS (x: MatrixD, y: VectorD, fname_ : Array [String] = null, private val debug = debugf ("RegressionWLS", true) // debug function private val flaw = flawf ("RegressionWLS") // flaw function - modelName = "RegressionWLS" + _modelName = "RegressionWLS" if w == null then w = RegressionWLS.weights // adjust weights @@ -117,33 +117,33 @@ class RegressionWLS (x: MatrixD, y: VectorD, fname_ : Array [String] = null, debug ("trainNTest", s"b = $b") val (yp, qof) = test (xx, yy) println (report (qof)) - if DO_PLOT then - val (ryy, ryp) = orderByY (yy, yp) // order by yy - new Plot (null, ryy, ryp, s"$modelName: y actual, predicted") - end if + Predictor.plotPrediction (yy, yp, modelName) (yp, qof) end trainNtest //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /* Use validation to compute test Quality of Fit (QoF) measures by dividing - * the full dataset into a TESTING set and a TRAINING set. - * The test set is defined by idx and the rest of the data is the training set. + * the full dataset into a TESTING-set and a TRAINING-set. + * The testing-set is defined by idx and the rest of the data is the training-set. 
+ * @see `modeling.Predictor.validate` about the RANDOM, FIRST, and LAST options + * for selecting the testing-set. * @param rando flag indicating whether to use randomized or simple validation - * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) - * @param idx the prescribed TESTING set indices + * @param ratio the ratio of the TESTING-set to the full dataset (most common 70-30 (.3), 80-20 (.2)) + * @param idx the prescribed TESTING-set indices (default => generate) */ - override def validate (rando: Boolean = true, ratio: Double = 0.2) - (idx : IndexedSeq [Int] = testIndices ((ratio * y.dim).toInt, rando)): VectorD = + override def validate (rando: Boolean = true, ratio: Double = Model.TE_RATIO) +// (idx: IndexedSeq [Int] = testIndices ((ratio * y.dim).toInt, rando)): + (idx: IndexedSeq [Int] = testIndices (y.dim, (ratio * y.dim).toInt, rando)): + (VectorD, VectorD) = val (x_e, x_) = (x(idx), getX.not(idx)) // test, training data/input matrices val (y_e, y_) = (y(idx), getY.not(idx)) // test, training response/output vectors - train (x_, y_) // train model on the training set - val qof = test (x_e, y_e)._2 // test on test-set and get QoF measures - if qof(QoF.sst.ordinal) <= 0.0 then // requires variation in test-set + train (x_, y_) // train model on the TRAINING-set + val (yp, qof) = test (x_e, y_e) // test on TESTING-set and get QoF measures + if qof(QoF.sst.ordinal) <= 0.0 then // requires variation in TESTING-set flaw ("validate", "chosen testing set has no variability") - end if println (FitM.fitMap (qof, QoF.values.map (_.toString))) - qof + (yp, qof) end validate end RegressionWLS @@ -227,7 +227,6 @@ object RegressionWLS: w = rp.recip // set weight vector for WLS to reciprocal of rp else w = w0 // set weights using custom values - end if rootW = w.sqrt // set root of weight vector end setWeights @@ -258,9 +257,8 @@ object RegressionWLS: * @param x the data matrix * @param y the response vector * @param z a vector to 
predict - * @param w the root weights */ - def test (x: MatrixD, y: VectorD, z: VectorD, w: VectorD = null): Unit = + def test (x: MatrixD, y: VectorD, z: VectorD): Unit = banner ("Fit the parameter vector b") val mod = new RegressionWLS (x, y) mod.trainNtest ()() // train and test the model @@ -305,10 +303,11 @@ end RegressionWLS test (x, y, z) // weights set internally // val w0 = VectorD (0.106085, 0.0997944, 0.0831033, 0.160486, 0.171810) - val w0 = VectorD (0.318254, 0.299383, 0.249310, 0.481457, 0.515431) +// val w0 = VectorD (0.318254, 0.299383, 0.249310, 0.481457, 0.515431) banner ("custom weights") - test (x, y, z, w0) // custom weights explicitly given +// test (x, y, z, w0) // custom weights explicitly given - FIX: w0 not used yet + test (x, y, z) // custom weights explicitly given end regressionWLSTest diff --git a/src/main/scala/scalation/modeling/Regularized.scala b/src/main/scala/scalation/modeling/Regularized.scala new file mode 100644 index 000000000..e34dbfb5f --- /dev/null +++ b/src/main/scala/scalation/modeling/Regularized.scala @@ -0,0 +1,34 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Thu Mar 5 16:10:39 EST 2026 + * @see LICENSE (MIT style license file). + * + * @note Model Support: Regularization Method + */ + +package scalation +package modeling + +import scalation.mathstat._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Regularized` trait describes the `center` method that is to be supported + * by all companion objects supporting regularized regression. + */ +trait Regularized: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a Regularized Regression from a data matrix and response vector. + * This function centers the data. Implementations should return specific return types. 
+ * @param x the un-centered data/input m-by-n matrix, NOT augmented with a first column of ones + * @param y the un-centered response/output vector + * @param fname the feature/variable names (defaults to null) + * @param hparam the shrinkage hyper-parameter (0 => OLS) in the penalty term 'lambda * norm of b' + */ + def center (x: MatrixD, y: VectorD, fname: Array [String] = null, + hparam: HyperParameter = RidgeRegression.hp): Predictor + +end Regularized + diff --git a/src/main/scala/scalation/modeling/RidgeBridgeRegression.scala b/src/main/scala/scalation/modeling/RidgeBridgeRegression.scala new file mode 100644 index 000000000..f645e388e --- /dev/null +++ b/src/main/scala/scalation/modeling/RidgeBridgeRegression.scala @@ -0,0 +1,308 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Yousef Fekri Dabanloo + * @version 2.0 + * @date Thu Jul 24 11:23:31 EST 2025 + * @see LICENSE (MIT style license file). + * + * @note Model: Multiple Linear Regression with Ridge-Bridge Regularization + * + * Before calling the constructor, users should center their data; automatic by all factory methods. + */ + +package scalation +package modeling + +import scala.math.abs + +import scalation.mathstat._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RidgeBridgeRegression` class supports multiple linear regression with a hybrid + * of Ridge (L2) and Bridge (Lq with 0 < q < 1) regularization. 
It solves: + * y = Xb + e + * by minimizing: + * ||y - Xb||^2 + lambda * ||b||^2 + beta * sum(|b_j|^q) + * @param x the centered data/input matrix + * @param y the centered response/output vector + * @param fname_ the feature/variable names (defaults to null) + * @param hparam the regularization hyper-parameters (lambda for ridge, beta for bridge, q) + * @param xℱ the transformation applied to x (e.g., Center or Norm) + * @param yℱ the transformation applied to y (e.g., Center) + */ +class RidgeBridgeRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, + hparam: HyperParameter = RidgeRegression.hp, + xℱ: Transform = null, yℱ: Transform = null) + extends Predictor (x, y, fname_, hparam) + with Fit (dfr = x.dim2, df = x.dim - x.dim2 - 1): + // degrees of freedom: dfr = n, df = m - n - 1 as centered x matrix has 1 less column + // fix after training by moving a dof from error to model for each coefficient eliminated + // if not using an intercept df = (x.dim2, x.dim-x.dim2), correct by calling 'resetDF' method from `Fit` + + private val debug = debugf ("RidgeBridgeRegression", false) + private val lambda = hparam("lambda").toDouble // the L_2 shrinkage parameter + private val beta = hparam("beta").toDouble // the L_q shrinkage parameter + private val sparse = hparam("sparse").toInt == 1 // whether to sparsify + private val maxIter = hparam("maxIter").toInt // maximum number of iterations for IRR + private val tol = hparam("tol").toDouble // tolerance for convergence + private val eps = hparam("eps").toDouble // small constant to avoid division by zero + private val maxW = 1E6 // maximum weight + private val q = hparam("pow").toDouble // exponent/L_q norm + private val q_2 = q - 2.0 + + _modelName = s"RidgeBridgeRegression_$q" + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Train the model using Iterative Reweighted RidgeRegression (IRR). 
+ * @param x_ the input/data matrix + * @param y_ the output/response vector + */ + def train (x_ : MatrixD = x, y_ : VectorD = y): Unit = + val xtX = x_.ᵀ * x_ + val xty = x_.ᵀ * y_ + val n = x_.dim2 + val w = new MatrixD (n, n) // diagonal weight matrix + + val ridgeMod = RidgeRegression.center (x_, y_, fname_, hparam) + ridgeMod.trainNtest ()() + b = ridgeMod.parameter + + var (iter, diff) = (0, Double.MaxValue) + while iter < maxIter && diff > tol do + cfor (0, n) { j => + val wj = if abs(b(j)) > eps then (q / 2.0) * abs(b(j)) ~^ q_2 else maxW + w(j, j) = beta * wj + lambda + } // cfor + + val fac = Fac_Cholesky (xtX + w).factor () + val b_new = fac.solve (xty) + + diff = (b_new - b).norm + b = b_new + iter += 1 + end while + + if sparse then LassoRegression.sparsify (b) + debug ("train", s"IRR estimates parameter b = $b") + val nz = b.countZero // count number of coefficients set to zero + if nz > 0 then resetDF (x.dim2 - nz, x.dim - x.dim2 - 1 + nz) + end train + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Test a predictive model y_ = f(x_) + e and return its QoF vector. + * Testing may be be in-sample (on the training set) or out-of-sample + * (on the testing set) as determined by the parameters passed in. + * Note: must call train before test. + * @param x_ the testing/full data/input matrix (defaults to full x) + * @param y_ the testing/full response/output vector (defaults to full y) + */ + def test (x_ : MatrixD = x, y_ : VectorD = y): (VectorD, VectorD) = + val yp = predict_ (x_) // make predictions + (yp, diagnose (y_, yp)) // return predictions and QoF vector + end test + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of y = f(z) by evaluating the formula y = b dot z. + * It works on transformed values. 
+ * @param z the new vector to predict + */ + def predict_ (z: VectorD): Double = b dot z + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of vector y = f(x_, b). It works on transformed values. + * @param x_ the matrix to use for making predictions, one for each row + */ + def predict_ (x_ : MatrixD): VectorD = x_ * b + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of y = f(z) by evaluating the formula y = b dot z. + * It is overridden to handle transformations. + * @param z the new vector to predict + */ + override def predict (z: VectorD): Double = + val zz = if xℱ == null then z else xℱ.f(MatrixD (z))(0) + if yℱ == null then b dot zz else yℱ.fi_(b dot zz) + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of vector y = f(x_, b). It is overridden to handle transformations. + * @param x_ the matrix to use for making predictions, one for each row + */ + override def predict (x_ : MatrixD): VectorD = + val xx = if xℱ == null then x_ else xℱ.f(x_) + if yℱ == null then xx * b else yℱ.fi(xx * b) + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Produce a QoF summary for a model with diagnostics for each predictor 'x_j' + * and the overall Quality of Fit (QoF). + * @param x_ the testing/full data/input matrix + * @param fname_ the array of feature/variable names + * @param b_ the parameters/coefficients for the model + * @param vifs the Variance Inflation Factors (VIFs) + */ + override def summary (x_ : MatrixD = getX, fname_ : Array [String] = fname, b_ : VectorD = b, + vifs: VectorD = vif ()): String = + super.summary (x_, fname_, b_, vifs) // summary from `Fit` + end summary + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a sub-model that is restricted to the given columns of the data matrix. 
+ * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) + */ + override def buildModel (x_cols: MatrixD, fname2: Array [String] = null): RidgeBridgeRegression = + new RidgeBridgeRegression (x_cols, y, fname2, hparam) + end buildModel + +end RidgeBridgeRegression + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RidgeBridgeRegression` companion object provides default hyper-parameters + * and convenience factory methods. + */ +object RidgeBridgeRegression extends Regularized: + + val hp = RidgeRegression.hp + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a Ridge-Bridge Regression from a combined xy matrix. + * @param xy the centered combines x and y matrix + * @param fname_ the feature/variable names (defaults to null) + * @param hparam the regularization hyper-parameters (lambda for ridge, beta for bridge, q) + * @param col the column used for response variable + */ + def apply (xy: MatrixD, fname: Array [String] = null, + hparam: HyperParameter = hp)(col: Int = xy.dim2 - 1): RidgeBridgeRegression = + val (x, y) = (xy.not(?, col), xy(?, col)) + val xℱ = CenterForm (x) + val yℱ = CenterForm (y) + new RidgeBridgeRegression (xℱ.f(x), yℱ.f(y), fname, hparam, xℱ, yℱ) + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a Ridge-Bridge Regression from an x matrix and y vector. 
+ * @param x the centered data/input matrix + * @param y the centered response/output vector + * @param fname_ the feature/variable names (defaults to null) + * @param hparam the regularization hyper-parameters (lambda for ridge, beta for bridge, q) + */ + def center (x: MatrixD, y: VectorD, fname: Array [String] = null, + hparam: HyperParameter = hp): RidgeBridgeRegression = + val xℱ = CenterForm (x) + val yℱ = CenterForm (y) + new RidgeBridgeRegression (xℱ.f(x), yℱ.f(y), fname, hparam, xℱ, yℱ) + end center + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a Ridge-Bridge Regression object from a data matrix and a response vector. + * This method provides data rescaling of x and centering of y. + * @param x the un-centered data/input m-by-n matrix, NOT augmented with a first column of ones + * @param y the un-centered response/output vector + * @param fname the feature/variable names (defaults to null) + * @param hparam the shrinkage hyper-parameter (0 => OLS) in the penalty term 'lambda * b dot b' + */ + def rescale (x: MatrixD, y: VectorD, fname: Array [String] = null, + hparam: HyperParameter = hp): RidgeBridgeRegression = + val xℱ = NormForm (x) + val yℱ = CenterForm (y) + new RidgeBridgeRegression (xℱ.f(x), yℱ.f(y), fname, hparam, xℱ, yℱ) + end rescale + +end RidgeBridgeRegression + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ridgeBridgeRegressionTest` main function tests the `RidgeBridgeRegression` class + * using the following regression equation. + * y = b dot x = b_1*x_1 + b_2*x_2. 
+ * It compares `RidgeBridgeRegression` with `Regression` + * @see statmaster.sdu.dk/courses/st111/module03/index.html + * > runMain scalation.modeling.ridgeBridgeRegressionTest + */ +@main def ridgeBridgeRegressionTest (): Unit = + + // 5 data points: x_0 x_1 + val x = MatrixD ((5, 2), 36.0, 66.0, // 5-by-2 data matrix + 37.0, 68.0, + 47.0, 64.0, + 32.0, 53.0, + 1.0, 101.0) + val y = VectorD (745.0, 895.0, 442.0, 440.0, 1598.0) // 5-dim response vector + +// println ("model: y = b_0 + b_1*x_1 + b_2*x_2") + println ("model: y = b₀ + b₁*x₁ + b₂*x₂") // for Regression, remove b₀ for Ridge + println (s"x = $x") + println (s"y = $y") + + banner ("Regression") + val ox = VectorD.one (y.dim) +^: x // prepend a column of all 1's + val reg = new Regression (ox, y) // create a Regression model + reg.trainNtest ()() // train and test the model + + banner ("RidgeBridgeRegression with manual centering") + val mu_x = x.mean // column-wise mean of x + val mu_y = y.mean // mean of y + val x_c = x - mu_x // centered x (column-wise) + val y_c = y - mu_y // centered y + val mod = new RidgeBridgeRegression (x_c, y_c) // create a Ridge Regression model + mod.trainNtest ()() // train and test the model + + banner ("RidgeBridgeRegression with Auto-centering") + val amod = RidgeBridgeRegression.center (x, y) // create an auto-centered Ridge Regression model + amod.trainNtest ()() // train and test the model + + banner ("RidgeBridgeRegression with Rescaling") + val rmod = RidgeBridgeRegression.rescale (x, y) // create a rescaled Ridge Regression model + rmod.trainNtest ()() // train and test the model + + banner ("Make one OOS Predictions") + val z = VectorD (20.0, 80.0) // new instance to predict + val _1z = 1.0 +: z // prepend 1 to z + val z_c = z - mu_x // center z + println (s"reg.predict ($z) = ${reg.predict (_1z)}") // predict using _1z + println (s"mod.predict ($z) = ${mod.predict (z_c) + mu_y}") // predict using z_c and add y's mean + println (s"amod.predict ($z) = ${amod.predict 
(z)}") // predict using z with auto-centering + println (s"rmod.predict ($z) = ${rmod.predict (z)}") // predict using z with rescaling + + banner ("Compare Summaries") + println (reg.summary ()) + println (mod.summary ()) + println (amod.summary ()) + println (rmod.summary ()) + +end ridgeBridgeRegressionTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ridgeBridgeRegressionTest2` main function tests the `RidgeBridgeRegression` class using + * the AutoMPG dataset. Assumes no missing values. + * It also combines feature selection with cross-validation and plots + * R^2, R^2 bar and R^2 cv vs. the instance index. + * Note, since x0 is automatically included in feature selection, make it an important variable. + * > runMain scalation.modeling.ridgeBridgeRegressionTest2 + */ +@main def ridgeBridgeRegressionTest2 (): Unit = + + import scalation.modeling.Example_AutoMPG._ // import sample dataset (x, y, x_fname, etc.) + import RidgeRegression.hp + + hp("beta") = 10.0 + banner("AutoMPG Regression") + val reg = new Regression(ox, y, ox_fname) + reg.trainNtest()() + println(reg.summary()) + + banner("AutoMPG Ridge Regression") + val mod1 = RidgeRegression.center (x, y, x_fname) + mod1.trainNtest ()() + println (mod1.summary ()) + + banner("AutoMPG Ridge + Bridge Regression") + val mod2 = RidgeBridgeRegression.center (x, y, x_fname) + mod2.trainNtest ()() + println (mod2.summary ()) + +end ridgeBridgeRegressionTest2 + diff --git a/src/main/scala/scalation/modeling/RidgeRegression.scala b/src/main/scala/scalation/modeling/RidgeRegression.scala index 1a88b19b4..1ba6d56af 100644 --- a/src/main/scala/scalation/modeling/RidgeRegression.scala +++ b/src/main/scala/scalation/modeling/RidgeRegression.scala @@ -12,6 +12,7 @@ * @see Hastie, T., Tibshirani, R., & Friedman, J. (2009). 
The Elements of Statistical Learning * * Since regularization reduces near singularity, Cholesky is used as default + * Before calling the constructor, users should center their data; automatic by all factory methods. */ package scalation @@ -28,7 +29,7 @@ import scalation.mathstat._ * In this case, x is multi-dimensional [x_1, ... x_k]. Ridge regression puts * a penalty on the L2 norm of the parameters b to reduce the chance of them taking * on large values that may lead to less robust models. Both the input matrix x - * and the response vector y are centered (zero mean). Fit the parameter vector + * and the response vector y should be centered (zero mean). Fit the parameter vector * b in the regression equation * y = b dot x + e = b_1 * x_1 + ... b_k * x_k + e * where e represents the residuals (the part not explained by the model). @@ -46,20 +47,24 @@ import scalation.mathstat._ * @param y the centered response/output m-vector * @param fname_ the feature/variable names (defaults to null) * @param hparam the shrinkage hyper-parameter, lambda (0 => OLS) in the penalty term 'lambda * b dot b' + * @param xℱ the transformation applied to x (e.g., Center or Norm) + * @param yℱ the transformation applied to y (e.g., Center) */ class RidgeRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, - hparam: HyperParameter = RidgeRegression.hp) + hparam: HyperParameter = RidgeRegression.hp, + xℱ: Transform = null, yℱ: Transform = null) extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2, df = x.dim - x.dim2 - 1): + with Fit (dfr = x.dim2, df = x.dim - x.dim2 - 1): + // degrees of freedom: dfr = n, df = m - n - 1 as centered x matrix has 1 less column // if not using an intercept df = (x.dim2, x.dim-x.dim2), correct by calling 'resetDF' method from `Fit` // no intercept => correct Degrees of Freedom (DoF); as lambda get larger, need effective DoF private val debug = debugf ("RidgeRegression", false) // debug function - private val lambda = if 
hparam("lambda") <= 0.0 then findLambda._1 - else hparam ("lambda").toDouble private val algorithm = hparam("factorization") // factorization algorithm + private val lambda = if hparam("lambda") <= 0.0 then findLambda._1 + else hparam("lambda").toDouble - modelName = "RidgeRegression" + _modelName = s"RidgeRegression_${lambda}" //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the value of the shrinkage parameter lambda. @@ -71,8 +76,8 @@ class RidgeRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, * factorization technique. * @param x_ the data/input matrix */ - private def solver (x_ : MatrixD = x): Factorization = - val xtx = x_.transpose * x_ // pre-compute X.t * X + private def solver (x_ : MatrixD): Factorization = + val xtx = x_.ᵀ * x_ // pre-compute X.t * X val ey = MatrixD.eye (x_.dim, x_.dim2) // identity matrix val xtx_ = xtx.copy // copy xtx (X.t * X) for i <- xtx_.indices do xtx_(i, i) += lambda // add lambda to the diagonal @@ -102,7 +107,7 @@ class RidgeRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, b = fac match // solve for coefficient vector b case fac: Fac_QR => fac.solve (y_ ++ new VectorD (y_.dim)) // case fac: Fac_SVD => fac.solve (y_) - case _ => fac.solve (x_.transpose * y_) + case _ => fac.solve (x_.ᵀ * y_) end train //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -114,7 +119,7 @@ class RidgeRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, * @param y_ the testing/full response/output vector (defaults to full y) */ def test (x_ : MatrixD = x, y_ : VectorD = y): (VectorD, VectorD) = - val yp = predict (x_) // make predictions + val yp = predict_ (x_) // make predictions on transformed values (yp, diagnose (y_, yp)) // return predictions and QoF vector end test @@ -128,7 +133,7 @@ class RidgeRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, var l = lambda // start with a small default value var 
l_best = l var sse = Double.MaxValue - for i <- 0 to 20 do + cfor (0, 20) { _ => RidgeRegression.hp("lambda") = l val rrg = new RidgeRegression (x, y) val stats = rrg.crossValidate () @@ -137,7 +142,7 @@ class RidgeRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, if sse2 < sse then { sse = sse2; l_best = l } // debug ("findLambda", showQofStatTable (stats)) l *= 2 - end for + } // cfor (l_best, sse) // best lambda and its sse_cv end findLambda @@ -153,7 +158,7 @@ class RidgeRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, def f_sse (λ: Double): Double = lambda = λ train (xx, yy) - e = yy - xx * b + val e = yy - xx * b val sse = e dot e if sse.isNaN then throw new ArithmeticException ("sse is NaN") debug ("findLambda2", s"for lambda = $λ, sse = $sse") @@ -166,10 +171,36 @@ class RidgeRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, */ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict the value of vector y = f(x_, b). It is overridden for speed. + /** Predict the value of y = f(z) by evaluating the formula y = b dot z. + * It works on transformed values. + * @param z the new vector to predict + */ + def predict_ (z: VectorD): Double = b dot z + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of vector y = f(x_, b). It works on transformed values. + * @param x_ the matrix to use for making predictions, one for each row + */ + def predict_ (x_ : MatrixD): VectorD = x_ * b + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of y = f(z) by evaluating the formula y = b dot z. + * It is overridden to handle transformations. 
+ * @param z the new vector to predict + */ + override def predict (z: VectorD): Double = + val zz = if xℱ == null then z else xℱ.f(MatrixD (z))(0) + if yℱ == null then b dot zz else yℱ.fi_(b dot zz) + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of vector y = f(x_, b). It is overridden to handle transformations. * @param x_ the matrix to use for making predictions, one for each row */ - override def predict (x_ : MatrixD): VectorD = x_ * b + override def predict (x_ : MatrixD): VectorD = + val xx = if xℱ == null then x_ else xℱ.f(x_) + if yℱ == null then xx * b else yℱ.fi(xx * b) + end predict //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Produce a QoF summary for a model with diagnostics for each predictor 'x_j' @@ -187,10 +218,11 @@ class RidgeRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - override def buildModel (x_cols: MatrixD): RidgeRegression = + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): RidgeRegression = debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") - new RidgeRegression (x_cols, y, null, hparam) + new RidgeRegression (x_cols, y, fname2, hparam) end buildModel end RidgeRegression @@ -200,19 +232,37 @@ end RidgeRegression /** The `RidgeRegression` companion object defines hyper-parameters and provides * factory methods creating ridge regression models. 
*/ -object RidgeRegression: +object RidgeRegression extends Regularized: - /** Base hyper-parameter specification for `RidgeRegression` + /** Base hyper-parameter specification for `RidgeRegression` and other regularized regression classes */ - val hp = new HyperParameter; + val hp = new HyperParameter hp += ("factorization", "Fac_Cholesky", "Fac_Cholesky") // factorization algorithm - hp += ("lambda", 0.01, 0.01) // L2 regularization/shrinkage parameter + hp += ("lambda", 0.1, 0.1) // Ridge L_2 regularization/shrinkage parameter + hp += ("sparse", 1, 1) // 1 => sparsify, 0 => don't (@see sparsify in `LassoRegression`) + hp += ("beta", 0.1, 0.1) // Bridge L_q regularization/shrinkage parameter + hp += ("pow", 0.5, 0.5) // Bridge power (0 < q < 1) + hp += ("tol", 1e-6, 1e-6) // convergence tolerance + hp += ("eps", 1e-8, 1e-8) // small constant to avoid division by zero + hp += ("pow", 0.5, 0.5) // exponent/L_q norm + hp += ("maxIter", 50, 50) // maximum number of iterations, needed for Bridge + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Fix the smape calculation for be in the original rather than centered scale. + * @param mod the model being used + * @param y the response vector in the original scale + * @param qof the Quality-of-Fit metrics + */ + def fix_smape (mod: RidgeRegression, y: VectorD, qof: VectorD): VectorD = + qof(QoF.smape.ordinal) = FitM.smapeF (y, mod.predict (mod.getX) + y.mean) + qof + end fix_smape //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a Ridge Regression from a combined data matrix. * This function centers the data. 
- * @param xy the uncentered data/input m-by-n matrix, NOT augmented with a first column of ones - * and the uncentered response m-vector (combined) + * @param xy the un-centered data/input m-by-n matrix, NOT augmented with a first column of ones + * and the un-centered response m-vector (combined) * @param fname the feature/variable names (defaults to null) * @param hparam the shrinkage hyper-parameter (0 => OLS) in the penalty term lambda * b dot b * @param col the designated response column (defaults to the last column) @@ -220,32 +270,40 @@ object RidgeRegression: def apply (xy: MatrixD, fname: Array [String] = null, hparam: HyperParameter = hp)(col: Int = xy.dim2 - 1): RidgeRegression = val (x, y) = (xy.not(?, col), xy(?, col)) - val mu_x = x.mean // column-wise mean of x - val mu_y = y.mean // mean of y - val x_c = x - mu_x // centered x (column-wise) - val y_c = y - mu_y // centered y - new RidgeRegression (x_c, y_c, fname, hparam) + val xℱ = CenterForm (x) + val yℱ = CenterForm (y) + new RidgeRegression (xℱ.f(x), yℱ.f(y), fname, hparam, xℱ, yℱ) end apply //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a Ridge Regression from a data matrix and response vector. * This function centers the data. 
- * @param x the uncentered data/input m-by-n matrix, NOT augmented with a first column of ones - * @param y the uncentered response/output vector + * @param x the un-centered data/input m-by-n matrix, NOT augmented with a first column of ones + * @param y the un-centered response/output vector * @param fname the feature/variable names (defaults to null) * @param hparam the shrinkage hyper-parameter (0 => OLS) in the penalty term 'lambda * b dot b' */ def center (x: MatrixD, y: VectorD, fname: Array [String] = null, - hparam: HyperParameter = RidgeRegression.hp): RidgeRegression = - val mu_x = x.mean // column-wise mean of x - val mu_y = y.mean // mean of y - val x_c = x - mu_x // centered x (column-wise) - val y_c = y - mu_y // centered y - new RidgeRegression (x_c, y_c, fname, hparam) + hparam: HyperParameter = RidgeRegression.hp): RidgeRegression = + val xℱ = CenterForm (x) + val yℱ = CenterForm (y) + new RidgeRegression (xℱ.f(x), yℱ.f(y), fname, hparam, xℱ, yℱ) end center + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `RidgeRegression` object from a data matrix and a response vector. + * This method provides data rescaling of x and centering of y. + * @param x the un-centered data/input m-by-n matrix, NOT augmented with a first column of ones + * @param y the un-centered response/output vector + * @param fname the feature/variable names (defaults to null) + * @param hparam the shrinkage hyper-parameter (0 => OLS) in the penalty term 'lambda * b dot b' + */ def rescale (x: MatrixD, y: VectorD, fname: Array [String] = null, - hparam: HyperParameter = hp): RidgeRegression = ??? 
+ hparam: HyperParameter = hp): RidgeRegression = + val xℱ = NormForm (x) + val yℱ = CenterForm (y) + new RidgeRegression (xℱ.f(x), yℱ.f(y), fname, hparam, xℱ, yℱ) + end rescale end RidgeRegression @@ -261,13 +319,15 @@ end RidgeRegression @main def ridgeRegressionTest (): Unit = // 5 data points: x_0 x_1 - val x = MatrixD ((5, 2), 36.0, 66.0, // 5-by-2 matrix data matrix + val x = MatrixD ((5, 2), 36.0, 66.0, // 5-by-2 data matrix 37.0, 68.0, 47.0, 64.0, 32.0, 53.0, 1.0, 101.0) val y = VectorD (745.0, 895.0, 442.0, 440.0, 1598.0) // 5-dim response vector +// println ("model: y = b_0 + b_1*x_1 + b_2*x_2") + println ("model: y = b₀ + b₁*x₁ + b₂*x₂") // for Regression, remove b₀ for Ridge println (s"x = $x") println (s"y = $y") @@ -276,7 +336,7 @@ end RidgeRegression val reg = new Regression (ox, y) // create a Regression model reg.trainNtest ()() // train and test the model - banner ("RidgeRegression") + banner ("RidgeRegression with manual centering") val mu_x = x.mean // column-wise mean of x val mu_y = y.mean // mean of y val x_c = x - mu_x // centered x (column-wise) @@ -284,23 +344,35 @@ end RidgeRegression val mod = new RidgeRegression (x_c, y_c) // create a Ridge Regression model mod.trainNtest ()() // train and test the model - banner ("Make Predictions") + banner ("RidgeRegression with Auto-centering") + val amod = RidgeRegression.center (x, y) // create an auto-centered Ridge Regression model + amod.trainNtest ()() // train and test the model + + banner ("RidgeRegression with Rescaling") + val rmod = RidgeRegression.rescale (x, y) // create a rescaled Ridge Regression model + rmod.trainNtest ()() // train and test the model + + banner ("Make one OOS Predictions") val z = VectorD (20.0, 80.0) // new instance to predict val _1z = 1.0 +: z // prepend 1 to z val z_c = z - mu_x // center z println (s"reg.predict ($z) = ${reg.predict (_1z)}") // predict using _1z println (s"mod.predict ($z) = ${mod.predict (z_c) + mu_y}") // predict using z_c and add y's mean + 
println (s"amod.predict ($z) = ${amod.predict (z)}") // predict using z with auto-centering + println (s"rmod.predict ($z) = ${rmod.predict (z)}") // predict using z with rescaling banner ("Compare Summaries") println (reg.summary ()) println (mod.summary ()) + println (amod.summary ()) + println (rmod.summary ()) end ridgeRegressionTest //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `ridgeRegressionTest2` main function tests the `RidgeRegression` class using - * the following regression equation. + * the following regression equation * y = b dot x = b_1*x1 + b_2*x_2. * Try non-default value for the 'lambda' hyper-parameter. * > runMain scalation.modeling.ridgeRegressionTest2 @@ -387,10 +459,10 @@ end ridgeRegressionTest2 println (mod.summary ()) // parameter/coeefficient statistics banner ("Forward Selection Test") - mod.forwardSelAll (cross = false) + mod.forwardSelAll (cross = "none") banner ("Backward Elimination Test") - mod.backwardElimAll (cross = false) + mod.backwardElimAll (cross = "none") end ridgeRegressionTest3 @@ -460,8 +532,7 @@ end ridgeRegressionTest4 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for RidgeRegression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RidgeRegression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -507,7 +578,7 @@ end ridgeRegressionTest6 val y = VectorD (1.0, 3.0, 3.0, 4.0) // 4-dim response vector val n = x.dim2 - val xt = x.transpose + val xt = x.ᵀ val xtx = xt * x val b = inverse (xtx)() * xt * y val yp = x * b @@ -529,7 +600,7 @@ end ridgeRegressionTest6 val x_ = x - mu_x // center the data val y_ = y - mu_y - val xt_ = x_.transpose + val xt_ = x_.ᵀ val xtx_ = xt_ * x_ + eye (n, n) * l val b_ = inverse (xtx_)() * xt_ * y_ val yp_ = x_ * b_ + mu_y @@ -545,3 
+616,101 @@ end ridgeRegressionTest6 end ridgeRegressionTest7 + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ridgeRegressionTest8` main function tests the multi-collinearity method in + * the `RidgeRegression` class using the following regression equation. + * y = b dot x = b_1*x_1 + b_2*x_2 + b_2*x_3 + * Check correlation for perfectly and highly collinear vectors (@see Textbook, exercise 1) + * > runMain scalation.modeling.ridgeRegressionTest8 + */ +@main def ridgeRegressionTest8 (): Unit = + + val rvg = random.RandomVecD (100) + val nrm = random.NormalVec_c (100, 0, 100) + val x_1 = rvg.gen + val x_2 = rvg.gen + val x_3 = x_2 * 2 + 3 + val x = MatrixD (x_1, x_2, x_3).ᵀ + println (s"Perfectly Collinear: correlation matrix rho = ${x.corr}") + x(?, 2) = x_3 + rvg.gen / 3.0 + println (s"Highly Collinear: correlation matrix rho = ${x.corr}") + + val b_ = VectorD (2, 3, 4) + val y = x * b_ + nrm.gen + val xy = x :^+ y + println (s"Correlation matrix for xy: rho = ${xy.corr}") + + val x_c = x - x.mean + val y_c = y - y.mean + + banner ("Regression Model") + val mod = new Regression (x_c, y_c) + mod.trainNtest ()() + println (mod.summary ()) + FitM.showQofStatTable (mod.crossValidate ()) + + banner ("Ridge Regression Model") + for i <- 1 to 10 do + RidgeRegression.hp("lambda") = 25.0 * i + val mod2 = new RidgeRegression (x_c, y_c) + mod2.trainNtest ()() + println (mod2.summary ()) + FitM.showQofStatTable (mod2.crossValidate ()) + end for + +end ridgeRegressionTest8 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ridgeRegressionTest9` main function tests the multi-collinearity method in + * the `RidgeRegression` class using the following regression equation. + * y = b dot x = b_1*x_1 + b_2*x_2 + * Contour Plots for see, L2 penalty, see + penalty. 
+ * > runMain scalation.modeling.ridgeRegressionTest9 + */ +@main def ridgeRegressionTest9 (): Unit = + + val rvg = random.RandomVecD (100) + val nrm = random.NormalVec_c (100, 0, 50) + val x_1 = rvg.gen + val x_2 = rvg.gen + val x = MatrixD (x_1, x_2).ᵀ + + val b_ = VectorD (4, 5) + val y = x * b_ + nrm.gen + val xy = x :^+ y + println (s"Correlation matrix for xy: rho = ${xy.corr}") + + val x_c = x - x.mean + val y_c = y - y.mean + + banner ("Regression Model") + val mod = new Regression (x_c, y_c) + mod.trainNtest ()() + println (mod.summary ()) + FitM.showQofStatTable (mod.crossValidate ()) + var lambda = 0.0 + + banner ("Ridge Regression Model") + for i <- 1 to 10 do + lambda = 100.0 * i + RidgeRegression.hp("lambda") = lambda + val mod2 = new RidgeRegression (x_c, y_c) + mod2.trainNtest ()() + println (mod2.summary ()) + FitM.showQofStatTable (mod2.crossValidate ()) + end for + + def f(b: VectorD): Double = (y - x * b).normSq + def f2(b: VectorD): Double = b.normSq * lambda + def f3(b: VectorD): Double = f(b) + f2(b) + + val lb = VectorD (3, 4) + val ub = VectorD (5, 6) + new PlotC (f, lb, ub, title = "Contour plot of sse") + new PlotC (f2, lb, ub, title = "Contour plot of L2 penalty") + new PlotC (f3, lb, ub, title = "Contour Plot of sse + penalty") + +end ridgeRegressionTest9 + diff --git a/src/main/scala/scalation/modeling/Sampling.scala b/src/main/scala/scalation/modeling/Sampling.scala index 01548458e..6f0e31189 100644 --- a/src/main/scala/scalation/modeling/Sampling.scala +++ b/src/main/scala/scalation/modeling/Sampling.scala @@ -31,7 +31,6 @@ def subSample (x: MatrixD, nSamp: Int, stream: Int): (MatrixD, VectorI) = val rsg = RandomVecSample (x.dim, nSamp, stream) // random sample generator val irows = rsg.igen // select rows, e.g., 5, 3, 7 (x(irows), irows) - end if end subSample //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -49,7 +48,6 @@ def subSample (x: MatrixD, y: VectorD, nSamp: Int, stream: Int): (MatrixD, Vecto 
val rsg = RandomVecSample (x.dim, nSamp, stream) // random sample generator val irows = rsg.igen // select rows, e.g., 5, 3, 7 (x(irows), y(irows), irows) - end if end subSample //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -67,7 +65,6 @@ def subSample (x: MatrixD, y: VectorI, nSamp: Int, stream: Int): (MatrixD, Vecto val rsg = RandomVecSample (x.dim, nSamp, stream) // random sample generator val irows = rsg.igen // select rows, e.g., 5, 3, 7 (x(irows), y(irows), irows) - end if end subSample //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/modeling/SimpleExpRegression.scala b/src/main/scala/scalation/modeling/SimpleExpRegression.scala index 88c72f9b4..669082340 100644 --- a/src/main/scala/scalation/modeling/SimpleExpRegression.scala +++ b/src/main/scala/scalation/modeling/SimpleExpRegression.scala @@ -11,6 +11,8 @@ package scalation package modeling +// FIX -- option to include intercept and improve model accuracy + import scala.math.exp import scalation.mathstat._ @@ -29,7 +31,7 @@ import scalation.mathstat._ class SimpleExpRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, hparam: HyperParameter = null, nonneg: Boolean = true) extends Predictor (x, y, fname_, hparam) - with Fit (dfm = 1, df = x.dim - 2) + with Fit (dfr = 1, df = x.dim - 2) with NoSubModels: private val debug = debugf ("SimpleExpRegression", true) // debug function @@ -37,13 +39,15 @@ class SimpleExpRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = nul private val cutoff = 1E-5 // cutoff threshold private val eta = 0.00005 // the learning/convergence rate (requires adjustment) private val maxEpochs = 1000 // the maximum number of training epcochs/iterations -// private val eta = hparam ("eta") // the learning/convergence rate (requires adjustment) -// private val maxEpochs = hparam ("maxEpochs").toInt // the maximum number of training epcochs/iterations +// private val eta 
= hparam("eta") // the learning/convergence rate (requires adjustment) +// private val maxEpochs = hparam("maxEpochs").toInt // the maximum number of training epcochs/iterations - modelName = "SimpleExpRegression" + _modelName = "SimpleExpRegression" if nonneg && ! y.isNonnegative then flaw ("init", "response vector y must be nonnegative") + override def getBest: BestStep = super [NoSubModels].getBest + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The estimated response value at point xi. * @param xi the point to evaluate @@ -182,6 +186,7 @@ end SimpleExpRegression println (s"x_ = $x_") val mod = new SimpleExpRegression (x_, y) // create a model +// val mod = new SimpleExpRegression (x, y) // create a model -- with intercept mod.trainNtest ()() // train and test the model val yp = mod.predict (x_) @@ -208,10 +213,10 @@ end simpleExpRegressionTest //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Test `SimpleExpRegression` by simulating n-many observations. - * @param n number of observations - * @param k number of variables + * @param n number of observations (e.g., 10000) + * @param k number of variables (e.g., 5) */ - def test (n: Int = 10000, k: Int = 5): Unit = + def test (n: Int, k: Int): Unit = val u = new Uniform (0, 1) // uniform random val e = new Exponential (1) // exponential error val r = new Random () diff --git a/src/main/scala/scalation/modeling/SimpleNN.scala b/src/main/scala/scalation/modeling/SimpleNN.scala new file mode 100644 index 000000000..b9d4e3187 --- /dev/null +++ b/src/main/scala/scalation/modeling/SimpleNN.scala @@ -0,0 +1,633 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Mon Oct 20 20:16:25 EDT 2025 + * @see LICENSE (MIT style license file). 
+ * + * @note Simple Neural Networks Using Gradient Descent Optimization + * Tests both Gradient Descent (GD) and Incremental Gradient Descent (IGD) + * Simplified versions of `Regression`, `Perceptron`, `NeuralNet_2L`, and `NeuralNet_3L` + * for illustration/learning, not production + * + * @note the symbol ƒ indicates the derivative of function f, i.e., ƒ = f' + */ + +package scalation +package modeling + +import scalation.mathstat.{VectorD, MatrixD, Plot} +import scalation.mathstat.VectorDOps._ +import ActivationFun._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SimpleNN` object contains a simple dataset for testing Gradient Descent (GD) + * and Incremental Gradient Descent (IGD) optimization algorithms. + * @see https://nowak.ece.wisc.edu/MFML.pdf + */ +object SimpleNN: + + // 9 data points: One x1 x2 y1 y2 + val xy = MatrixD ((9, 5), 1.0, 0.1, 0.1, 0.5, 0.25, // dataset + 1.0, 0.1, 0.5, 0.3, 0.49, + 1.0, 0.1, 1.0, 0.2, 0.64, + + 1.0, 0.5, 0.1, 0.8, 0.04, + 1.0, 0.5, 0.5, 0.5, 0.25, + 1.0, 0.5, 1.0, 0.3, 0.49, + + 1.0, 1.0, 0.1, 1.0, 0.0, + 1.0, 1.0, 0.5, 0.8, 0.04, + 1.0, 1.0, 1.0, 0.5, 0.25) + + val (x, y) = (xy(?, 0 until 3), xy(?, 3)) // input matrix, output/response vector + +end SimpleNN + +import SimpleNN._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN1` main function illustrates the use of Gradient Descent (GD) to + * optimize the weights/parameters of a Multiple Linear Regression (MLR) model. + * + * grad g = -x.ᵀ * (y - ŷ) where pred ŷ = x * b + * + * Computations done at the vector level: X -> y. 
R^2 = .827 + * > runMain scalation.modeling.simpleNN1 + */ +@main def simpleNN1 (): Unit = + + val sst = (y - y.mean).normSq // sum of squares total + val b = VectorD (0.1, 0.2, 0.1) // initial weights/parameters (random in practice) + + val η = 0.1 // learning rate (to be tuned) + var ŷ, ε, δ, g: VectorD = null + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + + // forward prop: input -> output + ŷ = x * b // prediction vector + ε = y - ŷ // error vector + + // backward prop: output -> input + δ = -ε // delta correction vector + g = x.ᵀ * δ // gradient vector + + // parameter update + b -= g * η // update parameter vector + + val sse = ε.normSq // sum of squared errors + val r2 = 1.0 - sse / sst // R^2 + + println (s""" + ŷ = $ŷ + ε = $ε + δ = $δ + g = $g + b = $b + sse = $sse + r2 = $r2 + """) + end for + + new Plot (null, y, ŷ, "GD for MLR y", lines = true) + +end simpleNN1 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN2` main function illustrates the use of Incremental Gradient Descent (IGD) + * to optimize the weights/parameters of a Multiple Linear Regression (MLR) model. + * Computations done at the vector level: X -> y. 
R^2 = .933 + * > runMain scalation.modeling.simpleNN2 + */ +@main def simpleNN2 (): Unit = + + val sst = (y - y.mean).normSq // sum of squares total + val b = VectorD (0.1, 0.2, 0.1) // initial weights/parameters (random in practice) + + val η = 0.2 // learning rate (to be tuned) + var ŷ, ε, δ: Double = -0.0 + var g: VectorD = null + val yp = new VectorD (y.dim) // save each prediction in yp + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + var sse = 0.0 + for i <- x.indices do + val (xi, yi) = (x(i), y(i)) // randomize i for Stochastic Gradient Descent (SGD) + + // forward prop: input -> output + ŷ = xi ∙ b // prediction scalar + ε = yi - ŷ // error scalar + + // backward prop: output -> input + δ = -ε // delta correction scalar + g = xi * δ // gradient vector + + // parameter update + b -= g * η // update parameter vector + + yp(i) = ŷ // save i-th prediction + sse += ε * ε // sum of squared errors + end for + val r2 = 1.0 - sse / sst // R^2 + + println (s""" + ŷ = $ŷ + ε = $ε + δ = $δ + g = $g + b = $b + sse = $sse + r2 = $r2 + """) + end for + + new Plot (null, y, yp, "IGD for MLR y", lines = true) + +end simpleNN2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN3` main function illustrates the use of Gradient Descent (GD) to + * optimize the weights/parameters of a simple neural network (Perceptron). + * Originally, perceptrons used the Heavyside activation function for binary classification + * problems, but have been extended to multi-class classification and regression problems. + * Furthermore, when the activation function is identity, the perceptron models are equivalent + * multiple linear regression models. + * + * grad g = -x.ᵀ * (y - ŷ) * ƒ where pred ŷ = f(x * b) + * + * Computations done at the vector level: X -> y. 
R^2 = .865 + * > runMain scalation.modeling.simpleNN3 + */ +@main def simpleNN3 (): Unit = + + val sst = (y - y.mean).normSq // sum of squares total + + val b = VectorD (0.1, 0.2, 0.1) // initial weights/parameters (random in practice) + + val η = 2.5 // learning rate (to be tuned) + var u, ŷ, ε, ƒ, δ, g: VectorD = null + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + + // forward prop: input -> output + u = x * b // pre-activation vector + ŷ = sigmoid_ (u) // prediction vector + ε = y - ŷ // error vector + + // backward prop: output -> input + ƒ = ŷ * (1.0 - ŷ) // derivative (f') for sigmoid + δ = -ε * ƒ // delta correction vector + g = x.ᵀ * δ // gradient vector + + // parameter update + b -= g * η // update parameter vector + + val sse = ε.normSq // sum of squared errors + val r2 = 1.0 - sse / sst // R^2 + + println (s""" + u = $u + ŷ = $ŷ + ε = $ε + ƒ = $ƒ + δ = $δ + g = $g + b = $b + sse = $sse + r2 = $r2 + """) + + end for + new Plot (null, y, ŷ, "GD for Perceptron y", lines = true) + +end simpleNN3 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN4` main function illustrates the use of Gradient Descent (GD) to optimize + * the weights/parameters of a simple neural network (2-layer (no hidden) Neural Network). + * + * Prediction Equation: ŷ = f(B^T x) = f(W x + b) + * + * where x is an input vector, ŷ is a predicted output vector, f is the activation function, + * B is the parameter matrix, W is the weight matrix, and b is the bias vector. + * @note, PyTorch stores the weight matrix W [num_out, num_in] transposed from B to make + * back-propagation more efficient. + * Computations done at the vector level: X -> y. 
R^2 = .865, .826 + * > runMain scalation.modeling.simpleNN4 + */ +@main def simpleNN4 (): Unit = + + val (x, y) = (xy(?, 0 until 3), xy(?, 3 until 5)) // input matrix, output/response matrix + val sst = (y - y.mean).normSq // sum of squares total + + val b = MatrixD ((3, 2), 0.1, 0.1, + 0.2, 0.1, + 0.1, 0.1) // initial weights/parameters (random in practice) + + val η = 2.5 // learning rate (to be tuned) + var u, ŷ, ε, ƒ, δ, g: VectorD = null + + for epoch <- 1 to 10; k <- y.indices2 do + println (s"Improvement step $epoch") + + // forward prop: input -> output + u = x * b(?, k) // pre-activation vector + ŷ = sigmoid_ (u) // prediction vector + ε = y(?, k) - ŷ // error vector for column k + + // backward prop: output -> input + ƒ = ŷ * (1.0 - ŷ) // derivative (f') for sigmoid + δ = -ε * ƒ // delta correction vector + g = x.ᵀ * δ // gradient vector + + // parameter update + b(?, k) = b(?, k) - g * η // update parameter matrix, column k + + val sse = ε.normSq // sum of squared errors + val r2 = 1.0 - sse / sst(k) // R^2 + + println (s""" + for k = $k + u = $u + ŷ = $ŷ + ε = $ε + ƒ = $ƒ + δ = $δ + g = $g + b = $b + sse = $sse + r2 = $r2 + """) + end for + + new Plot (null, y(?, 1), ŷ, "GD for Two-layer Neural Net y_1", lines = true) + +end simpleNN4 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN5` main function illustrates the use of Gradient Descent (GD) to optimize + * the weights/parameters of a simple neural network (2-layer (no hidden) Neural Network). + * Computations done at the matrix level: X -> Y. 
R^2 = .865, .826 + * > runMain scalation.modeling.simpleNN5 + */ +@main def simpleNN5 (): Unit = + + import scalation.mathstat.MatrixDOps._ // may clash with VectorDOps + + val (x, y) = (xy(?, 0 until 3), xy(?, 3 until 5)) // input matrix, output/response matrix + val sst = (y - y.mean).normSq // sum of squares total + + val b = MatrixD ((3, 2), 0.1, 0.1, + 0.2, 0.1, + 0.1, 0.1) // initial weights/parameters (random in practice) + + val η = 2.5 // learning rate (to be tuned) +// val η = 0.29 // learning rate (to be tuned) best for var 1 83% + var u, ŷ, ε, ƒ, δ, g: MatrixD = null + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + + // forward prop: input -> output + u = x * b // pre-activation matrix + ŷ = f_sigmoid.fM (u) // prediction matrix +// ŷ = f_tanh.fM (u) // prediction matrix + ε = y - ŷ // error matrix + + // backward prop: output -> input + ƒ = ŷ ⊙ (1.0 - ŷ) // derivative (f') for sigmoid +// ƒ = 1.0 - ŷ~^2 // derivative (f') for tanh + δ = -ε ⊙ ƒ // delta correction matrix via Hadamard product + g = x.ᵀ * δ // transposed Jacobian matrix (gradients) + + // parameter update + b -= g * η // update parameter matrix + + val sse = ε.normSq // sum of squared errors + val r2 = -(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println (s""" + u = $u + ŷ = $ŷ + ε = $ε + ƒ = $ƒ + δ = $δ + g = $g + b = $b + sse = $sse + r2 = $r2 + """) + end for + + new Plot (null, y(?, 0), ŷ(?, 0), "GD for Two-layer Neural Net y_0", lines = true) + new Plot (null, y(?, 1), ŷ(?, 1), "GD for Two-layer Neural Net y_1", lines = true) + +end simpleNN5 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN6` main function illustrates the use of Gradient Descent (GD) to optimize + * the weights/parameters of a simple neural network (3-layer (1 hidden) Neural Network). + * Computations done at the matrix level: X -> Z -> Y. 
R^2 = .299, .432 (requires more epochs) + * > runMain scalation.modeling.simpleNN6 + */ +@main def simpleNN6 (): Unit = + + import scalation.mathstat.MatrixDOps._ // may clash with VectorDOps + + val (x, y) = (xy(?, 1 until 3), xy(?, 3 until 5)) // input matrix, output/response matrix + val sst = (y - y.mean).normSq // sum of squares total + + val a = MatrixD ((2, 3), 0.1, 0.1, 0.1, // parameter/weight matrix: input -> hidden + 0.1, 0.1, 0.1) + val α = VectorD (0.1, 0.1, 0.1) // hidden layer bias vector + val b = MatrixD ((3, 2), 0.1, 0.1, // parameter/weight matrix: hidden -> output + 0.2, 0.1, + 0.1, 0.1) // initial weights/parameters (random in practice) + val β = VectorD (0.1, 0.1) // output layer bias vector + + val η = 10.0 // learning rate (to be tuned) + var u, z, v, ŷ, ε, ƒ1, δ1, g1, ƒ0, δ0, g0: MatrixD = null + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + + // forward prop: input -> hidden + u = x * a + α // hidden pre-activation matrix + z = f_sigmoid.fM (u) // hidden matrix from f0 activation + + // forward prop: hidden -> output + v = z * b + β // output pre-activation matrix + ŷ = f_sigmoid.fM (v) // output/prediction matrix from f1 activation + ε = y - ŷ // error matrix + + // backward prop: hidden <- output + ƒ1 = ŷ ⊙ (1.0 - ŷ) // derivative (f1') for sigmoid + δ1 = -ε ⊙ ƒ1 // delta correction matrix via Hadamard product + g1 = z.ᵀ * δ1 // transposed Jacobian matrix (gradients) + + // backward prop: input <- hidden + ƒ0 = z ⊙ (1.0 - z) // derivative (f0') for sigmoid + δ0 = (δ1 * b.ᵀ) ⊙ ƒ0 // delta correction matrix + g0 = x.ᵀ * δ0 // transposed Jacobian matrix (gradients) + + // parameter updates + b -= g1 * η // update output parameter/weight matrix + β -= δ1.mean * η // update output bias vector + a -= g0 * η // update hidden parameter/weight matrix + α -= δ0.mean * η // update hidden bias vector + + val sse = ε.normSq // sum of squared errors + val r2 = -(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println 
(s""" + u = $u + z = $z + v = $v + ŷ = $ŷ + ε = $ε + ƒ1 = $ƒ1 + δ1 = $δ1 + g1 = $g1 + ƒ0 = $ƒ0 + δ0 = $δ0 + g0 = $g0 + b = $b + β = $β + a = $a + α = $α + sse = $sse + r2 = $r2 + """) + end for + + new Plot (null, y(?, 0), ŷ(?, 0), "GD for Three-layer Neural Net y_0", lines = true) + new Plot (null, y(?, 1), ŷ(?, 1), "GD for Three-layer Neural Net y_1", lines = true) + +end simpleNN6 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN7` main function illustrates the use of Incremental Gradient Descent (IGD) + * to optimize the weights/parameters of a simple neural network (3-layer (1 hidden) Neural Network). + * + * Prediction Equation: z = f0(A^T x + α) + * ŷ = f1(B^T z + β) + * + * where x is an input vector, z is the hidden layer vector, ŷ is a predicted output vector, f0, f1 + * are the activation functions, A and B are the parameter matrices, and α and β are the bias vectors. + * @note: Stochastic Gradient Descent (SGD) adds stochastic selection to IGD. In practice, + * mini-batches of size 32, 64, or 128 are commonly used. + * Computations done at the vector level, x -> z -> y. 
R^2 = .863, .913 (for 10 epochs) + * > runMain scalation.modeling.simpleNN7 + */ +@main def simpleNN7 (): Unit = + + import scalation.mathstat.MatrixDOps.⊗ // outer product of two vectors: v1 ⊗ v2 = v1 v1.ᵀ + // matrix where m_ij = v1_i * v2_j + + val (xx, yy) = (xy(?, 1 until 3), xy(?, 3 until 5)) // input matrix, output/response matrix + val sst = (yy - yy.mean).normSq // sum of squares total + + val a = MatrixD ((2, 3), 0.2, 0.3, 0.2, // parameter/weight matrix: input -> hidden + -0.1, -0.2, -0.1) + val α = VectorD (-0.1, -0.1, -0.1) // hidden layer bias vector + val b = MatrixD ((3, 2), 0.1, -0.1, // parameter/weight matrix: hidden -> output + 0.2, -0.2, + 0.1, -0.1) // initial weights/parameters (random in practice) + val β = VectorD (-0.1, 0.1) // output layer bias vector + +// val η = 20.0 // learning rate (to be tuned) + val η = 1.0 // learning rate (to be tuned) + var u, z, v, ŷ, ε, ƒ1, δ1, ƒ0, δ0: VectorD = null + var g1, g0: MatrixD = null + val yp = new MatrixD (yy.dim, yy.dim2) // save each prediction in yp + +// try f0 = tanh -> f1 = id + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + val sse = new VectorD (2) + for i <- xx.indices do + val (x, y) = (xx(i), yy(i)) // randomize i for Stochastic Gradient Descent (SGD) + + // forward prop: input -> hidden + u = a.ᵀ * x + α // hidden pre-activation vector +// z = sigmoid_ (u) // hidden vector from f0 = sigmoid activation + z = tanh_ (u) // hidden vector from f0 = tanh activation + + // forward prop: hidden -> output + v = b.ᵀ * z + β // output pre-activation vector +// ŷ = sigmoid_ (v) // output/prediction vector from f1 = sigmoid activation + ŷ = v // output/prediction vector from f1 = id activation + ε = y - ŷ // error vector [ε_1, ε_2] + + // backward prop: hidden <- output +// ƒ1 = ŷ * (1.0 - ŷ) // derivative (f1') for sigmoid +// ƒ1 = 1.0 - ŷ~^2 // derivative (f1') for tanh + ƒ1 = VectorD.one (ŷ.dim) // derivative (f1') for id + δ1 = -ε * ƒ1 // delta correction vector via 
elementwise product + g1 = z ⊗ δ1 // transposed Jacobian matrix (gradients) + + // backward prop: input <- hidden +// ƒ0 = z * (1.0 - z) // derivative (f0') for sigmoid + ƒ0 = 1.0 - z~^2 // derivative (f0') for tanh +// ƒ0 = VectorD.one (z.dim) // derivative (f0') for id + δ0 = b * δ1 * ƒ0 // delta correction vector + g0 = x ⊗ δ0 // transposed Jacobian matrix (gradients) + + // parameter updates + b -= g1 * η // update output parameter/weight matrix + β -= δ1 * η // update output bias vector + a -= g0 * η // update hidden parameter/weight matrix + α -= δ0 * η // update hidden bias vector + + yp(i) = ŷ // save i-th prediction + sse += ε * ε // sum of squared errors + val r2 = -(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println (s""" + u = $u + z = $z + v = $v + ŷ = $ŷ + ε = $ε + ƒ1 = $ƒ1 + δ1 = $δ1 + g1 = $g1 + ƒ0 = $ƒ0 + δ0 = $δ0 + g0 = $g0 + b = $b + β = $β + a = $a + α = $α + sse = $sse + r2 = $r2 + """) + end for + end for + + new Plot (null, yy(?, 0), yp(?, 0), "IGD for Three-layer Neural Net y_0", lines = true) + new Plot (null, yy(?, 1), yp(?, 1), "IGD for Three-layer Neural Net y_1", lines = true) + +end simpleNN7 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN8` main function illustrates the use of Incremental Gradient Descent (IGD) + * to optimize the weights/parameters of a simple neural network (3-layer (1 hidden) Neural Network). + * + * Prediction Equation: z = f0(A^T x + α) + * ŷ = f1(B^T z + β) + * + * where x is an input vector, z is the hidden layer vector, ŷ is a predicted output vector, f0, f1 + * are the activation functions, A and B are the parameter matrices, and α and β are the bias vectors. + * Illustrates the need for RESCALING the data. + * Computations done at the vector level, x -> z -> y. 
R^2 = .512 (no rescaling) .813 (rescaling) (for 20 epochs) + * > runMain scalation.modeling.simpleNN8 + */ +@main def simpleNN8 (): Unit = + + + import Example_AutoMPG.xy + import scalation.mathstat.MatrixDOps.⊗ // outer product of two vectors: v1 ⊗ v2 = v1 v1.ᵀ + // matrix where m_ij = v1_i * v2_j + def rescale (x: MatrixD, yes: Boolean): Unit = + if yes then + for j <- x.indices2 do + val (mu_j, sig_j) = (x(?, j).mean, x(?, j).stdev) + x(?, j) = (x(?, j) - mu_j) / sig_j // option: subtract mean, divide by standard deviation + end rescale + + val n = xy.dim2 - 1 // last column in xy + val (xx, y) = (xy.not(?, n), xy(?, n)) // (data/input matrix, response column) + val sst = (y - y.mean).normSq // sum of squares total + val yy = MatrixD.fromVector (y) // turn the m-vector y into an m-by-1 matrix + + val a = MatrixD.fill (6, 3, 0.1) // parameter/weight matrix: input -> hidden + val α = VectorD (0.1, 0.1, 0.1) // hidden layer bias vector + val b = MatrixD ((3, 1), 0.1, // parameter/weight matrix: hidden -> output + 0.1, + 0.1) // initial weights/parameters (random in practice) + val β = VectorD (0.1) // output layer bias vector + + rescale (xx, true) // true => rescale, false => no rescaling + println (s"xx = $xx") + + val η = 0.1 // learning rate (to be tuned) + var x, u, z, v, ŷ, ε, ƒ1, δ1, ƒ0, δ0: VectorD = null + var g1, g0: MatrixD = null + val yp = new MatrixD (yy.dim, yy.dim2) // save each prediction in yp + + for epoch <- 1 to 20 do + println (s"Improvement step $epoch") + val sse = new VectorD (1) + for i <- xx.indices do + x = xx(i) // randomize i for Stochastic Gradient Descent (SGD) + val y = yy(i) + + // forward prop: input -> hidden + u = a.ᵀ * x + α // hidden pre-activation vector + z = sigmoid_ (u) // hidden vector from f0 = sigmoid activation + + // forward prop: hidden -> output + v = b.ᵀ * z + β // output pre-activation vector + ŷ = v // output/prediction vector from f1 = id activation + ε = y - ŷ // error vector [ε_1, ε_2] + + // backward prop: 
hidden <- output + ƒ1 = VectorD (1.0) // derivative (f1') for id + δ1 = -ε * ƒ1 // delta correction vector via elementwise product + g1 = z ⊗ δ1 // transposed Jacobian matrix (gradients) + + // backward prop: input <- hidden + ƒ0 = z * (1.0 - z) // derivative (f0') for sigmoid + δ0 = b * δ1 * ƒ0 // delta correction vector + g0 = x ⊗ δ0 // transposed Jacobian matrix (gradients) + + // parameter updates + b -= g1 * η // update output parameter/weight matrix + β -= δ1 * η // update output bias vector + a -= g0 * η // update hidden parameter/weight matrix + α -= δ0 * η // update hidden bias vector + + yp(i) = ŷ // save i-th prediction + sse += ε * ε // sum of squared errors + end for + val r2 = -(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println (s""" + x = $x + u = $u + z = $z + v = $v + ŷ = $ŷ + ε = $ε + ƒ1 = $ƒ1 + δ1 = $δ1 + g1 = $g1 + ƒ0 = $ƒ0 + δ0 = $δ0 + g0 = $g0 + b = $b + β = $β + a = $a + α = $α + sse = $sse + r2 = $r2 + """) + end for + + new Plot (null, yy(?, 0), yp(?, 0), "IGD for Three-layer Neural Net y_0", lines = true) + +end simpleNN8 + diff --git a/src/main/scala/scalation/modeling/SimpleNN.scala.bak b/src/main/scala/scalation/modeling/SimpleNN.scala.bak new file mode 100644 index 000000000..539346c34 --- /dev/null +++ b/src/main/scala/scalation/modeling/SimpleNN.scala.bak @@ -0,0 +1,639 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Mon Oct 20 20:16:25 EDT 2025 + * @see LICENSE (MIT style license file). 
+ * + * @note Simple Neural Networks Using Gradient Descent Optimization + * Tests both Gradient Descent (GD) and Incremental Gradient Descent (IGD) + * Simplified versions of `Regression`, `Perceptron`, `NeuralNet_2L`, and `NeuralNet_3L` + * for illustration/learning, not production + * + * @note the symbol ƒ indicates the derivative of function f, i.e., ƒ = f' + */ + +package scalation +package modeling + +import scalation.mathstat.{VectorD, MatrixD, Plot} +import scalation.mathstat.VectorDOps._ +import ActivationFun._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SimpleNN` object contains a simple dataset for testing Gradient Descent (GD) + * and Incremental Gradient Descent (IGD) optimization algorithms. + * @see https://nowak.ece.wisc.edu/MFML.pdf + */ +object SimpleNN: + + // 9 data points: One x1 x2 y1 y2 + val xy = MatrixD ((9, 5), 1.0, 0.1, 0.1, 0.5, 0.25, // dataset + 1.0, 0.1, 0.5, 0.3, 0.49, + 1.0, 0.1, 1.0, 0.2, 0.64, + + 1.0, 0.5, 0.1, 0.8, 0.04, + 1.0, 0.5, 0.5, 0.5, 0.25, + 1.0, 0.5, 1.0, 0.3, 0.49, + + 1.0, 1.0, 0.1, 1.0, 0.0, + 1.0, 1.0, 0.5, 0.8, 0.04, + 1.0, 1.0, 1.0, 0.5, 0.25) +end SimpleNN + +import SimpleNN.xy + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN1` main function illustrates the use of Gradient Descent (GD) to + * optimize the weights/parameters of a Multiple Linear Regression (MLR) model. + * + * grad g = -x.ᵀ * (y - ŷ) where pred ŷ = x * b + * + * Computations done at the vector level: X -> y. 
R^2 = .827 + * > runMain scalation.modeling.simpleNN1 + */ +@main def simpleNN1 (): Unit = + + val (x, y) = (xy(?, 0 until 3), xy(?, 3)) // input matrix, output/response vector + val sst = (y - y.mean).normSq // sum of squares total + val b = VectorD (0.1, 0.2, 0.1) // initial weights/parameters (random in practice) + + val η = 0.1 // learning rate (to be tuned) + var ŷ, ε, δ, g: VectorD = null + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + + // forward prop: input -> output + ŷ = x * b // prediction vector + ε = y - ŷ // error vector + + // backward prop: output -> input + δ = -ε // delta correction vector + g = x.ᵀ * δ // gradient vector + + // parameter update + b -= g * η // update parameter vector + val sse = ε.normSq // sum of squared errors + val r2 = 1.0 - sse / sst // R^2 + + println (s""" + ŷ = $ŷ + ε = $ε + δ = $δ + g = $g + b = $b + r2 = $r2 + """) + end for + + new Plot (null, y, ŷ, "IGD for MLR y", lines = true) + +end simpleNN1 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN2` main function illustrates the use of Incremental Gradient Descent (IGD) + * to optimize the weights/parameters of a Multiple Linear Regression (MLR) model. + * Computations done at the vector level: X -> y. 
R^2 = .933 + * > runMain scalation.modeling.simpleNN2 + */ +@main def simpleNN2 (): Unit = + + val (xx, yy) = (xy(?, 0 until 3), xy(?, 3)) // input matrix, output/response vector + val sst = (yy - yy.mean).normSq // sum of squares total + val b = VectorD (0.1, 0.2, 0.1) // initial weights/parameters (random in practice) + + val η = 0.2 // learning rate (to be tuned) + var ŷ, ε, δ: Double = -0.0 + var g: VectorD = null + val yp = new VectorD (yy.dim) // save each prediction in yp + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + var sse = 0.0 + for i <- xx.indices do + val (x, y) = (xx(i), yy(i)) // randomize i for Stochastic Gradient Descent (SGD) + + // forward prop: input -> output + ŷ = x ∙ b // prediction scalar + ε = y - ŷ // error scalar + + // backward prop: output -> input + δ = -ε // delta correction scalar + g = x * δ // gradient vector + + // parameter update + b -= g * η // update parameter vector + yp(i) = ŷ // save i-th prediction + sse += ε * ε // sum of squared errors + end for + val r2 = 1.0 - sse / sst // R^2 + + println (s""" + ŷ = $ŷ + ε = $ε + δ = $δ + g = $g + b = $b + r2 = $r2 + """) + end for + + new Plot (null, yy, yp, "IGD for MLR y", lines = true) + +end simpleNN2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN3` main function illustrates the use of Gradient Descent (GD) to + * optimize the weights/parameters of a simple neural network (Perceptron). + * Originally, perceptrons used the Heavyside activation function for binary classification + * problems, but have been extended to multi-class classification and regression problems. + * Furthermore, when the activation function is identity, the perceptron models are equivalent + * multiple linear regression models. + * + * grad g = -x.ᵀ * (y - ŷ) * ƒ where pred ŷ = f(x * b) + * + * Computations done at the vector level: X -> y. 
R^2 = .865 + * > runMain scalation.modeling.simpleNN3 + */ +@main def simpleNN3 (): Unit = + + val (x, y) = (xy(?, 0 until 3), xy(?, 3)) // input matrix, output/response vector + val sst = (y - y.mean).normSq // sum of squares total + val b = VectorD (0.1, 0.2, 0.1) // initial weights/parameters (random in practice) + + val η = 2.5 // learning rate (to be tuned) + var u, ŷ, ε, ƒ, δ, g: VectorD = null + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + + // forward prop: input -> output + u = x * b // pre-activation vector + ŷ = sigmoid_ (u) // prediction vector + ε = y - ŷ // error vector + + // backward prop: output -> input + ƒ = ŷ * (1.0 - ŷ) // derivative (f') for sigmoid + δ = -ε * ƒ // delta correction vector + g = x.ᵀ * δ // gradient vector + + // parameter update + b -= g * η // update parameter vector + val sse = ε.normSq // sum of squared errors + val r2 = 1.0 - sse / sst // R^2 + + println (s""" + u = $u + ŷ = $ŷ + ε = $ε + ƒ = $ƒ + δ = $δ + g = $g + b = $b + r2 = $r2 + """) + + end for + new Plot (null, y, ŷ, "GD for Perceptron y", lines = true) + +end simpleNN3 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN4` main function illustrates the use of Gradient Descent (GD) to optimize + * the weights/parameters of a simple neural network (2-layer (no hidden) Neural Network). + * + * Prediction Equation: ŷ = f(B^T x) = f(W x + b) + * + * where x is an input vector, ŷ is a predicted output vector, f is the activation function, + * B is the parameter matrix, W is the weight matrix, and b is the bias vector. + * @note, PyTorch stores the weight matrix W [num_out, num_in] transposed from B to make + * back-propagation more efficient. + * Computations done at the vector level: X -> y. 
R^2 = .865, .826 + * > runMain scalation.modeling.simpleNN4 + */ +@main def simpleNN4 (): Unit = + + val (x, y) = (xy(?, 0 until 3), xy(?, 3 until 5)) // input matrix, output/response matrix + val sst = (y - y.mean).normSq // sum of squares total, per column + val b = MatrixD ((3, 2), 0.1, 0.1, + 0.2, 0.1, + 0.1, 0.1) // initial weights/parameters (random in practice) + + val η = 2.5 // learning rate (to be tuned) + var u, ŷ, ε, ƒ, δ, g: VectorD = null + + for epoch <- 1 to 10; k <- y.indices2 do + println (s"Improvement step $epoch") + + // forward prop: input -> output + u = x * b(?, k) // pre-activation vector + ŷ = sigmoid_ (u) // prediction vector + ε = y(?, k) - ŷ // error vector for column k + + // backward prop: output -> input + ƒ = ŷ * (1.0 - ŷ) // derivative (f') for sigmoid + δ = -ε * ƒ // delta correction vector + g = x.ᵀ * δ // gradient vector + + // parameter update + b(?, k) = b(?, k) - g * η // update parameter matrix, column k + val sse = ε.normSq // sum of squared errors + val r2 = 1.0 - sse / sst(k) // R^2 + + println (s""" + for k = $k + u = $u + ŷ = $ŷ + ε = $ε + ƒ = $ƒ + δ = $δ + g = $g + b = $b + r2 = $r2 + """) + end for + + new Plot (null, y(?, 1), ŷ, "GD for Two-layer Neural Net y_1", lines = true) + +end simpleNN4 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN5` main function illustrates the use of Gradient Descent (GD) to optimize + * the weights/parameters of a simple neural network (2-layer (no hidden) Neural Network). + * Computations done at the matrix level: X -> Y. 
R^2 = .865, .826 + * > runMain scalation.modeling.simpleNN5 + */ +@main def simpleNN5 (): Unit = + + import scalation.mathstat.MatrixDOps._ // may clash with VectorDOps + + val (x, y) = (xy(?, 0 until 3), xy(?, 3 until 5)) // input matrix, output/response matrix + val sst = (y - y.mean).normSq // sum of squares total, per column + println (s"sst = $sst") + + val b = MatrixD ((3, 2), 0.1, 0.1, + 0.2, 0.1, + 0.1, 0.1) // initial weights/parameters (random in practice) + +// val η = 2.6 // learning rate (to be tuned) above original value too high -1 +// val η = 2.5 // learning rate (to be tuned) original value too high -1 +// val η = 1.4 // learning rate (to be tuned) +// val η = 1.0 // learning rate (to be tuned) +// val η = 0.9 // learning rate (to be tuned) +// val η = 0.6 // learning rate (to be tuned) +// val η = 0.5 // learning rate (to be tuned) + val η = 0.3 // learning rate (to be tuned) +// val η = 0.29 // learning rate (to be tuned) best for var 1 83% +// val η = 0.28 // learning rate (to be tuned) +// val η = 0.26 // learning rate (to be tuned) +// val η = 0.25 // learning rate (to be tuned) +// val η = 0.2 // learning rate (to be tuned) +// val η = 0.1545 // learning rate (to be tuned) +// val η = 0.15 // learning rate (to be tuned) +// val η = 0.1235 // learning rate (to be tuned) +// val η = 0.1 // learning rate (to be tuned) +// val η = 0.05 // learning rate (to be tuned) +// val η = 0.03 // learning rate (to be tuned) +// val η = 0.01 // learning rate (to be tuned) + var u, ŷ, ε, ƒ, δ, g: MatrixD = null + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + + // forward prop: input -> output + u = x * b // pre-activation matrix +// ŷ = f_sigmoid.fM (u) // prediction matrix + ŷ = f_tanh.fM (u) // prediction matrix + ε = y - ŷ // error matrix + + // backward prop: output -> input +// ƒ = ŷ ⊙ (1.0 - ŷ) // derivative (f') for sigmoid + ƒ = 1.0 - ŷ~^2 // derivative (f') for tanh + δ = -ε ⊙ ƒ // delta correction matrix via Hadamard product + g 
= x.ᵀ * δ // transposed Jacobian matrix (gradients) + + // parameter update + b -= g * η // update parameter matrix + val sse = ε.normSq // sum of squared errors + val r2 = -(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println (s""" + u = $u + ŷ = $ŷ + ε = $ε + ƒ = $ƒ + δ = $δ + g = $g + b = $b + sse= $sse + r2 = $r2 + """) + end for + + new Plot (null, y(?, 0), ŷ(?, 0), "GD for Two-layer Neural Net y_0", lines = true) + new Plot (null, y(?, 1), ŷ(?, 1), "GD for Two-layer Neural Net y_1", lines = true) + +end simpleNN5 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN6` main function illustrates the use of Gradient Descent (GD) to optimize + * the weights/parameters of a simple neural network (3-layer (1 hidden) Neural Network). + * Computations done at the matrix level: X -> Z -> Y. R^2 = .299, .432 (requires more epochs) + * > runMain scalation.modeling.simpleNN6 + */ +@main def simpleNN6 (): Unit = + + import scalation.mathstat.MatrixDOps._ // may clash with VectorDOps + + val (x, y) = (xy(?, 1 until 3), xy(?, 3 until 5)) // input matrix, output/response matrix + val sst = (y - y.mean).normSq // sum of squares total, per column + val a = MatrixD ((2, 3), 0.1, 0.1, 0.1, // parameter/weight matrix: input -> hidden + 0.1, 0.1, 0.1) + val α = VectorD (0.1, 0.1, 0.1) // hidden layer bias vector + val b = MatrixD ((3, 2), 0.1, 0.1, // parameter/weight matrix: hidden -> output + 0.2, 0.1, + 0.1, 0.1) // initial weights/parameters (random in practice) + val β = VectorD (0.1, 0.1) // output layer bias vector + + val η = 10.0 // learning rate (to be tuned) + var u, z, v, ŷ, ε, ƒ1, δ1, g1, ƒ0, δ0, g0: MatrixD = null + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + + // forward prop: input -> hidden + u = x * a + α // hidden pre-activation matrix + z = f_sigmoid.fM (u) // hidden matrix from f0 activation + + // forward prop: hidden -> output + v = z * b + β // output pre-activation 
matrix + ŷ = f_sigmoid.fM (v) // output/prediction matrix from f1 activation + ε = y - ŷ // error matrix + + // backward prop: hidden <- output + ƒ1 = ŷ ⊙ (1.0 - ŷ) // derivative (f1') for sigmoid + δ1 = -ε ⊙ ƒ1 // delta correction matrix via Hadamard product + g1 = z.ᵀ * δ1 // transposed Jacobian matrix (gradients) + + // backward prop: input <- hidden + ƒ0 = z ⊙ (1.0 - z) // derivative (f0') for sigmoid + δ0 = (δ1 * b.ᵀ) ⊙ ƒ0 // delta correction matrix + g0 = x.ᵀ * δ0 // transposed Jacobian matrix (gradients) + + // parameter updates + b -= g1 * η // update output parameter/weight matrix + β -= δ1.mean * η // update output bias vector + a -= g0 * η // update hidden parameter/weight matrix + α -= δ0.mean * η // update hidden bias vector + val sse = ε.normSq // sum of squared errors + val r2 = -(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println (s""" + u = $u + z = $z + v = $v + ŷ = $ŷ + ε = $ε + ƒ1 = $ƒ1 + δ1 = $δ1 + g1 = $g1 + ƒ0 = $ƒ0 + δ0 = $δ0 + g0 = $g0 + b = $b + β = $β + a = $a + α = $α + r2 = $r2 + """) + end for + + new Plot (null, y(?, 0), ŷ(?, 0), "GD for Three-layer Neural Net y_0", lines = true) + new Plot (null, y(?, 1), ŷ(?, 1), "GD for Three-layer Neural Net y_1", lines = true) + +end simpleNN6 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN7` main function illustrates the use of Incremental Gradient Descent (IGD) + * to optimize the weights/parameters of a simple neural network (3-layer (1 hidden) Neural Network). + * + * Prediction Equation: z = f0(A^T x + α) + * ŷ = f1(B^T z + β) + * + * where x is an input vector, z is the hidden layer vector, ŷ is a predicted output vector, f0, f1 + * are the activation functions, A and B are the parameter matrices, and α and β are the bias vectors. + * @note: Stochastic Gradient Descent (SGD) adds stochastic selection to IGD. In practice, + * mini-batches of size 32, 64, or 128 are commonly used. 
+ * Computations done at the vector level, x -> z -> y. R^2 = .768, .869 (for 10 epochs) + * > runMain scalation.modeling.simpleNN7 + */ +@main def simpleNN7 (): Unit = + + import scalation.mathstat.MatrixDOps.⊗ // outer product of two vectors: v1 ⊗ v2 = v1 v1.ᵀ + // matrix where m_ij = v1_i * v2_j + + val (xx, yy) = (xy(?, 1 until 3), xy(?, 3 until 5)) // input matrix, output/response matrix + val sst = (yy - yy.mean).normSq // sum of squares total, per column + val a = MatrixD ((2, 3), 0.2, 0.3, 0.2, // parameter/weight matrix: input -> hidden + -0.1, -0.2, -0.1) + val α = VectorD (-0.1, -0.1, -0.1) // hidden layer bias vector + val b = MatrixD ((3, 2), 0.1, -0.1, // parameter/weight matrix: hidden -> output + 0.2, -0.2, + 0.1, -0.1) // initial weights/parameters (random in practice) + val β = VectorD (-0.1, 0.1) // output layer bias vector + + println (s"sst = $sst, xx = $xx, yy = $yy") + +// val η = 20.0 // learning rate (to be tuned) + val η = 1.0 // learning rate (to be tuned) + var u, z, v, ŷ, ε, ƒ1, δ1, ƒ0, δ0: VectorD = null + var g1, g0: MatrixD = null + val yp = new MatrixD (yy.dim, yy.dim2) // save each prediction in yp + +// try f0 = tanh -> f1 = id + + for epoch <- 1 to 1 do + println (s"Improvement step $epoch") + val sse = new VectorD (2) +// for i <- xx.indices do + for i <- 0 to 0 do + val (x, y) = (xx(i), yy(i)) // randomize i for Stochastic Gradient Descent (SGD) + + // forward prop: input -> hidden + u = a.ᵀ * x + α // hidden pre-activation vector +// z = sigmoid_ (u) // hidden vector from f0 = sigmoid activation + z = tanh_ (u) // hidden vector from f0 = tanh activation + + // forward prop: hidden -> output + v = b.ᵀ * z + β // output pre-activation vector +// ŷ = sigmoid_ (v) // output/prediction vector from f1 = sigmoid activation + ŷ = v // output/prediction vector from f1 = id activation + ε = y - ŷ // error vector [ε_1, ε_2] + + // backward prop: hidden <- output +// ƒ1 = ŷ * (1.0 - ŷ) // derivative (f1') for sigmoid +// ƒ1 = 1.0 - ŷ~^2 
// derivative (f1') for tanh + ƒ1 = VectorD.one (ŷ.dim) // derivative (f1') for id + δ1 = -ε * ƒ1 // delta correction vector via elementwise product + g1 = z ⊗ δ1 // transposed Jacobian matrix (gradients) + + // backward prop: input <- hidden +// ƒ0 = z * (1.0 - z) // derivative (f0') for sigmoid + ƒ0 = 1.0 - z~^2 // derivative (f0') for tanh +// ƒ0 = VectorD.one (z.dim) // derivative (f0') for id + δ0 = b * δ1 * ƒ0 // delta correction vector + g0 = x ⊗ δ0 // transposed Jacobian matrix (gradients) + + // parameter updates + b -= g1 * η // update output parameter/weight matrix + β -= δ1 * η // update output bias vector + a -= g0 * η // update hidden parameter/weight matrix + α -= δ0 * η // update hidden bias vector + yp(i) = ŷ // save i-th prediction + sse += ε * ε // sum of squared errors + val r2 = -(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println (s""" + u = $u + z = $z + v = $v + ŷ = $ŷ + ε = $ε + ƒ1 = $ƒ1 + δ1 = $δ1 + g1 = $g1 + ƒ0 = $ƒ0 + δ0 = $δ0 + g0 = $g0 + b = $b + β = $β + a = $a + α = $α + sse= $sse + r2 = $r2 + """) + end for + end for + + new Plot (null, yy(?, 0), yp(?, 0), "IGD for Three-layer Neural Net y_0", lines = true) + new Plot (null, yy(?, 1), yp(?, 1), "IGD for Three-layer Neural Net y_1", lines = true) + +end simpleNN7 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN8` main function illustrates the use of Incremental Gradient Descent (IGD) + * to optimize the weights/parameters of a simple neural network (3-layer (1 hidden) Neural Network). + * + * Prediction Equation: z = f0(A^T x + α) + * ŷ = f1(B^T z + β) + * + * where x is an input vector, z is the hidden layer vector, ŷ is a predicted output vector, f0, f1 + * are the activation functions, A and B are the parameter matrices, and α and β are the bias vectors. + * Illustrates the need for RESCALING the data. + * Computations done at the vector level, x -> z -> y. 
R^2 = .512 (no rescaling) .813 (rescaling) (for 20 epochs) + * > runMain scalation.modeling.simpleNN8 + */ +@main def simpleNN8 (): Unit = + + + import Example_AutoMPG.xy + import scalation.mathstat.MatrixDOps.⊗ // outer product of two vectors: v1 ⊗ v2 = v1 v1.ᵀ + // matrix where m_ij = v1_i * v2_j + def rescale (x: MatrixD, yes: Boolean): Unit = + if yes then + for j <- x.indices2 do + val (mu_j, sig_j) = (x(?, j).mean, x(?, j).stdev) + x(?, j) = (x(?, j) - mu_j) / sig_j // option: subtract mean, divide by standard deviation + end rescale + + val n = xy.dim2 - 1 // last column in xy + val (xx, y) = (xy.not(?, n), xy(?, n)) // (data/input matrix, response column) + val yy = MatrixD.fromVector (y) // turn the m-vector y into an m-by-1 matrix + + val sst = (yy - yy.mean).normSq // sum of squares total, per column + val a = MatrixD.fill (6, 3, 0.1) // parameter/weight matrix: input -> hidden + val α = VectorD (0.1, 0.1, 0.1) // hidden layer bias vector + val b = MatrixD ((3, 1), 0.1, // parameter/weight matrix: hidden -> output + 0.1, + 0.1) // initial weights/parameters (random in practice) + val β = VectorD (0.1) // output layer bias vector + + println (s"sst = $sst") + rescale (xx, true) // true => rescale, false => no rescaling + println (s"xx = $xx") + + val η = 0.1 // learning rate (to be tuned) + var x, u, z, v, ŷ, ε, ƒ1, δ1, ƒ0, δ0: VectorD = null + var g1, g0: MatrixD = null + val yp = new MatrixD (yy.dim, yy.dim2) // save each prediction in yp + + for epoch <- 1 to 20 do + println (s"Improvement step $epoch") + val sse = new VectorD (1) + for i <- xx.indices do + x = xx(i) // randomize i for Stochastic Gradient Descent (SGD) + val y = yy(i) + + // forward prop: input -> hidden + u = a.ᵀ * x + α // hidden pre-activation vector + z = sigmoid_ (u) // hidden vector from f0 = sigmoid activation + + // forward prop: hidden -> output + v = b.ᵀ * z + β // output pre-activation vector + ŷ = v // output/prediction vector from f1 = id activation + ε = y - ŷ // error 
vector [ε_1, ε_2] + + // backward prop: hidden <- output + ƒ1 = VectorD (1.0) // derivative (f1') for id + δ1 = -ε * ƒ1 // delta correction vector via elementwise product + g1 = z ⊗ δ1 // transposed Jacobian matrix (gradients) + + // backward prop: input <- hidden + ƒ0 = z * (1.0 - z) // derivative (f0') for sigmoid + δ0 = b * δ1 * ƒ0 // delta correction vector + g0 = x ⊗ δ0 // transposed Jacobian matrix (gradients) + + // parameter updates + b -= g1 * η // update output parameter/weight matrix + β -= δ1 * η // update output bias vector + a -= g0 * η // update hidden parameter/weight matrix + α -= δ0 * η // update hidden bias vector + yp(i) = ŷ // save i-th prediction + sse += ε * ε // sum of squared errors + end for + val r2 = -(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println (s""" + x = $x + u = $u + z = $z + v = $v + ŷ = $ŷ + ε = $ε + ƒ1 = $ƒ1 + δ1 = $δ1 + g1 = $g1 + ƒ0 = $ƒ0 + δ0 = $δ0 + g0 = $g0 + b = $b + β = $β + a = $a + α = $α + sse= $sse + r2 = $r2 + """) + end for + + new Plot (null, yy(?, 0), yp(?, 0), "IGD for Three-layer Neural Net y_0", lines = true) + +end simpleNN8 + diff --git a/src/main/scala/scalation/modeling/SimpleNN.scala.txt b/src/main/scala/scalation/modeling/SimpleNN.scala.txt new file mode 100644 index 000000000..1e186d6f9 --- /dev/null +++ b/src/main/scala/scalation/modeling/SimpleNN.scala.txt @@ -0,0 +1,640 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Mon Oct 20 20:16:25 EDT 2025 + * @see LICENSE (MIT style license file). 
+ * + * @note Simple Neural Networks Using Gradient Descent Optimization + * Tests both Gradient Descent (GD) and Incremental Gradient Descent (IGD) + * Simplified versions of `Regression`, `Perceptron`, `NeuralNet_2L`, and `NeuralNet_3L` + * for illustration/learning, not production + * + * @note the symbol ƒ indicates the derivative of function f, i.e., ƒ = f' + */ + +package scalation.modeling + +import scalation.? // wildcard: xy(?, 3) gives column 3 +import scalation.mathstat.{VectorD, MatrixD, Plot} +import scalation.mathstat.VectorDOps._ +import scalation.modeling.ActivationFun.{f_sigmoid, sigmoid_} // sigmoid activation functions +import scalation.modeling.ActivationFun.{f_tanh, tanh_} // tanh activation functions + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SimpleNN` object contains a simple dataset for testing Gradient Descent (GD) + * and Incremental Gradient Descent (IGD) optimization algorithms. + * @see https://nowak.ece.wisc.edu/MFML.pdf + */ +object SimpleNN: + + // 9 data points: One x1 x2 y1 y2 + val xy = MatrixD ((9, 5), 1.0, 0.1, 0.1, 0.5, 0.25, // dataset + 1.0, 0.1, 0.5, 0.3, 0.49, + 1.0, 0.1, 1.0, 0.2, 0.64, + + 1.0, 0.5, 0.1, 0.8, 0.04, + 1.0, 0.5, 0.5, 0.5, 0.25, + 1.0, 0.5, 1.0, 0.3, 0.49, + + 1.0, 1.0, 0.1, 1.0, 0.0, + 1.0, 1.0, 0.5, 0.8, 0.04, + 1.0, 1.0, 1.0, 0.5, 0.25) +end SimpleNN + +import SimpleNN.xy + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN1` main function illustrates the use of Gradient Descent (GD) to + * optimize the weights/parameters of a Multiple Linear Regression (MLR) model. + * + * grad g = -x.ᵀ * (y - ŷ) where pred ŷ = x * b + * + * Computations done at the vector level: X -> y. 
R^2 = .827 + * > runMain scalation.modeling.simpleNN1 + */ +@main def simpleNN1 (): Unit = + + val (x, y) = (xy(?, 0 until 3), xy(?, 3)) // input matrix, output/response vector + val sst = (y - y.mean).normSq // sum of squares total + val b = VectorD (0.1, 0.2, 0.1) // initial weights/parameters (random in practice) + + val η = 0.1 // learning rate (to be tuned) + var ŷ, ε, δ, g: VectorD = null + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + + // forward prop: input -> output + ŷ = x * b // prediction vector + ε = y - ŷ // error vector + + // backward prop: output -> input + δ = -ε // delta correction vector + g = x.ᵀ * δ // gradient vector + + // parameter update + b -= g * η // update parameter vector + val sse = ε.normSq // sum of squared errors + val r2 = 1.0 - sse / sst // R^2 + + println (s""" + ŷ = $ŷ + ε = $ε + δ = $δ + g = $g + b = $b + r2 = $r2 + """) + end for + + new Plot (null, y, ŷ, "IGD for MLR y", lines = true) + +end simpleNN1 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN2` main function illustrates the use of Incremental Gradient Descent (IGD) + * to optimize the weights/parameters of a Multiple Linear Regression (MLR) model. + * Computations done at the vector level: X -> y. 
R^2 = .933 + * > runMain scalation.modeling.simpleNN2 + */ +@main def simpleNN2 (): Unit = + + val (xx, yy) = (xy(?, 0 until 3), xy(?, 3)) // input matrix, output/response vector + val sst = (yy - yy.mean).normSq // sum of squares total + val b = VectorD (0.1, 0.2, 0.1) // initial weights/parameters (random in practice) + + val η = 0.2 // learning rate (to be tuned) + var ŷ, ε, δ: Double = -0.0 + var g: VectorD = null + val yp = new VectorD (yy.dim) // save each prediction in yp + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + var sse = 0.0 + for i <- xx.indices do + val (x, y) = (xx(i), yy(i)) // randomize i for Stochastic Gradient Descent (SGD) + + // forward prop: input -> output + ŷ = x ∙ b // prediction scalar + ε = y - ŷ // error scalar + + // backward prop: output -> input + δ = -ε // delta correction scalar + g = x * δ // gradient vector + + // parameter update + b -= g * η // update parameter vector + yp(i) = ŷ // save i-th prediction + sse += ε * ε // sum of squared errors + end for + val r2 = 1.0 - sse / sst // R^2 + + println (s""" + ŷ = $ŷ + ε = $ε + δ = $δ + g = $g + b = $b + r2 = $r2 + """) + end for + + new Plot (null, yy, yp, "IGD for MLR y", lines = true) + +end simpleNN2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN3` main function illustrates the use of Gradient Descent (GD) to + * optimize the weights/parameters of a simple neural network (Perceptron). + * Originally, perceptrons used the Heavyside activation function for binary classification + * problems, but have been extended to multi-class classification and regression problems. + * Furthermore, when the activation function is identity, the perceptron models are equivalent + * multiple linear regression models. + * + * grad g = -x.ᵀ * (y - ŷ) * ƒ where pred ŷ = f(x * b) + * + * Computations done at the vector level: X -> y. 
R^2 = .865 + * > runMain scalation.modeling.simpleNN3 + */ +@main def simpleNN3 (): Unit = + + val (x, y) = (xy(?, 0 until 3), xy(?, 3)) // input matrix, output/response vector + val sst = (y - y.mean).normSq // sum of squares total + val b = VectorD (0.1, 0.2, 0.1) // initial weights/parameters (random in practice) + + val η = 2.5 // learning rate (to be tuned) + var u, ŷ, ε, ƒ, δ, g: VectorD = null + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + + // forward prop: input -> output + u = x * b // pre-activation vector + ŷ = sigmoid_ (u) // prediction vector + ε = y - ŷ // error vector + + // backward prop: output -> input + ƒ = ŷ * (1.0 - ŷ) // derivative (f') for sigmoid + δ = -ε * ƒ // delta correction vector + g = x.ᵀ * δ // gradient vector + + // parameter update + b -= g * η // update parameter vector + val sse = ε.normSq // sum of squared errors + val r2 = 1.0 - sse / sst // R^2 + + println (s""" + u = $u + ŷ = $ŷ + ε = $ε + ƒ = $ƒ + δ = $δ + g = $g + b = $b + r2 = $r2 + """) + + end for + new Plot (null, y, ŷ, "GD for Perceptron y", lines = true) + +end simpleNN3 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN4` main function illustrates the use of Gradient Descent (GD) to optimize + * the weights/parameters of a simple neural network (2-layer (no hidden) Neural Network). + * + * Prediction Equation: ŷ = f(B^T x) = f(W x + b) + * + * where x is an input vector, ŷ is a predicted output vector, f is the activation function, + * B is the parameter matrix, W is the weight matrix, and b is the bias vector. + * @note, PyTorch stores the weight matrix W [num_out, num_in] transposed from B to make + * back-propagation more efficient. + * Computations done at the vector level: X -> y. 
R^2 = .865, .826 + * > runMain scalation.modeling.simpleNN4 + */ +@main def simpleNN4 (): Unit = + + val (x, y) = (xy(?, 0 until 3), xy(?, 3 until 5)) // input matrix, output/response matrix + val sst = (y - y.mean).normSq // sum of squares total, per column + val b = MatrixD ((3, 2), 0.1, 0.1, + 0.2, 0.1, + 0.1, 0.1) // initial weights/parameters (random in practice) + + val η = 2.5 // learning rate (to be tuned) + var u, ŷ, ε, ƒ, δ, g: VectorD = null + + for epoch <- 1 to 10; k <- y.indices2 do + println (s"Improvement step $epoch") + + // forward prop: input -> output + u = x * b(?, k) // pre-activation vector + ŷ = sigmoid_ (u) // prediction vector + ε = y(?, k) - ŷ // error vector for column k + + // backward prop: output -> input + ƒ = ŷ * (1.0 - ŷ) // derivative (f') for sigmoid + δ = -ε * ƒ // delta correction vector + g = x.ᵀ * δ // gradient vector + + // parameter update + b(?, k) = b(?, k) - g * η // update parameter matrix, column k + val sse = ε.normSq // sum of squared errors + val r2 = 1.0 - sse / sst(k) // R^2 + + println (s""" + for k = $k + u = $u + ŷ = $ŷ + ε = $ε + ƒ = $ƒ + δ = $δ + g = $g + b = $b + r2 = $r2 + """) + end for + + new Plot (null, y(?, 1), ŷ, "GD for Two-layer Neural Net y_1", lines = true) + +end simpleNN4 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN5` main function illustrates the use of Gradient Descent (GD) to optimize + * the weights/parameters of a simple neural network (2-layer (no hidden) Neural Network). + * Computations done at the matrix level: X -> Y. 
R^2 = .865, .826 + * > runMain scalation.modeling.simpleNN5 + */ +@main def simpleNN5 (): Unit = + + import scalation.mathstat.MatrixDOps._ // may clash with VectorDOps + + val (x, y) = (xy(?, 0 until 3), xy(?, 3 until 5)) // input matrix, output/response matrix + val sst = (y - y.mean).normSq // sum of squares total, per column + println (s"sst = $sst") + + val b = MatrixD ((3, 2), 0.1, 0.1, + 0.2, 0.1, + 0.1, 0.1) // initial weights/parameters (random in practice) + +// val η = 2.6 // learning rate (to be tuned) above original value too high -1 +// val η = 2.5 // learning rate (to be tuned) original value too high -1 +// val η = 1.4 // learning rate (to be tuned) +// val η = 1.0 // learning rate (to be tuned) +// val η = 0.9 // learning rate (to be tuned) +// val η = 0.6 // learning rate (to be tuned) +// val η = 0.5 // learning rate (to be tuned) + val η = 0.3 // learning rate (to be tuned) +// val η = 0.29 // learning rate (to be tuned) best for var 1 83% +// val η = 0.28 // learning rate (to be tuned) +// val η = 0.26 // learning rate (to be tuned) +// val η = 0.25 // learning rate (to be tuned) +// val η = 0.2 // learning rate (to be tuned) +// val η = 0.1545 // learning rate (to be tuned) +// val η = 0.15 // learning rate (to be tuned) +// val η = 0.1235 // learning rate (to be tuned) +// val η = 0.1 // learning rate (to be tuned) +// val η = 0.05 // learning rate (to be tuned) +// val η = 0.03 // learning rate (to be tuned) +// val η = 0.01 // learning rate (to be tuned) + var u, ŷ, ε, ƒ, δ, g: MatrixD = null + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + + // forward prop: input -> output + u = x * b // pre-activation matrix +// ŷ = f_sigmoid.fM (u) // prediction matrix + ŷ = f_tanh.fM (u) // prediction matrix + ε = y - ŷ // error matrix + + // backward prop: output -> input +// ƒ = ŷ ⊙ (1.0 - ŷ) // derivative (f') for sigmoid + ƒ = 1.0 - ŷ~^2 // derivative (f') for tanh + δ = -ε ⊙ ƒ // delta correction matrix via Hadamard product + g 
= x.ᵀ * δ // transposed Jacobian matrix (gradients) + + // parameter update + b -= g * η // update parameter matrix + val sse = ε.normSq // sum of squared errors + val r2 = -(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println (s""" + u = $u + ŷ = $ŷ + ε = $ε + ƒ = $ƒ + δ = $δ + g = $g + b = $b + sse= $sse + r2 = $r2 + """) + end for + + new Plot (null, y(?, 0), ŷ(?, 0), "GD for Two-layer Neural Net y_0", lines = true) + new Plot (null, y(?, 1), ŷ(?, 1), "GD for Two-layer Neural Net y_1", lines = true) + +end simpleNN5 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN6` main function illustrates the use of Gradient Descent (GD) to optimize + * the weights/parameters of a simple neural network (3-layer (1 hidden) Neural Network). + * Computations done at the matrix level: X -> Z -> Y. R^2 = .299, .432 (requires more epochs) + * > runMain scalation.modeling.simpleNN6 + */ +@main def simpleNN6 (): Unit = + + import scalation.mathstat.MatrixDOps._ // may clash with VectorDOps + + val (x, y) = (xy(?, 1 until 3), xy(?, 3 until 5)) // input matrix, output/response matrix + val sst = (y - y.mean).normSq // sum of squares total, per column + val a = MatrixD ((2, 3), 0.1, 0.1, 0.1, // parameter/weight matrix: input -> hidden + 0.1, 0.1, 0.1) + val α = VectorD (0.1, 0.1, 0.1) // hidden layer bias vector + val b = MatrixD ((3, 2), 0.1, 0.1, // parameter/weight matrix: hidden -> output + 0.2, 0.1, + 0.1, 0.1) // initial weights/parameters (random in practice) + val β = VectorD (0.1, 0.1) // output layer bias vector + + val η = 10.0 // learning rate (to be tuned) + var u, z, v, ŷ, ε, ƒ1, δ1, g1, ƒ0, δ0, g0: MatrixD = null + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + + // forward prop: input -> hidden + u = x * a + α // hidden pre-activation matrix + z = f_sigmoid.fM (u) // hidden matrix from f0 activation + + // forward prop: hidden -> output + v = z * b + β // output pre-activation 
matrix + ŷ = f_sigmoid.fM (v) // output/prediction matrix from f1 activation + ε = y - ŷ // error matrix + + // backward prop: hidden <- output + ƒ1 = ŷ ⊙ (1.0 - ŷ) // derivative (f1') for sigmoid + δ1 = -ε ⊙ ƒ1 // delta correction matrix via Hadamard product + g1 = z.ᵀ * δ1 // transposed Jacobian matrix (gradients) + + // backward prop: input <- hidden + ƒ0 = z ⊙ (1.0 - z) // derivative (f0') for sigmoid + δ0 = (δ1 * b.ᵀ) ⊙ ƒ0 // delta correction matrix + g0 = x.ᵀ * δ0 // transposed Jacobian matrix (gradients) + + // parameter updates + b -= g1 * η // update output parameter/weight matrix + β -= δ1.mean * η // update output bias vector + a -= g0 * η // update hidden parameter/weight matrix + α -= δ0.mean * η // update hidden bias vector + val sse = ε.normSq // sum of squared errors + val r2 = -(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println (s""" + u = $u + z = $z + v = $v + ŷ = $ŷ + ε = $ε + ƒ1 = $ƒ1 + δ1 = $δ1 + g1 = $g1 + ƒ0 = $ƒ0 + δ0 = $δ0 + g0 = $g0 + b = $b + β = $β + a = $a + α = $α + r2 = $r2 + """) + end for + + new Plot (null, y(?, 0), ŷ(?, 0), "GD for Three-layer Neural Net y_0", lines = true) + new Plot (null, y(?, 1), ŷ(?, 1), "GD for Three-layer Neural Net y_1", lines = true) + +end simpleNN6 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN7` main function illustrates the use of Incremental Gradient Descent (IGD) + * to optimize the weights/parameters of a simple neural network (3-layer (1 hidden) Neural Network). + * + * Prediction Equation: z = f0(A^T x + α) + * ŷ = f1(B^T z + β) + * + * where x is an input vector, z is the hidden layer vector, ŷ is a predicted output vector, f0, f1 + * are the activation functions, A and B are the parameter matrices, and α and β are the bias vectors. + * @note: Stochastic Gradient Descent (SGD) adds stochastic selection to IGD. In practice, + * mini-batches of size 32, 64, or 128 are commonly used. 
+ * Computations done at the vector level, x -> z -> y. R^2 = .768, .869 (for 10 epochs) + * > runMain scalation.modeling.simpleNN7 + */ +@main def simpleNN7 (): Unit = + + import scalation.mathstat.MatrixDOps.⊗ // outer product of two vectors: v1 ⊗ v2 = v1 v1.ᵀ + // matrix where m_ij = v1_i * v2_j + + val (xx, yy) = (xy(?, 1 until 3), xy(?, 3 until 5)) // input matrix, output/response matrix + val sst = (yy - yy.mean).normSq // sum of squares total, per column + val a = MatrixD ((2, 3), 0.2, 0.3, 0.2, // parameter/weight matrix: input -> hidden + -0.1, -0.2, -0.1) + val α = VectorD (-0.1, -0.1, -0.1) // hidden layer bias vector + val b = MatrixD ((3, 2), 0.1, -0.1, // parameter/weight matrix: hidden -> output + 0.2, -0.2, + 0.1, -0.1) // initial weights/parameters (random in practice) + val β = VectorD (-0.1, 0.1) // output layer bias vector + + println (s"sst = $sst, xx = $xx, yy = $yy") + +// val η = 20.0 // learning rate (to be tuned) + val η = 1.0 // learning rate (to be tuned) + var u, z, v, ŷ, ε, ƒ1, δ1, ƒ0, δ0: VectorD = null + var g1, g0: MatrixD = null + val yp = new MatrixD (yy.dim, yy.dim2) // save each prediction in yp + +// try f0 = tanh -> f1 = id + + for epoch <- 1 to 1 do + println (s"Improvement step $epoch") + val sse = new VectorD (2) +// for i <- xx.indices do + for i <- 0 to 0 do + val (x, y) = (xx(i), yy(i)) // randomize i for Stochastic Gradient Descent (SGD) + + // forward prop: input -> hidden + u = a.ᵀ * x + α // hidden pre-activation vector +// z = sigmoid_ (u) // hidden vector from f0 = sigmoid activation + z = tanh_ (u) // hidden vector from f0 = tanh activation + + // forward prop: hidden -> output + v = b.ᵀ * z + β // output pre-activation vector +// ŷ = sigmoid_ (v) // output/prediction vector from f1 = sigmoid activation + ŷ = v // output/prediction vector from f1 = id activation + ε = y - ŷ // error vector [ε_1, ε_2] + + // backward prop: hidden <- output +// ƒ1 = ŷ * (1.0 - ŷ) // derivative (f1') for sigmoid +// ƒ1 = 1.0 - ŷ~^2 
// derivative (f1') for tanh + ƒ1 = VectorD.one (ŷ.dim) // derivative (f1') for id + δ1 = -ε * ƒ1 // delta correction vector via elementwise product + g1 = z ⊗ δ1 // transposed Jacobian matrix (gradients) + + // backward prop: input <- hidden +// ƒ0 = z * (1.0 - z) // derivative (f0') for sigmoid + ƒ0 = 1.0 - z~^2 // derivative (f0') for tanh +// ƒ0 = VectorD.one (z.dim) // derivative (f0') for id + δ0 = b * δ1 * ƒ0 // delta correction vector + g0 = x ⊗ δ0 // transposed Jacobian matrix (gradients) + + // parameter updates + b -= g1 * η // update output parameter/weight matrix + β -= δ1 * η // update output bias vector + a -= g0 * η // update hidden parameter/weight matrix + α -= δ0 * η // update hidden bias vector + yp(i) = ŷ // save i-th prediction + sse += ε * ε // sum of squared errors + val r2 = -(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println (s""" + u = $u + z = $z + v = $v + ŷ = $ŷ + ε = $ε + ƒ1 = $ƒ1 + δ1 = $δ1 + g1 = $g1 + ƒ0 = $ƒ0 + δ0 = $δ0 + g0 = $g0 + b = $b + β = $β + a = $a + α = $α + sse= $sse + r2 = $r2 + """) + end for + end for + + new Plot (null, yy(?, 0), yp(?, 0), "IGD for Three-layer Neural Net y_0", lines = true) + new Plot (null, yy(?, 1), yp(?, 1), "IGD for Three-layer Neural Net y_1", lines = true) + +end simpleNN7 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleNN8` main function illustrates the use of Incremental Gradient Descent (IGD) + * to optimize the weights/parameters of a simple neural network (3-layer (1 hidden) Neural Network). + * + * Prediction Equation: z = f0(A^T x + α) + * ŷ = f1(B^T z + β) + * + * where x is an input vector, z is the hidden layer vector, ŷ is a predicted output vector, f0, f1 + * are the activation functions, A and B are the parameter matrices, and α and β are the bias vectors. + * Illustrates the need for RESCALING the data. + * Computations done at the vector level, x -> z -> y. 
R^2 = .512 (no rescaling) .813 (rescaling) (for 20 epochs) + * > runMain scalation.modeling.simpleNN8 + */ +@main def simpleNN8 (): Unit = + + + import Example_AutoMPG.xy + import scalation.mathstat.MatrixDOps.⊗ // outer product of two vectors: v1 ⊗ v2 = v1 v1.ᵀ + // matrix where m_ij = v1_i * v2_j + def rescale (x: MatrixD, yes: Boolean): Unit = + if yes then + for j <- x.indices2 do + val (mu_j, sig_j) = (x(?, j).mean, x(?, j).stdev) + x(?, j) = (x(?, j) - mu_j) / sig_j // option: subtract mean, divide by standard deviation + end rescale + + val n = xy.dim2 - 1 // last column in xy + val (xx, y) = (xy.not(?, n), xy(?, n)) // (data/input matrix, response column) + val yy = MatrixD.fromVector (y) // turn the m-vector y into an m-by-1 matrix + + val sst = (yy - yy.mean).normSq // sum of squares total, per column + val a = MatrixD.fill (6, 3, 0.1) // parameter/weight matrix: input -> hidden + val α = VectorD (0.1, 0.1, 0.1) // hidden layer bias vector + val b = MatrixD ((3, 1), 0.1, // parameter/weight matrix: hidden -> output + 0.1, + 0.1) // initial weights/parameters (random in practice) + val β = VectorD (0.1) // output layer bias vector + + println (s"sst = $sst") + rescale (xx, true) // true => rescale, false => no rescaling + println (s"xx = $xx") + + val η = 0.1 // learning rate (to be tuned) + var x, u, z, v, ŷ, ε, ƒ1, δ1, ƒ0, δ0: VectorD = null + var g1, g0: MatrixD = null + val yp = new MatrixD (yy.dim, yy.dim2) // save each prediction in yp + + for epoch <- 1 to 20 do + println (s"Improvement step $epoch") + val sse = new VectorD (1) + for i <- xx.indices do + x = xx(i) // randomize i for Stochastic Gradient Descent (SGD) + val y = yy(i) + + // forward prop: input -> hidden + u = a.ᵀ * x + α // hidden pre-activation vector + z = sigmoid_ (u) // hidden vector from f0 = sigmoid activation + + // forward prop: hidden -> output + v = b.ᵀ * z + β // output pre-activation vector + ŷ = v // output/prediction vector from f1 = id activation + ε = y - ŷ // error 
vector [ε_1, ε_2] + + // backward prop: hidden <- output + ƒ1 = VectorD (1.0) // derivative (f1') for id + δ1 = -ε * ƒ1 // delta correction vector via elementwise product + g1 = z ⊗ δ1 // transposed Jacobian matrix (gradients) + + // backward prop: input <- hidden + ƒ0 = z * (1.0 - z) // derivative (f0') for sigmoid + δ0 = b * δ1 * ƒ0 // delta correction vector + g0 = x ⊗ δ0 // transposed Jacobian matrix (gradients) + + // parameter updates + b -= g1 * η // update output parameter/weight matrix + β -= δ1 * η // update output bias vector + a -= g0 * η // update hidden parameter/weight matrix + α -= δ0 * η // update hidden bias vector + yp(i) = ŷ // save i-th prediction + sse += ε * ε // sum of squared errors + end for + val r2 = -(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println (s""" + x = $x + u = $u + z = $z + v = $v + ŷ = $ŷ + ε = $ε + ƒ1 = $ƒ1 + δ1 = $δ1 + g1 = $g1 + ƒ0 = $ƒ0 + δ0 = $δ0 + g0 = $g0 + b = $b + β = $β + a = $a + α = $α + sse= $sse + r2 = $r2 + """) + end for + + new Plot (null, yy(?, 0), yp(?, 0), "IGD for Three-layer Neural Net y_0", lines = true) + +end simpleNN8 + diff --git a/src/main/scala/scalation/modeling/SimplePerceptron.scala b/src/main/scala/scalation/modeling/SimplePerceptron.scala new file mode 100644 index 000000000..9eae29d40 --- /dev/null +++ b/src/main/scala/scalation/modeling/SimplePerceptron.scala @@ -0,0 +1,106 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Mon Oct 20 20:16:25 EDT 2025 + * @see LICENSE (MIT style license file). 
+ * + * @note Simple Neural Networks Using Gradient Descent Optimization + * Tests both Gradient Descent (GD) and Incremental Gradient Descent (IGD) + * Simplified versions of `Regression`, `Perceptron`, `NeuralNet_2L`, and `NeuralNet_3L` + * for illustration/learning, not production + * + * @note the symbol ƒ indicates the derivative of function f, i.e., ƒ = f' + */ + +package scalation +package modeling + +import scalation.mathstat.{VectorD, MatrixD, Plot} +import scalation.mathstat.VectorDOps._ +import ActivationFun._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SimpleNN` object contains a simple dataset for testing Gradient Descent (GD) + * and Incremental Gradient Descent (IGD) optimization algorithms. + * @see https://nowak.ece.wisc.edu/MFML.pdf + */ +object SimplePerceptron: + + // 9 data points: One x1 x2 y1 y2 + val xy = MatrixD ((9, 5), 1.0, 0.1, 0.1, 0.5, 0.25, // dataset + 1.0, 0.1, 0.5, 0.3, 0.49, + 1.0, 0.1, 1.0, 0.2, 0.64, + + 1.0, 0.5, 0.1, 0.8, 0.04, + 1.0, 0.5, 0.5, 0.5, 0.25, + 1.0, 0.5, 1.0, 0.3, 0.49, + + 1.0, 1.0, 0.1, 1.0, 0.0, + 1.0, 1.0, 0.5, 0.8, 0.04, + 1.0, 1.0, 1.0, 0.5, 0.25) + + val (x, y) = (xy(?, 0 until 3), xy(?, 3)) // input matrix, output/response vector + +end SimplePerceptron + +import SimplePerceptron._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simplePerceptron1` main function illustrates the use of Gradient Descent (GD) to + * optimize the weights/parameters of a simple neural network (Perceptron). + * Originally, perceptrons used the Heavyside activation function for binary classification + * problems, but have been extended to multi-class classification and regression problems. + * Furthermore, when the activation function is identity, the perceptron models are equivalent + * multiple linear regression models. + * + * grad g = -x.ᵀ * (y - ŷ) * ƒ where pred ŷ = f(x * b) + * + * Computations done at the vector level: X -> y. 
R^2 = .865 + * > runMain scalation.modeling.simplePerceptron1 + */ +@main def simplePerceptron1 (): Unit = + + val sst = (y - y.mean).normSq // sum of squares total + + val b = VectorD (0.1, 0.2, 0.1) // initial weights/parameters (random in practice) + + val η = 2.5 // learning rate (to be tuned) + var u, ŷ, ε, ƒ, δ, g: VectorD = null + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + + // forward prop: input -> output + u = x * b // pre-activation vector + ŷ = sigmoid_ (u) // prediction vector + ε = y - ŷ // error vector + + // backward prop: output -> input + ƒ = ŷ * (1.0 - ŷ) // derivative (f') for sigmoid + δ = -ε * ƒ // delta correction vector + g = x.ᵀ * δ // gradient vector + + // parameter update + b -= g * η // update parameter vector + + val sse = ε.normSq // sum of squared errors + val r2 = 1.0 - sse / sst // R^2 + + println (s""" + u = $u + ŷ = $ŷ + ε = $ε + ƒ = $ƒ + δ = $δ + g = $g + b = $b + sse = $sse + r2 = $r2 + """) + + end for + new Plot (null, y, ŷ, "GD for Perceptron y", lines = true) + +end simplePerceptron1 + diff --git a/src/main/scala/scalation/modeling/SimpleRegression.scala b/src/main/scala/scalation/modeling/SimpleRegression.scala index db0d9f8f4..108364e9c 100644 --- a/src/main/scala/scalation/modeling/SimpleRegression.scala +++ b/src/main/scala/scalation/modeling/SimpleRegression.scala @@ -30,16 +30,18 @@ import scalation.mathstat._ */ class SimpleRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null) extends Predictor (x, y, if fname_ == null then null else fname_.slice (0, 2), null) - with Fit (dfm = 1, df = x.dim - 2) + with Fit (dfr = 1, df = x.dim - 2) with NoSubModels: private val debug = debugf ("SimpleRegression", true) // debug function private val flaw = flawf ("SimpleRegression") // flaw function - modelName = "SimpleRegression" + _modelName = s"SimpleRegression_${fname(1)}" if x.dim2 < 2 then flaw ("init", s"data matrix must have at least 2 columns: ${x.dim2}") + override def getBest: 
BestStep = super [NoSubModels].getBest + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Train the predictor by fitting the parameter vector (b-vector) in the * simple regression equation @@ -93,9 +95,9 @@ class SimpleRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null) /** Compute the confidence intervals for the parameters b_0 and b_1, returning * their interval half widths. * @param x_ the training/full data/input matrix - * @param p the confidence level + * @param p_ the confidence level (1 - alpha) */ - def confInterval (x_ : MatrixD = getX, p: Double = .95): VectorD = + def confInterval (x_ : MatrixD = getX, p_ : Double = .95): VectorD = val x1 = x_(?, 1) // second column (column 1) val m = x1.dim // number of instances val df = m - 2 // DoF for error e @@ -106,8 +108,8 @@ class SimpleRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null) val se_0 = s * sqrt (1.0 / m + x1.mean~^2 / s_xx) // standard error for b_0 val se_1 = s / sqrt (s_xx) // standard error for b_1 - val pp = 1.0 - (1.0 - p) / 2.0 // e.g., .95 --> .975 (two tails) - val t = random.Quantile.studentTInv (pp, df) // critical value from Student's t distribution + val p = 1.0 - (1.0 - p_) / 2.0 // e.g., .95 --> .975 (two tails) + val t = random.Quantile.studentTInv (p, df) // critical value from Student's t distribution debug ("confInterval", s"s = $s, s_xx = $s_xx, se_0 = $se_0, se_1 = $se_1, t = $t") @@ -172,7 +174,7 @@ object SimpleRegression: val j = x.corr (y, 1).argmag () + 1 // use best column val fname_ = Array (fname(0), fname(j)) println (s"best: column j = $j, fname(j) = ${fname(j)}") - new SimpleRegression (MatrixD (x(?, 0), x(?, j)).transpose, y, fname_) + new SimpleRegression (MatrixD (x(?, 0), x(?, j)).ᵀ, y, fname_) end best //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -183,7 +185,6 @@ object SimpleRegression: def coeff (x: VectorD, y: VectorD): VectorD = if x.dim != y.dim then flaw 
("coeff", s"dimensions do not agree: x.dim = ${x.dim} != y.dim = ${y.dim}") - end if val b1 = (x cov y) / x.variance val b0 = y.mean - b1 * x.mean VectorD (b0, b1) @@ -295,7 +296,7 @@ end simpleRegressionTest2 val yq = qrg.trainNtest ()()._1 new Plot (x, y, yq, "plot y and yq vs. x", lines = true) - val trg = new TranRegression (ox, y, tran = sqrt, itran = sq) + val trg = new TranRegression (ox, y, yℱ = RootForm ()) val yt = trg.trainNtest ()()._1 new Plot (x, y, yt, "plot y and yt vs. x", lines = true) @@ -450,7 +451,8 @@ end simpleRegressionTest7 @main def simpleRegressionTest8 (): Unit = val x = VectorD (1, 2, 3, 4, 5, 6) - val y = VectorD (1, 3, 3, 5, 4, 4) +// val y = VectorD (1, 3, 3, 5, 4, 4) + val y = VectorD (1, 3, 5, 6, 4, 2) val ox = MatrixD.one (x.dim) :^+ x val x_fname = Array ("x") @@ -464,7 +466,7 @@ end simpleRegressionTest7 new Plot (null, y, nmod.predict (nmod.getX), s"${nmod.modelName} y vs yp", lines = true) banner ("SimplerRegression") - val reg = new SimplerRegression (MatrixD (x).transpose, y, x_fname) + val reg = new SimplerRegression (MatrixD (x).ᵀ, y, x_fname) reg.trainNtest ()() println (s"mse = ${reg.mse_}") println (reg.summary ()) @@ -479,7 +481,7 @@ end simpleRegressionTest7 new Plot (null, y, mod.predict (mod.getX), s"${mod.modelName} y vs yp", lines = true) banner ("SymbolicRegression.quadratic") - val qrg = SymbolicRegression.quadratic (MatrixD (x).transpose, y, x_fname) + val qrg = SymbolicRegression.quadratic (MatrixD (x).ᵀ, y, x_fname) qrg.trainNtest ()() println (s"mse = ${qrg.mse_}") // println (s"confI = ${qrg.confInterval ()}") @@ -487,7 +489,7 @@ end simpleRegressionTest7 new Plot (null, y, qrg.predict (qrg.getX), s"${qrg.modelName} y vs yp", lines = true) banner ("TranRegression") - val trd = new TranRegression (ox, y, ox_fname, tran = sqrt, itran = sq) + val trd = new TranRegression (ox, y, ox_fname, yℱ = RootForm ()) trd.trainNtest ()() println (s"mse = ${trd.mse_}") // println (s"confI = ${trd.confInterval ()}") diff 
--git a/src/main/scala/scalation/modeling/SimplerRegression.scala b/src/main/scala/scalation/modeling/SimplerRegression.scala index 917eb008b..ef9f200b6 100644 --- a/src/main/scala/scalation/modeling/SimplerRegression.scala +++ b/src/main/scala/scalation/modeling/SimplerRegression.scala @@ -27,10 +27,12 @@ import scalation.mathstat._ */ class SimplerRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null) extends Predictor (x, y, if fname_ == null then null else fname_.slice (0, 1), null) - with Fit (dfm = 1, df = x.dim - 1) + with Fit (dfr = 1, df = x.dim - 1) with NoSubModels: - modelName = "SimplerRegression" + _modelName = "SimplerRegression" + + override def getBest: BestStep = super [NoSubModels].getBest //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Train the predictor by fitting the parameter vector (b-vector) in the @@ -102,7 +104,7 @@ object SimplerRegression: * @param fname_ the feature/variable names */ def apply (x: VectorD, y: VectorD, fname: Array [String]): SimplerRegression = - new SimplerRegression (MatrixD (x).transpose, y, fname) + new SimplerRegression (MatrixD (x).ᵀ, y, fname) end apply end SimplerRegression diff --git a/src/main/scala/scalation/modeling/SumQueue.scala b/src/main/scala/scalation/modeling/SumQueue.scala index cece36315..d5b3cfe82 100644 --- a/src/main/scala/scalation/modeling/SumQueue.scala +++ b/src/main/scala/scalation/modeling/SumQueue.scala @@ -86,7 +86,6 @@ class SumSqQueue (q: Int = 5): val yy = queue.dequeue () sum -= yy sumSq -= yy ~^ 2 - end if sum += y sumSq += y ~^ 2 queue += y diff --git a/src/main/scala/scalation/modeling/SymLassoRegression.scala b/src/main/scala/scalation/modeling/SymLassoRegression.scala index 15626e302..c6d37e722 100644 --- a/src/main/scala/scalation/modeling/SymLassoRegression.scala +++ b/src/main/scala/scalation/modeling/SymLassoRegression.scala @@ -11,23 +11,41 @@ package scalation package modeling -import scala.collection.mutable.Set +import 
scala.collection.mutable.{LinkedHashSet => LSET} import scalation.mathstat._ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `SymLassoRegression` object supports symbolic ridge regression that allows +/** The `SymLassoRegression` class supports symbolic ridge regression that allows * variables/columns to be raised to various powers, e.g., x^2, x^3, x^.5. * Note, x~^p is a column-wise power function (each column raised to p-th power). * IMPORTANT: must not include INTERCEPT (column of ones) in initial data matrix), * i.e., DO NOT include a column of ones in x (will cause singularity in expanded matrix). * Method signatures are the as same as for `SymbolicRegression`, except there is * NO intercept ARGUMENT. + * @param x the initial data/input m-by-n matrix (before expansion) + * must not include an intercept column of all ones + * @param y the response/output m-vector + * @param fname the feature/variable names + * @param powers the set of powers to raise matrix x to + * @param hparam the hyper-parameters (use Regression.hp for default) + */ +class SymLassoRegression (x: MatrixD, y: VectorD, fname: Array [String], + powers: LSET [Double], hparam: HyperParameter = LassoRegression.hp) + extends LassoRegression (x, y, fname, hparam): + + _modelName = s"SymLassoRegression_$powers" + +end SymLassoRegression + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SymLassoRegression` object provides factory methods. */ object SymLassoRegression: //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `LassoRegression` object from a data matrix and a response vector. + /** Create a `SymLassoRegression` object from a data matrix and a response vector. * Partial support for "Symbolic Lasso Regression" as matrix x can be raised * to several powers (e.g., x^1 and x^2). Note, x^1 is automatically included. 
* @see `SymbolicRegression.buildMatrix` @@ -44,19 +62,16 @@ object SymLassoRegression: * adds x0 x1^(-2) */ def apply (x: MatrixD, y: VectorD, fname: Array [String] = null, - powers: Set [Double] = null, intercept: Boolean = true, + powers: LSET [Double] = null, intercept: Boolean = true, cross: Boolean = true, cross3: Boolean = false, hparam: HyperParameter = LassoRegression.hp, - terms: Array [Xj2p]*): LassoRegression = + terms: Array [Xj2p]*): SymLassoRegression = val fname_ = if fname != null then fname else x.indices2.map ("x" + _).toArray // default feature/variable names - val (xx, f_name) = SymbolicRegression.buildMatrix (x, fname_, powers, intercept, + val (xx, f_name) = SymbolicRegression.buildMatrix (x, fname_, powers, null, intercept, cross, cross3, terms*) - val mod = new LassoRegression (xx, y, f_name, hparam) - mod.modelName = "SymLassoRegression" + (if cross then "X" else "") + - (if cross3 then "XX" else "") - mod + new SymLassoRegression (xx, y, f_name, powers, hparam) end apply //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -75,16 +90,16 @@ object SymLassoRegression: * adds x0 x1^(-2) */ def rescale (x: MatrixD, y: VectorD, fname: Array [String] = null, - powers: Set [Double] = null, intercept: Boolean = true, + powers: LSET [Double] = null, intercept: Boolean = true, cross: Boolean = true, cross3: Boolean = false, hparam: HyperParameter = Regression.hp, - terms: Array [Xj2p]*): LassoRegression = + terms: Array [Xj2p]*): SymLassoRegression = val xn = normalize ((x.mean, x.stdev)) (x) apply (xn, y, fname, powers, intercept, cross, cross3, hparam, terms*) end rescale //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `LassoRegression` object that uses multiple regression to fit a quadratic + /** Create a `SymLassoRegression` object that uses multiple regression to fit a quadratic * surface to the data. 
For example in 2D, the quadratic regression equation is * y = b dot x + e = [b_0, ... b_k] dot [x_0, x_0^2, x_1, x_1^2] + e * @param x the initial data/input m-by-n matrix (before quadratic term expansion) @@ -97,14 +112,12 @@ object SymLassoRegression: */ def quadratic (x: MatrixD, y: VectorD, fname: Array [String] = null, intercept: Boolean = true, cross: Boolean = false, - hparam: HyperParameter = LassoRegression.hp): LassoRegression = - val mod = apply (x, y, fname, Set (1, 2), intercept, cross, false, hparam) - mod.modelName = "SymLassoRegression.quadratic" + (if cross then "X" else "") - mod + hparam: HyperParameter = LassoRegression.hp): SymLassoRegression = + apply (x, y, fname, LSET (1, 2), intercept, cross, false, hparam) end quadratic //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `LassoRegression` object that uses multiple regression to fit a cubic + /** Create a `SymLassoRegression` object that uses multiple regression to fit a cubic * surface to the data. For example in 2D, the cubic regression equation is * y = b dot x + e = [b_0, ... 
b_k] dot [x_0, x_0^2, x_0^3, * x_1, x_1^2, x_1^3, @@ -120,11 +133,8 @@ object SymLassoRegression: */ def cubic (x: MatrixD, y: VectorD, fname: Array [String] = null, intercept: Boolean = true, cross: Boolean = false, cross3: Boolean = false, - hparam: HyperParameter = LassoRegression.hp): LassoRegression = - val mod = apply (x, y, fname, Set (1, 2, 3), intercept, cross, cross3, hparam) - mod.modelName = "SymLassoRegression.cubic" + (if cross then "X" else "") + - (if cross3 then "X" else "") - mod + hparam: HyperParameter = LassoRegression.hp): SymLassoRegression = + apply (x, y, fname, LSET (1, 2, 3), intercept, cross, cross3, hparam) end cubic end SymLassoRegression @@ -134,7 +144,7 @@ import Example_AutoMPG._ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `symLassoRegressionTest` main function tests the `SymLassoRegression` * object using the AutoMPG dataset. Assumes no missing values. - * It tests custom "Symbolic Lasso Regression", with powers specified in "Set (...)" and + * It tests custom "Symbolic Lasso Regression", with powers specified in "LSET (...)" and * applies forward selection, backward elimination, or stepwise regression. 
* > runMain scalation.modeling.symLassoRegressionTest */ @@ -144,7 +154,7 @@ import Example_AutoMPG._ // println (s"y = $y") banner ("AutoMPG Symbolic Lasso Regression") - val mod = SymLassoRegression (x, y, x_fname, Set (-2, -1, 0.5, 2)) // add cross-terms and given powers + val mod = SymLassoRegression (x, y, x_fname, LSET (-2, -1, 0.5, 2)) // add cross-terms and given powers mod.trainNtest ()() // train and test the model println (mod.summary ()) // parameter/coefficient statistics @@ -153,8 +163,7 @@ import Example_AutoMPG._ val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Symbolic Lasso Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Symbolic Lasso Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -183,8 +192,7 @@ end symLassoRegressionTest val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Quadratic Lasso Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Quadratic Lasso Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -213,8 +221,7 @@ end symLassoRegressionTest2 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Quadratic X Lasso Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Quadratic X Lasso Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -243,8 +250,7 @@ end symLassoRegressionTest3 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val 
k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Cubic Lasso Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Cubic Lasso Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -273,8 +279,7 @@ end symLassoRegressionTest4 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Cubic X Lasso Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Cubic X Lasso Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -305,8 +310,7 @@ end symLassoRegressionTest5 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Cubic XX Lasso Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Cubic XX Lasso Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -316,7 +320,7 @@ end symLassoRegressionTest6 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `symLassoRegressionTest7` main function tests the `SymLassoRegression` * object using the AutoMPG dataset. Assumes no missing values. - * It tests custom "Symbolic Lasso Regression", with powers specified in "Set (...)" and + * It tests custom "Symbolic Lasso Regression", with powers specified in "LSET (...)" and * applies forward selection, backward elimination, or stepwise regression. * This test case performs data rescaling. 
* > runMain scalation.modeling.symLassoRegressionTest7 @@ -328,7 +332,7 @@ end symLassoRegressionTest6 banner ("AutoMPG Symbolic Lasso Regression") val mod = SymLassoRegression.rescale (x, y, x_fname, - Set (-2, -1, 0.5, 2)) // add cross-terms and given powers + LSET (-2, -1, 0.5, 2)) // add cross-terms and given powers mod.trainNtest ()() // train and test the model println (mod.summary ()) // parameter/coefficient statistics @@ -337,8 +341,7 @@ end symLassoRegressionTest6 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Symbolic Lasso Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Symbolic Lasso Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -404,7 +407,7 @@ end symLassoRegressionTest8 val x_c = x - mu_x val y_c = y - mu_y - val xx = MatrixD (x_c).transpose + val xx = MatrixD (x_c).ᵀ banner ("Lasso Regression") var mod = new LassoRegression (xx, y_c) diff --git a/src/main/scala/scalation/modeling/SymRidgeRegression.scala b/src/main/scala/scalation/modeling/SymRidgeRegression.scala index c54b579c3..f007ba782 100644 --- a/src/main/scala/scalation/modeling/SymRidgeRegression.scala +++ b/src/main/scala/scalation/modeling/SymRidgeRegression.scala @@ -11,23 +11,69 @@ package scalation package modeling -import scala.collection.mutable.Set +import scala.collection.mutable.{LinkedHashSet => LSET} import scalation.mathstat._ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `SymRidgeRegression` object supports symbolic ridge regression that allows +/** The `SymRidgeRegression` class supports symbolic ridge regression that allows * variables/columns to be raised to various powers, e.g., x^2, x^3, x^.5. * Note, x~^p is a column-wise power function (each column raised to p-th power). 
* IMPORTANT: must not include INTERCEPT (column of ones) in initial data matrix), * i.e., DO NOT include a column of ones in x (will cause singularity in expanded matrix). * Method signatures are the as same as for `SymbolicRegression`, except there is * NO intercept ARGUMENT. + * @param xx the expanded data/input m-by-n matrix + * @param y the response/output m-vector + * @param fname the feature/variable names + * @param powers the set of powers to raise matrix x to + * @param hparam the hyper-parameters (use Regression.hp for default) + */ +class SymRidgeRegression (xx: MatrixD, y: VectorD, fname: Array [String], + powers: LSET [Double], hparam: HyperParameter = Regression.hp) + extends RidgeRegression (xx, y, fname, hparam): + + _modelName = s"SymRidgeRegression_$powers" + +end SymRidgeRegression + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SymRidgeRegression` object provides several factory methods. */ object SymRidgeRegression: + private val debug = debugf ("SymbolicRegression", true) // debug function + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Search for a good symbolic regression model by trying several combinations + * of powers. 
+ * @param x the initial data/input m-by-n matrix (before expansion) + * must not include an intercept column of all ones + * @param y the response/output m-vector + * @param fname the feature/variable names (defaults to null) + * @param cross whether to include 2-way cross/interaction terms x_i x_j (defaults to true) + * @param cross3 whether to include 3-way cross/interaction terms x_i x_j x_k (defaults to false) + * @param hparam the hyper-parameters (use Regression.hp for default) + * @param terms custom terms to add into the model, e.g., Array ((0, 1.0), (1, -2.0)) + * adds x0 x1^(-2) + */ + def searchSR (x: MatrixD, y: VectorD, fname: Array [String] = null, + cross: Boolean = true, cross3: Boolean = false, + hparam: HyperParameter = RidgeRegression.hp, + terms: Array [Xj2p]*): BestStep = + var best = BestStep ()() // best step so far + for k <- 2 to 3; pwr <- SymbolicRegression.powers.subsets (k) do // grid search + val mod = apply (x, y, fname, pwr, cross, cross3, hparam, terms*) + val (_, qof) = mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + best = best.better (-1, qof, mod, mod.mcols) // which is better (-1 => all columns) + end for + best // return the best model + end searchSR + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RidgeRegression` object from a data matrix and a response vector. + /** Create a `SymRidgeRegression` object from a data matrix and a response vector. * Partial support for "Symbolic Ridge Regression" as matrix x can be raised * to several powers (e.g., x^1 and x^2). Note, x^1 is automatically included. * NOTE, Ridge Regression will NOT have an INTERCEPT column. 
@@ -44,24 +90,21 @@ object SymRidgeRegression: * adds x0 x1^(-2) */ def apply (x: MatrixD, y: VectorD, fname: Array [String] = null, - powers: Set [Double] = null, cross: Boolean = true, cross3: Boolean = false, + powers: LSET [Double] = null, cross: Boolean = true, cross3: Boolean = false, hparam: HyperParameter = RidgeRegression.hp, - terms: Array [Xj2p]*): RidgeRegression = + terms: Array [Xj2p]*): SymRidgeRegression = val fname_ = if fname != null then fname else x.indices2.map ("x" + _).toArray // default feature/variable names - val (xx, f_name) = SymbolicRegression.buildMatrix (x, fname_, powers, + val (xx, f_name) = SymbolicRegression.buildMatrix (x, fname_, powers, null, false, cross, cross3, terms*) -// val mod = new RidgeRegression (xx, y, f_name, hparam) // user must center - val mod = RidgeRegression.center (xx, y, f_name, hparam) // automatically centers the data - mod.modelName = "SymRidgeRegression" + (if cross then "X" else "") + - (if cross3 then "XX" else "") - mod + new SymRidgeRegression (xx, y, f_name, powers, hparam) // user must center +// SymRidgeRegression.center (xx, y, f_name, powers, hparam) // FIX -- automatically centers the data end apply //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a `SymRidgeRegression` object from a data matrix and a response vector. - * This method provides data rescaling. + * This method provides data rescaling via normalization (z-transform). * NOTE, Ridge Regression will NOT have an INTERCEPT column. 
* @param x the data/input m-by-n matrix * (augment with a first column of ones to include intercept in model) @@ -75,16 +118,39 @@ object SymRidgeRegression: * adds x0 x1^(-2) */ def rescale (x: MatrixD, y: VectorD, fname: Array [String] = null, - powers: Set [Double] = null, cross: Boolean = true, cross3: Boolean = false, + powers: LSET [Double] = null, cross: Boolean = true, cross3: Boolean = false, hparam: HyperParameter = RidgeRegression.hp, - terms: Array [Xj2p]*): RidgeRegression = + terms: Array [Xj2p]*): SymRidgeRegression = val xn = normalize ((x.mean, x.stdev)) (x) - println (s"rescale: xn = $xn") + debug ("rescale", s"rescaled via z-transform: xn = $xn") apply (xn, y, fname, powers, cross, cross3, hparam, terms*) end rescale + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `SymRidgeRegression` object from a data matrix and a response vector. + * This method provides data rescaling via min-max-transform. + * @param x the data/input m-by-n matrix + * (augment with a first column of ones to include intercept in model) + * @param y the response/output m-vector + * @param fname the feature/variable names (defaults to null) + * @param powers the set of powers to raise matrix x to + * @param cross whether to include 2-way cross/interaction terms x_i x_j (defaults to true) + * @param cross3 whether to include 3-way cross/interaction terms x_i x_j x_k (defaults to false) + * @param hparam the hyper-parameters (use Regression.hp for default) + * @param terms custom terms to add into the model, e.g., Array ((0, 1.0), (1, -2.0)) + * adds x0 x1^(-2) + */ + def rescale2 (x: MatrixD, y: VectorD, fname: Array [String] = null, + powers: LSET [Double] = null, cross: Boolean = true, cross3: Boolean = false, + hparam: HyperParameter = RidgeRegression.hp, + terms: Array [Xj2p]*): SymRidgeRegression = + val xn = scale (extreme (x)) (x) + debug ("rescale2", s"rescaled via min-max-transform: xn = $xn") + apply (xn, y, fname, powers, 
cross, cross3, hparam, terms*) + end rescale2 + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RidgeRegression` object that uses multiple regression to fit a quadratic + /** Create a `SymRidgeRegression` object that uses multiple regression to fit a quadratic * surface to the data. For example in 2D, the quadratic regression equation is * y = b dot x + e = [b_0, ... b_k] dot [x_0, x_0^2, x_1, x_1^2] + e * NOTE, Ridge Regression will NOT have an INTERCEPT column. @@ -97,14 +163,12 @@ object SymRidgeRegression: */ def quadratic (x: MatrixD, y: VectorD, fname: Array [String] = null, cross: Boolean = false, - hparam: HyperParameter = RidgeRegression.hp): RidgeRegression = - val mod = apply (x, y, fname, Set (1, 2), cross, false, hparam) - mod.modelName = "SymRidgeRegression.quadratic" + (if cross then "X" else "") - mod + hparam: HyperParameter = RidgeRegression.hp): SymRidgeRegression = + apply (x, y, fname, LSET (1, 2), cross, false, hparam) end quadratic //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RidgeRegression` object that uses multiple regression to fit a cubic + /** Create a `SymRidgeRegression` object that uses multiple regression to fit a cubic * surface to the data. For example in 2D, the cubic regression equation is * y = b dot x + e = [b_0, ... 
b_k] dot [x_0, x_0^2, x_0^3, * x_1, x_1^2, x_1^3, @@ -120,11 +184,8 @@ object SymRidgeRegression: */ def cubic (x: MatrixD, y: VectorD, fname: Array [String] = null, cross: Boolean = false, cross3: Boolean = false, - hparam: HyperParameter = RidgeRegression.hp): RidgeRegression = - val mod = apply (x, y, fname, Set (1, 2, 3), cross, cross3, hparam) - mod.modelName = "SymRidgeRegression.cubic" + (if cross then "X" else "") + - (if cross3 then "X" else "") - mod + hparam: HyperParameter = RidgeRegression.hp): SymRidgeRegression = + apply (x, y, fname, LSET (1, 2, 3), cross, cross3, hparam) end cubic end SymRidgeRegression @@ -134,7 +195,7 @@ import Example_AutoMPG._ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `symRidgeRegressionTest` main function tests the `SymRidgeRegression` * object using the AutoMPG dataset. Assumes no missing values. - * It tests custom "Symbolic Ridge Regression", with powers specified in "Set (...)" and + * It tests custom "Symbolic Ridge Regression", with powers specified in "LSET (...)" and * applies forward selection, backward elimination, or stepwise regression. 
* > runMain scalation.modeling.symRidgeRegressionTest */ @@ -144,7 +205,7 @@ import Example_AutoMPG._ // println (s"y = $y") banner ("AutoMPG Symbolic Ridge Regression") - val mod = SymRidgeRegression (x, y, x_fname, Set (-2, -1, 0.5, 2)) // add cross-terms and given powers + val mod = SymRidgeRegression (x, y, x_fname, LSET (-2, -1, 0.5, 2)) // add cross-terms and given powers mod.trainNtest ()() // train and test the model println (mod.summary ()) // parameter/coefficient statistics @@ -153,11 +214,14 @@ import Example_AutoMPG._ val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Symbolic Ridge Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Symbolic Ridge Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for + FitM.showQofStatTable (mod.crossValidate ()) // cross-validation for full model + val modBest = mod.getBest.mod // FIX - check this is really the best model + FitM.showQofStatTable (modBest.crossValidate ()) // cross-validation for best model + end symRidgeRegressionTest @@ -183,8 +247,7 @@ end symRidgeRegressionTest val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Quadratic Ridge Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Quadratic Ridge Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -213,8 +276,7 @@ end symRidgeRegressionTest2 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Quadratic X Ridge Regression with $tech", lines = true) 
+ new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Quadratic X Ridge Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -243,8 +305,7 @@ end symRidgeRegressionTest3 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Cubic Ridge Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Cubic Ridge Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -273,8 +334,7 @@ end symRidgeRegressionTest4 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Cubic X Ridge Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Cubic X Ridge Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -305,8 +365,7 @@ end symRidgeRegressionTest5 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Cubic XX Ridge Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Cubic XX Ridge Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -316,7 +375,7 @@ end symRidgeRegressionTest6 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `symRidgeRegressionTest7` main function tests the `SymRidgeRegression` * object using the AutoMPG dataset. Assumes no missing values. 
- * It tests custom "Symbolic Ridge Regression", with powers specified in "Set (...)" and + * It tests custom "Symbolic Ridge Regression", with powers specified in "LSET (...)" and * applies forward selection, backward elimination, or stepwise regression. * This test case performs data rescaling. * > runMain scalation.modeling.symRidgeRegressionTest7 @@ -331,9 +390,9 @@ end symRidgeRegressionTest6 banner ("AutoMPG Symbolic Ridge Regression") val mod = if RESCALE then - SymRidgeRegression.rescale (x, y, x_fname, Set (-2, -1, 2)) // add cross-terms and given powers & rescale (0.5 -> NaN) + SymRidgeRegression.rescale (x, y, x_fname, LSET (-2, -1, 2)) // add cross-terms and given powers & rescale (0.5 -> NaN) else - SymRidgeRegression (x, y, x_fname, Set (-2, -1, 0.5, 2)) // add cross-terms and given powers + SymRidgeRegression (x, y, x_fname, LSET (-2, -1, 0.5, 2)) // add cross-terms and given powers mod.trainNtest ()() // train and test the model println (mod.summary ()) // parameter/coefficient statistics @@ -342,8 +401,7 @@ end symRidgeRegressionTest6 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Symbolic Ridge Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Symbolic Ridge Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -407,7 +465,7 @@ end symRidgeRegressionTest8 val mu_y = y.mean // center the response data val y_c = y - mu_y - val xx = MatrixD (x).transpose + val xx = MatrixD (x).ᵀ val fname = Array ("x") banner ("Ridge Regression") @@ -439,3 +497,30 @@ end symRidgeRegressionTest8 end symRidgeRegressionTest9 + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `symRidgeRegressionTest14` main function tests the `SymRidgeRegression` + * object using the AutoMPG dataset. 
Assumes no missing values. + * It tests custom "Symbolic Regression", with powers specified in "LSET (...)" and + * applies forward selection, backward elimination, or stepwise regression. + * This test case performs data rescaling. + * > runMain scalation.modeling.symRidgeRegressionTest14 + */ +@main def symRidgeRegressionTest14 (): Unit = + +// println (s"x = $x") +// println (s"y = $y") + + banner ("AutoMPG Symbolic Regression") + val best = SymbolicRegression.searchSR (x, y, x_fname) // return best model from search + banner (s"Best Full Model: ${best.mod.modelName}, qof = ${best.qof}") + + banner (s"Feature Selection Technique: Stepwise") + val (cols, rSq) = best.mod.selectFeatures (SelectionTech.Stepwise, "one") // R^2, R^2 bar, R^2 cv + val k = cols.size + println (s"k = $k, n = ${x.dim2}") + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Symbolic Regression with Stepwise", lines = true) + println (s"Stepwise: rSq = $rSq") + +end symRidgeRegressionTest14 + diff --git a/src/main/scala/scalation/modeling/SymbolicRegression.scala b/src/main/scala/scalation/modeling/SymbolicRegression.scala index ecd100c24..0bb519662 100644 --- a/src/main/scala/scalation/modeling/SymbolicRegression.scala +++ b/src/main/scala/scalation/modeling/SymbolicRegression.scala @@ -19,7 +19,7 @@ package scalation package modeling -import scala.collection.mutable.Set +import scala.collection.mutable.{LinkedHashSet => LSET} import scala.runtime.ScalaRunTime.stringOf import scalation.mathstat._ @@ -27,18 +27,91 @@ import scalation.mathstat._ type Xj2p = (Int, Double) // factor in term x_j ^ p //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `SymbolicRegression` object supports a limited form of symbolic regression that +/** The `SymbolicRegression` class supports a limited form of symbolic regression that * allows variables/columns to be raised to various powers, e.g., x^2, x^3, x^.5. 
* Note, x~^p is a column-wise power function (each column raised to p-th power). * IMPORTANT: must not include intercept (column of ones) in initial data matrix), * i.e., DO NOT include a column of ones in x (will cause singularity in expanded matrix). + * @param xx the expanded data/input m-by-n matrix + * @param y the response/output m-vector + * @param fname the feature/variable names + * @param powers the set of powers to raise matrix x to + * @param rpowers the set of rational powers to raise matrix x to (allows a negative base) +* DON'T use the same powers for powers and rpowers + * @param hparam the hyper-parameters (use Regression.hp for default) + */ +class SymbolicRegression (xx: MatrixD, y: VectorD, fname: Array [String], + powers: LSET [Double], + rpowers: LSET [Rat], + hparam: HyperParameter = Regression.hp) + extends Regression (xx, y, fname, hparam): + + _modelName = s"SymbolicRegression_${powers}_$rpowers" + +end SymbolicRegression + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SymbolicRegression` object provides several factory methods. */ object SymbolicRegression: private val debug = debugf ("SymbolicRegression", true) // debug function + val powers = LSET (-2.0, -1.5, -1.0, -0.5, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0) // powers (x~^p) are doubles + + val rpowers = LSET (Rat(-2), Rat(-5, 3), Rat(-4, 3), // powers (x↑r) are rational numbers + Rat(-1), Rat(-2, 3), Rat(-1, 3), // these allow a negative base x in x↑r + Rat(1, 3), Rat(2, 3), // since the denominators are odd + Rat(1), Rat(4, 3), Rat(5, 3), + Rat(2), Rat(7, 3), Rat(8, 3), + Rat(3)) + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `Regression` object from a data matrix and a response vector. + /** Search for a good symbolic regression model by trying several combinations + * of powers. 
+ * @param x the initial data/input m-by-n matrix (before expansion) + * must not include an intercept column of all ones + * @param y the response/output m-vector + * @param fname the feature/variable names (defaults to null) + * @param intercept whether to include the intercept term (column of ones) _1 (defaults to true) + * @param cross whether to include 2-way cross/interaction terms x_i x_j (defaults to true) + * @param cross3 whether to include 3-way cross/interaction terms x_i x_j x_k (defaults to false) + * @param rational whether to search over rational or double powers + * @param hparam the hyper-parameters (use Regression.hp for default) + * @param terms custom terms to add into the model, e.g., Array ((0, 1.0), (1, -2.0)) + * adds x0 x1^(-2) + */ + def searchSR (x: MatrixD, y: VectorD, fname: Array [String] = null, + intercept: Boolean = true, + cross: Boolean = true, cross3: Boolean = false, + rational: Boolean = true, + hparam: HyperParameter = Regression.hp, + terms: Array [Xj2p]*): BestStep = + var best = BestStep ()() // best step so far + + for k <- 2 to 3 do + if rational then + for r <- rpowers.subsets (k) do // grid search rational powers r + val mod = apply (x, y, fname, null, r, intercept, + cross, cross3, hparam, terms*) + val (_, qof) = mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + best = best.better (-1, qof, mod, mod.mcols) // which is better (-1 => all columns) + + else + for p <- powers.subsets (k) do // grid search double powers p + val mod = apply (x, y, fname, p, null, intercept, + cross, cross3, hparam, terms*) + val (_, qof) = mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + best = best.better (-1, qof, mod, mod.mcols) // which is better (-1 => all columns) + end for + best // return the best model + end searchSR + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** 
Create a `SymbolicRegression` object from a data matrix and a response vector. * Partial support for "Symbolic Regression" as matrix x can be raised to * several powers (e.g., x^1 and x^2). * @param x the initial data/input m-by-n matrix (before expansion) @@ -46,6 +119,8 @@ object SymbolicRegression: * @param y the response/output m-vector * @param fname the feature/variable names (defaults to null) * @param powers the set of powers to raise matrix x to (defaults to null) + * @param rpowers the set of rational powers to raise matrix x to (allows a negative base) + * DON'T use the same powers for powers and rpowers * @param intercept whether to include the intercept term (column of ones) _1 (defaults to true) * @param cross whether to include 2-way cross/interaction terms x_i x_j (defaults to true) * @param cross3 whether to include 3-way cross/interaction terms x_i x_j x_k (defaults to false) @@ -54,22 +129,21 @@ object SymbolicRegression: * adds x0 x1^(-2) */ def apply (x: MatrixD, y: VectorD, fname: Array [String] = null, - powers: Set [Double] = null, intercept: Boolean = true, + powers: LSET [Double] = null, + rpowers: LSET [Rat] = null, + intercept: Boolean = true, cross: Boolean = true, cross3: Boolean = false, hparam: HyperParameter = Regression.hp, - terms: Array [Xj2p]*): Regression = + terms: Array [Xj2p]*): SymbolicRegression = val fname_ = if fname != null then fname else x.indices2.map ("x" + _).toArray // default feature/variable names - val (xx, f_name) = buildMatrix (x, fname_, powers, intercept, cross, cross3, terms*) - val mod = new Regression (xx, y, f_name, hparam) - mod.modelName = "SymbolicRegression" + (if cross then "X" else "") + - (if cross3 then "X" else "") - mod + val (xx, f_name) = buildMatrix (x, fname_, powers, rpowers, intercept, cross, cross3, terms*) + new SymbolicRegression (xx, y, f_name, powers, rpowers, hparam) end apply //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `Regression` 
object from a data matrix and a response vector. + /** Create a `SymbolicRegression` object from a data matrix and a response vector. * Partial support for "Symbolic Regression" as matrix x can be raised to * several powers (e.g., x^1 and x^2). Will append the columns in matrix dv. * Allows for having dummy variables without raising them to powers or crossing them. @@ -80,6 +154,8 @@ object SymbolicRegression: * @param fname the feature/variable names (defaults to null) * @param fname_dv the feature/variable names for dummy variables (defaults to null) * @param powers the set of powers to raise matrix x to (defaults to null) + * @param rpowers the set of rational powers to raise matrix x to (allows a negative base) + * DON'T use the same powers for powers and rpowers * @param intercept whether to include the intercept term (column of ones) _1 (defaults to true) * @param cross whether to include 2-way cross/interaction terms x_i x_j (defaults to true) * @param cross3 whether to include 3-way cross/interaction terms x_i x_j x_k (defaults to false) @@ -89,18 +165,17 @@ object SymbolicRegression: */ def withDvars (x: MatrixD, dv: MatrixD, y: VectorD, fname: Array [String] = null, fname_dv: Array [String] = null, - powers: Set [Double] = null, intercept: Boolean = true, + powers: LSET [Double] = null, + rpowers: LSET [Rat] = null, + intercept: Boolean = true, cross: Boolean = true, cross3: Boolean = false, hparam: HyperParameter = Regression.hp, - terms: Array [Xj2p]*): Regression = + terms: Array [Xj2p]*): SymbolicRegression = val fname_ = if fname != null then fname else x.indices2.map ("x" + _).toArray // default feature/variable names - val (xx, f_name) = buildMatrix (x, fname_, powers, intercept, cross, cross3, terms*) - val mod = new Regression (xx ++^ dv, y, f_name ++ fname_dv, hparam) - mod.modelName = "SymbolicRegression" + (if cross then "X" else "") + - (if cross3 then "X" else "") - mod + val (xx, f_name) = buildMatrix (x, fname_, powers, rpowers, intercept, 
cross, cross3, terms*) + new SymbolicRegression (xx ++^ dv, y, f_name ++ fname_dv, powers, rpowers, hparam) end withDvars //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -109,6 +184,8 @@ object SymbolicRegression: * must not include an intercept column of all ones * @param fname the feature/variable names (should not be null here) * @param powers the set of powers to raise matrix x to (x^p or log1p(x) for p = 0) + * @param rpowers the set of rational powers to raise matrix x to (allows a negative base) + * DON'T use the same powers for powers and rpowers * @param intercept whether to include the intercept term (column of ones) _1 (defaults to true) * @param cross whether to include 2-way cross/interaction terms x_i x_j (defaults to true) * @param cross3 whether to include 3-way cross/interaction terms x_i x_j x_k (defaults to false) @@ -116,14 +193,16 @@ object SymbolicRegression: * adds x0 x1^(-2) */ def buildMatrix (x: MatrixD, fname: Array [String], - powers: Set [Double], intercept: Boolean, + powers: LSET [Double], + rpowers: LSET [Rat], + intercept: Boolean, cross: Boolean, cross3: Boolean, terms: Array [Xj2p]*): (MatrixD, Array [String]) = val _1 = VectorD.one (x.dim) // one vector var xx = new MatrixD (x.dim, 0) // start empty var fname_ = Array [String] () // derived feature names - if powers != null then + if powers != null then // raise x to a double if powers contains 1 then xx = xx ++^ x // add linear terms x fname_ = fname @@ -135,6 +214,15 @@ object SymbolicRegression: fname_ ++= fname.map ((n) => s"$n^$p") end if + if rpowers != null then // raise x to a rational number + if rpowers contains Rat._1 then + xx = xx ++^ x // add linear terms x + fname_ = fname + for r <- rpowers do // allows a negative base when dem. 
is odd + xx = xx ++^ x↑r // add rpower terms x↑p + fname_ ++= fname.map ((n) => s"$n↑$r") + end if + if terms != null then debug ("buildMatrix", s"add custom terms = ${stringOf (terms)}") val z = _1.copy @@ -143,22 +231,20 @@ object SymbolicRegression: for (j, p) <- t do // x_j to the p-th power z *= x(?, j)~^p s = s + s"x$j^$p" - end for xx = xx :^+ z // add custom term/column t fname_ = fname_ :+ s end for end if if cross then - xx = xx ++^ x.crossAll // add 2-way cross terms x_i x_j - fname_ ++= crossNames (fname) + xx = xx ++^ x.crossAll // add 2-way cross terms x_i x_j + fname_ ++= crossNames (fname) if cross3 then - xx = xx ++^ x.crossAll3 // add 3-way cross terms x_i x_j x_k - fname_ ++= crossNames3 (fname) + xx = xx ++^ x.crossAll3 // add 3-way cross terms x_i x_j x_k + fname_ ++= crossNames3 (fname) if intercept then xx = _1 +^: xx // add intercept term (column of ones) _1 fname_ = Array ("one") ++ fname_ - end if // debug ("buildMatrix", s"xx = $xx") (xx, fname_) // return expanded matrix and its column names @@ -166,12 +252,14 @@ object SymbolicRegression: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a `SymbolicRegression` object from a data matrix and a response vector. - * This method provides data rescaling via normalization + * This method provides data rescaling via normalization (z-transform). 
* @param x the data/input m-by-n matrix * (augment with a first column of ones to include intercept in model) * @param y the response/output m-vector * @param fname the feature/variable names (defaults to null) * @param powers the set of powers to raise matrix x to + * @param rpowers the set of rational powers to raise matrix x to (allows a negative base) + * DON'T use the same powers for powers and rpowers * @param intercept whether to include the intercept term (column of ones) _1 (defaults to true) * @param cross whether to include 2-way cross/interaction terms x_i x_j (defaults to true) * @param cross3 whether to include 3-way cross/interaction terms x_i x_j x_k (defaults to false) @@ -180,22 +268,27 @@ object SymbolicRegression: * adds x0 x1^(-2) */ def rescale (x: MatrixD, y: VectorD, fname: Array [String] = null, - powers: Set [Double] = null, intercept: Boolean = true, + powers: LSET [Double] = null, + rpowers: LSET [Rat] = null, + intercept: Boolean = true, cross: Boolean = true, cross3: Boolean = false, hparam: HyperParameter = Regression.hp, - terms: Array [Xj2p]*): Regression = + terms: Array [Xj2p]*): SymbolicRegression = val xn = normalize ((x.mean, x.stdev)) (x) - apply (xn, y, fname, powers, intercept, cross, cross3, hparam, terms*) + debug ("rescale", s"rescaled via z-transform: xn = $xn") + apply (xn, y, fname, powers, rpowers, intercept, cross, cross3, hparam, terms*) end rescale //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a `SymbolicRegression` object from a data matrix and a response vector. - * This method provides data rescaling via min-max. + * This method provides data rescaling via min-max-transform. 
* @param x the data/input m-by-n matrix * (augment with a first column of ones to include intercept in model) * @param y the response/output m-vector * @param fname the feature/variable names (defaults to null) * @param powers the set of powers to raise matrix x to + * @param rpowers the set of rational powers to raise matrix x to (allows a negative base) + * DON'T use the same powers for powers and rpowers * @param intercept whether to include the intercept term (column of ones) _1 (defaults to true) * @param cross whether to include 2-way cross/interaction terms x_i x_j (defaults to true) * @param cross3 whether to include 3-way cross/interaction terms x_i x_j x_k (defaults to false) @@ -204,12 +297,15 @@ object SymbolicRegression: * adds x0 x1^(-2) */ def rescale2 (x: MatrixD, y: VectorD, fname: Array [String] = null, - powers: Set [Double] = null, intercept: Boolean = true, + powers: LSET [Double] = null, + rpowers: LSET [Rat] = null, + intercept: Boolean = true, cross: Boolean = true, cross3: Boolean = false, hparam: HyperParameter = Regression.hp, - terms: Array [Xj2p]*): Regression = - val xn = normalize ((x.mean, x.stdev)) (x) - apply (xn, y, fname, powers, intercept, cross, cross3, hparam, terms*) + terms: Array [Xj2p]*): SymbolicRegression = + val xn = scale (extreme (x)) (x) + debug ("rescale2", s"rescaled via min-max-transform: xn = $xn") + apply (xn, y, fname, powers, rpowers, intercept, cross, cross3, hparam, terms*) end rescale2 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -229,7 +325,7 @@ object SymbolicRegression: end crossNames3 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `Regression` object that uses multiple regression to fit a quadratic + /** Create a `SymbolicRegression` object that uses multiple regression to fit a quadratic * surface to the data. For example in 2D, the quadratic regression equation is * y = b dot x + e = [b_0, ... 
b_k] dot [1, x_0, x_0^2, x_1, x_1^2] + e * @param x the initial data/input m-by-n matrix (before quadratic term expansion) @@ -242,14 +338,12 @@ object SymbolicRegression: */ def quadratic (x: MatrixD, y: VectorD, fname: Array [String] = null, intercept: Boolean = true, cross: Boolean = false, - hparam: HyperParameter = Regression.hp): Regression = - val mod = apply (x, y, fname, Set (1, 2), intercept, cross, false, hparam) - mod.modelName = "SymbolicRegression.quadratic" + (if cross then "X" else "") - mod + hparam: HyperParameter = Regression.hp): SymbolicRegression = + apply (x, y, fname, LSET (1, 2), null, intercept, cross, false, hparam) end quadratic //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `Regression` object that uses multiple regression to fit a cubic + /** Create a `SymbolicRegression` object that uses multiple regression to fit a cubic * surface to the data. For example in 2D, the cubic regression equation is * y = b dot x + e = [b_0, ... b_k] dot [1, x_0, x_0^2, x_0^3, * x_1, x_1^2, x_1^3, @@ -265,11 +359,8 @@ object SymbolicRegression: */ def cubic (x: MatrixD, y: VectorD, fname: Array [String] = null, intercept: Boolean = true, cross: Boolean = false, cross3: Boolean = false, - hparam: HyperParameter = Regression.hp): Regression = - val mod = apply (x, y, fname, Set (1, 2, 3), intercept, cross, cross3, hparam) - mod.modelName = "SymbolicRegression.cubic" + (if cross then "X" else "") + - (if cross3 then "X" else "") - mod + hparam: HyperParameter = Regression.hp): SymbolicRegression = + apply (x, y, fname, LSET (1, 2, 3), null, intercept, cross, cross3, hparam) end cubic end SymbolicRegression @@ -279,7 +370,7 @@ import Example_AutoMPG._ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `symbolicRegressionTest` main function tests the `SymbolicRegression` * object using the AutoMPG dataset. Assumes no missing values. 
- * It tests custom "Symbolic Regression", with powers specified in "Set (...)" and + * It tests custom "Symbolic Regression", with powers specified in "LSET (...)" and * applies forward selection, backward elimination, or stepwise regression. * > runMain scalation.modeling.symbolicRegressionTest */ @@ -289,7 +380,7 @@ import Example_AutoMPG._ // println (s"y = $y") banner ("AutoMPG Symbolic Regression") - val mod = SymbolicRegression (x, y, x_fname, Set (-2, -1, 0.5, 2)) // add, intercept, cross-terms and given powers + val mod = SymbolicRegression (x, y, x_fname, LSET (-2, -1, 0.5, 2)) // add, intercept, cross-terms and given powers mod.trainNtest ()() // train and test the model println (mod.summary ()) // parameter/coefficient statistics @@ -298,11 +389,14 @@ import Example_AutoMPG._ val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Symbolic Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Symbolic Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for + FitM.showQofStatTable (mod.crossValidate ()) // cross-validation for full model + val modBest = mod.getBest.mod // FIX - check this is really the best model + FitM.showQofStatTable (modBest.crossValidate ()) // cross-validation for best model + end symbolicRegressionTest @@ -331,8 +425,7 @@ end symbolicRegressionTest val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Quadratic Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Quadratic Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -362,8 +455,7 @@ end symbolicRegressionTest2 val (cols, rSq) = 
mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Quadratic X Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Quadratic X Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -393,8 +485,7 @@ end symbolicRegressionTest3 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Cubic Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Cubic Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -424,8 +515,7 @@ end symbolicRegressionTest4 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Cubic X Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Cubic X Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -456,8 +546,7 @@ end symbolicRegressionTest5 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Cubic XX Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Cubic XX Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -467,19 +556,24 @@ end symbolicRegressionTest6 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `symbolicRegressionTest7` main function tests the `SymbolicRegression` * object using the AutoMPG dataset. 
Assumes no missing values. - * It tests custom "Symbolic Regression", with powers specified in "Set (...)" and + * It tests custom "Symbolic Regression", with powers specified in "LSET (...)" and * applies forward selection, backward elimination, or stepwise regression. * This test case performs data rescaling. * > runMain scalation.modeling.symbolicRegressionTest7 */ @main def symbolicRegressionTest7 (): Unit = + val RESCALE = true + // println (s"x = $x") // println (s"y = $y") banner ("AutoMPG Symbolic Regression") - val mod = SymbolicRegression.rescale (x, y, x_fname, - Set (-2, -1, 0.5, 2)) // add intercept, cross-terms and given powers + val mod = + if RESCALE then + SymRidgeRegression.rescale (x, y, x_fname, LSET (-2, -1, 2)) // add cross-terms and given powers & rescale (0.5 -> NaN) + else + SymRidgeRegression (x, y, x_fname, LSET (-2, -1, 0.5, 2)) // add cross-terms and given powers mod.trainNtest ()() // train and test the model println (mod.summary ()) // parameter/coefficient statistics @@ -488,8 +582,7 @@ end symbolicRegressionTest6 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Symbolic Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Symbolic Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -529,7 +622,7 @@ end symbolicRegressionTest7 val (x, y) = (xy.not (?, 2), xy(?, 2)) banner ("Newton's Universal Gravity Symbolic Regression") - val mod = SymbolicRegression (x, y, fname, null, false, false, + val mod = SymbolicRegression (x, y, fname, null, null, false, false, terms = Array ((0, 1.0), (1, -2.0))) // add one custom term mod.trainNtest ()() // train and test the model @@ -559,13 +652,13 @@ end symbolicRegressionTest8 banner ("Quadratic Regression") val fname = Array ("x") - mod = SymbolicRegression.quadratic 
(MatrixD (x).transpose, y, fname) + mod = SymbolicRegression.quadratic (MatrixD (x).ᵀ, y, fname) mod.trainNtest ()() println (mod.summary ()) new Plot (null, y, mod.predict (mod.getX), s"${mod.modelName} y vs yp", lines = true) banner ("Cubic Regression") - mod = SymbolicRegression.cubic (MatrixD (x).transpose, y, fname) + mod = SymbolicRegression.cubic (MatrixD (x).ᵀ, y, fname) mod.trainNtest ()() println (mod.summary ()) new Plot (null, y, mod.predict (mod.getX), s"${mod.modelName} y vs yp", lines = true) @@ -604,8 +697,7 @@ end symbolicRegressionTest9 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Quadratic Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Quadratic Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -627,7 +719,7 @@ end symbolicRegressionTest10 // loadStr: column 2 in file has month strings, column 3 has day strings - val xy = MatrixD.loadStr ("forestfires.csv", 1, 0)(Set (2, 3), month, day) + val xy = MatrixD.loadStr ("forestfires.csv", 1, 0)(LSET (2, 3), month, day) val resp = xy.dim2 - 1 val y = xy(?, resp) // response - burned area val x = xy.not (?, resp) @@ -638,7 +730,7 @@ end symbolicRegressionTest10 banner ("Forest Fires Cubic Regression with Log with Ordinal Values") val mod = SymbolicRegression (x, y, fname, - Set (0, 1, 2, 3), cross = false) // use log(x), x, x^2 and x^3 terms + LSET (0, 1, 2, 3), cross = false) // use log(x), x, x^2 and x^3 terms // adds intercept by default mod.trainNtest ()() // train and test the model println (mod.summary ()) // parameter/coefficient statistics @@ -654,8 +746,7 @@ end symbolicRegressionTest10 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", 
"R^2 bar", "R^2 cv"), - s"R^2 vs n for Quadratic Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Quadratic Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") // end for @@ -680,7 +771,7 @@ end symbolicRegressionTest11 // loadStr: column 2 in file has month strings, column 3 has day strings - val xy = MatrixD.loadStr ("forestfires.csv", 1, 0)(Set (2, 3), month, day) + val xy = MatrixD.loadStr ("forestfires.csv", 1, 0)(LSET (2, 3), month, day) val resp = xy.dim2 - 1 val y = xy(?, resp) // response - burned area var x = xy.not (?, resp) @@ -693,7 +784,7 @@ end symbolicRegressionTest11 x = x.not (?, 3) println (s"day_col = $day_col") - val dv = RegressionCat.dummyVars (MatrixI (day_col).transpose) + val dv = RegressionCat.dummyVars (MatrixI (day_col).ᵀ) println (s"x = $x") // regular predictor variables println (s"dv = $dv") // dummy variables @@ -701,7 +792,7 @@ end symbolicRegressionTest11 banner ("Forest Fires Quadratic Regression with Ordinal Values") val mod = SymbolicRegression.withDvars (x, dv, y, fname, fnam2, - Set (1, 2, 3), cross = false) // use x, x^2 and x^3 terms + LSET (1, 2, 3), cross = false) // use x, x^2 and x^3 terms // adds intercept by default mod.trainNtest ()() // train and test the model println (mod.summary ()) // parameter/coefficient statistics @@ -717,8 +808,7 @@ end symbolicRegressionTest11 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Quadratic Regression with $tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Quadratic Regression with $tech", lines = true) println (s"$tech: rSq = $rSq") // end for @@ -733,15 +823,25 @@ end symbolicRegressionTest12 */ @main def symbolicRegressionTest13 (): Unit = +/* import neuralnet._ import ActivationFun._ import scala.math.tanh +*/ - val x = 
VectorD (1, 2, 3, 4, 5, 6, 7, 8, 9) +// val x = VectorD (1, 2, 3, 4, 5, 6, 7, 8, 9) // val y = VectorD (8, 6, 4, 2, 1, 3, 5, 9, 7) - val y = VectorD (7, 8, 5, 3, 2, 1, 4, 6, 9) +// val y = VectorD (7, 8, 5, 3, 2, 1, 4, 6, 9) +// val y = VectorD (7, 6, 5, 3, 2, 1, 4, 8, 9) +// val y = VectorD (6, 7, 5, 3, 2, 1, 4, 8, 9) +// val y = VectorD (9, 6, 4, 2, 1, 3, 5, 8, 7) + + val x = VectorD (1, 2, 3, 4, 5, 6) + val y = VectorD (1, 3, 5, 6, 4, 2) val ox = MatrixD.one (x.dim) :^+ x + println (x.corr (y)) + banner ("SimpleRegression") val mod0 = SimpleRegression (x, y, null) mod0.trainNtest ()() @@ -754,17 +854,31 @@ end symbolicRegressionTest12 println (mod.summary ()) new Plot (null, y, mod.predict (mod.getX), s"${mod.modelName} y vs yp", lines = true) + println (s"X^TX = ${ox.ᵀ * ox}") + println (s"X^Ty = ${ox.ᵀ * y}") + banner ("Quadratic Regression") val fname = Array ("x") - mod = SymbolicRegression.quadratic (MatrixD (x).transpose, y, fname) + mod = SymbolicRegression.quadratic (MatrixD (x).ᵀ, y, fname) + mod.trainNtest ()() + println (mod.summary ()) + new Plot (null, y, mod.predict (mod.getX), s"${mod.modelName} y vs yp", lines = true) + + val xx = mod.getX + println (s"X^TX = ${xx.ᵀ * xx}") + println (s"X^Ty = ${xx.ᵀ * y}") + + banner ("TranRegression (sqrt)") + mod = new TranRegression (ox, y, fname, yℱ = RootForm ()) mod.trainNtest ()() println (mod.summary ()) new Plot (null, y, mod.predict (mod.getX), s"${mod.modelName} y vs yp", lines = true) +/* banner ("NeuralNet 3-Layer") Optimizer.hp("eta") = 0.3 Optimizer.hp("bSize") = 2 - val mod2 = NeuralNet_3L.rescale (MatrixD (x).transpose, MatrixD (y).transpose, fname, nz = 3, f = f_sigmoid, f1 = f_id) + val mod2 = NeuralNet_3L.rescale (MatrixD (x).ᵀ, MatrixD (y).ᵀ, fname, nz = 3, f = f_sigmoid, f1 = f_id) mod2.trainNtest2 ()() // println (mod2.summary ()) new Plot (null, y, (mod2.predict (mod2.getX))(?, 0), s"${mod2.modelName} y vs yp", lines = true) @@ -775,7 +889,33 @@ end symbolicRegressionTest12 0.5099 * 
tanh(-0.6860 * z - 0.6764) - 0.8904 * tanh(1.2081 * z + 1.6912) + 1.7239 } - println (MatrixD (y, yp).transpose) + println (MatrixD (y, yp).ᵀ) +*/ end symbolicRegressionTest13 + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `symbolicRegressionTest14` main function tests the `SymbolicRegression` + * object using the AutoMPG dataset. Assumes no missing values. + * It tests custom "Symbolic Regression", with powers specified in "LSET (...)" and + * applies forward selection, backward elimination, or stepwise regression. + * This test case performs data rescaling. + * > runMain scalation.modeling.symbolicRegressionTest14 + */ +@main def symbolicRegressionTest14 (): Unit = + + val RAT = true // try both + banner ("AutoMPG Symbolic Regression") + val best = SymbolicRegression.searchSR (x, y, x_fname, rational = RAT) // return best model from search + banner (s"Best Full Model: ${best.mod.modelName}, qof = ${best.qof}") + + banner (s"Feature Selection Technique: Stepwise") + val (cols, rSq) = best.mod.selectFeatures (SelectionTech.Stepwise, "one") // R^2, R^2 bar, R^2 cv + val k = cols.size + println (s"k = $k, n = ${x.dim2}") + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for Symbolic Regression with Stepwise", lines = true) + println (s"Stepwise: rSq = $rSq") + +end symbolicRegressionTest14 + diff --git a/src/main/scala/scalation/modeling/SymbolicRegression.scala.bak b/src/main/scala/scalation/modeling/SymbolicRegression.scala.bak new file mode 100644 index 000000000..e8ca5f93d --- /dev/null +++ b/src/main/scala/scalation/modeling/SymbolicRegression.scala.bak @@ -0,0 +1,785 @@ + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Thu Dec 23 13:54:30 EST 2021 + * @see LICENSE (MIT style license file). 
+ * + * @note Model: Symbolic Regression, including Quadratic and Cubic Regression + * + * Supports terms of the form b_j x_j^p for any Double p (p = 0 => log transformation) + * as well intercept, cross (x_i x_j) and cross3 (x_i x_j x_k with at least + * one subscript being different). Also support the inclusion of custom terms. + * + * @see `symbolicRegressionTest8` for an example of the use of custom terms + * @see `symbolicRegressionTest11` for an example where Strings are converted to Ordinals + * @see `symbolicRegressionTest12` for an example where Strings are converted to Dummy Variables + */ + +package scalation +package modeling + +import scala.collection.mutable.Set +import scala.runtime.ScalaRunTime.stringOf + +import scalation.mathstat._ + +type Xj2p = (Int, Double) // factor in term x_j ^ p + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SymbolicRegression` object supports a limited form of symbolic regression that + * allows variables/columns to be raised to various powers, e.g., x^2, x^3, x^.5. + * Note, x~^p is a column-wise power function (each column raised to p-th power). + * IMPORTANT: must not include intercept (column of ones) in initial data matrix), + * i.e., DO NOT include a column of ones in x (will cause singularity in expanded matrix). + */ +object SymbolicRegression: + + private val debug = debugf ("SymbolicRegression", true) // debug function + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `Regression` object from a data matrix and a response vector. + * Partial support for "Symbolic Regression" as matrix x can be raised to + * several powers (e.g., x^1 and x^2). 
+ * @param x the initial data/input m-by-n matrix (before expansion) + * must not include an intercept column of all ones + * @param y the response/output m-vector + * @param fname the feature/variable names (defaults to null) + * @param powers the set of powers to raise matrix x to (defaults to null) + * @param intercept whether to include the intercept term (column of ones) _1 (defaults to true) + * @param cross whether to include 2-way cross/interaction terms x_i x_j (defaults to true) + * @param cross3 whether to include 3-way cross/interaction terms x_i x_j x_k (defaults to false) + * @param hparam the hyper-parameters (use Regression.hp for default) + * @param terms custom terms to add into the model, e.g., Array ((0, 1.0), (1, -2.0)) + * adds x0 x1^(-2) + */ + def apply (x: MatrixD, y: VectorD, fname: Array [String] = null, + powers: Set [Double] = null, intercept: Boolean = true, + cross: Boolean = true, cross3: Boolean = false, + hparam: HyperParameter = Regression.hp, + terms: Array [Xj2p]*): Regression = + val fname_ = if fname != null then fname + else x.indices2.map ("x" + _).toArray // default feature/variable names + + val (xx, f_name) = buildMatrix (x, fname_, powers, intercept, cross, cross3, terms*) + val mod = new Regression (xx, y, f_name, hparam) + mod._modelName = "SymbolicRegression" + (if cross then "X" else "") + + (if cross3 then "X" else "") + mod + end apply + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `Regression` object from a data matrix and a response vector. + * Partial support for "Symbolic Regression" as matrix x can be raised to + * several powers (e.g., x^1 and x^2). Will append the columns in matrix dv. + * Allows for having dummy variables without raising them to powers or crossing them. 
+ * @param x the initial data/input m-by-n matrix (before expansion) + * must not include an intercept column of all ones + * @param dv the matrix of dummy variables (@see `RegressionCat`) + * @param y the response/output m-vector + * @param fname the feature/variable names (defaults to null) + * @param fname_dv the feature/variable names for dummy variables (defaults to null) + * @param powers the set of powers to raise matrix x to (defaults to null) + * @param intercept whether to include the intercept term (column of ones) _1 (defaults to true) + * @param cross whether to include 2-way cross/interaction terms x_i x_j (defaults to true) + * @param cross3 whether to include 3-way cross/interaction terms x_i x_j x_k (defaults to false) + * @param hparam the hyper-parameters (use Regression.hp for default) + * @param terms custom terms to add into the model, e.g., Array ((0, 1.0), (1, -2.0)) + * adds x0 x1^(-2) + */ + def withDvars (x: MatrixD, dv: MatrixD, y: VectorD, + fname: Array [String] = null, fname_dv: Array [String] = null, + powers: Set [Double] = null, intercept: Boolean = true, + cross: Boolean = true, cross3: Boolean = false, + hparam: HyperParameter = Regression.hp, + terms: Array [Xj2p]*): Regression = + val fname_ = if fname != null then fname + else x.indices2.map ("x" + _).toArray // default feature/variable names + + val (xx, f_name) = buildMatrix (x, fname_, powers, intercept, cross, cross3, terms*) + val mod = new Regression (xx ++^ dv, y, f_name ++ fname_dv, hparam) + mod._modelName = "SymbolicRegression" + (if cross then "X" else "") + + (if cross3 then "X" else "") + mod + end withDvars + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build an expanded input/data matrix from the initial data/input matrix. 
+ * @param x the initial data/input m-by-n matrix (before expansion) + * must not include an intercept column of all ones + * @param fname the feature/variable names (should not be null here) + * @param powers the set of powers to raise matrix x to (x^p or log1p(x) for p = 0) + * @param intercept whether to include the intercept term (column of ones) _1 (defaults to true) + * @param cross whether to include 2-way cross/interaction terms x_i x_j (defaults to true) + * @param cross3 whether to include 3-way cross/interaction terms x_i x_j x_k (defaults to false) + * @param terms custom terms to add into the model, e.g., Array ((0, 1.0), (1, -2.0)) + * adds x0 x1^(-2) + */ + def buildMatrix (x: MatrixD, fname: Array [String], + powers: Set [Double], intercept: Boolean, + cross: Boolean, cross3: Boolean, + terms: Array [Xj2p]*): (MatrixD, Array [String]) = + val _1 = VectorD.one (x.dim) // one vector + var xx = new MatrixD (x.dim, 0) // start empty + var fname_ = Array [String] () // derived feature names + + if powers != null then + if powers contains 1 then + xx = xx ++^ x // add linear terms x + fname_ = fname + if powers contains 0 then + xx = xx ++^ x.log1p // add log terms log1p (x) + fname_ ++= fname.map ((n) => s"log1p($n)") + for p <- powers if p != 1 && p != 0 do + xx = xx ++^ x~^p // add power terms x^p other than 1, 0 + fname_ ++= fname.map ((n) => s"$n^$p") + end if + + if terms != null then + debug ("buildMatrix", s"add custom terms = ${stringOf (terms)}") + val z = _1.copy + var s = "" + for t <- terms do + for (j, p) <- t do // x_j to the p-th power + z *= x(?, j)~^p + s = s + s"x$j^$p" + xx = xx :^+ z // add custom term/column t + fname_ = fname_ :+ s + end for + end if + + if cross then + xx = xx ++^ x.crossAll // add 2-way cross terms x_i x_j + fname_ ++= crossNames (fname) + if cross3 then + xx = xx ++^ x.crossAll3 // add 3-way cross terms x_i x_j x_k + fname_ ++= crossNames3 (fname) + if intercept then + xx = _1 +^: xx // add intercept term (column 
of ones) _1 + fname_ = Array ("one") ++ fname_ + +// debug ("buildMatrix", s"xx = $xx") + (xx, fname_) // return expanded matrix and its column names + end buildMatrix + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `SymbolicRegression` object from a data matrix and a response vector. + * This method provides data rescaling via normalization (z-transform). + * @param x the data/input m-by-n matrix + * (augment with a first column of ones to include intercept in model) + * @param y the response/output m-vector + * @param fname the feature/variable names (defaults to null) + * @param powers the set of powers to raise matrix x to + * @param intercept whether to include the intercept term (column of ones) _1 (defaults to true) + * @param cross whether to include 2-way cross/interaction terms x_i x_j (defaults to true) + * @param cross3 whether to include 3-way cross/interaction terms x_i x_j x_k (defaults to false) + * @param hparam the hyper-parameters (use Regression.hp for default) + * @param terms custom terms to add into the model, e.g., Array ((0, 1.0), (1, -2.0)) + * adds x0 x1^(-2) + */ + def rescale (x: MatrixD, y: VectorD, fname: Array [String] = null, + powers: Set [Double] = null, intercept: Boolean = true, + cross: Boolean = true, cross3: Boolean = false, + hparam: HyperParameter = Regression.hp, + terms: Array [Xj2p]*): Regression = + val xn = normalize ((x.mean, x.stdev)) (x) + debug ("rescale", s"rescaled via z-transform: xn = $xn") + apply (xn, y, fname, powers, intercept, cross, cross3, hparam, terms*) + end rescale + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `SymbolicRegression` object from a data matrix and a response vector. + * This method provides data rescaling via min-max-transform. 
+ * @param x the data/input m-by-n matrix + * (augment with a first column of ones to include intercept in model) + * @param y the response/output m-vector + * @param fname the feature/variable names (defaults to null) + * @param powers the set of powers to raise matrix x to + * @param intercept whether to include the intercept term (column of ones) _1 (defaults to true) + * @param cross whether to include 2-way cross/interaction terms x_i x_j (defaults to true) + * @param cross3 whether to include 3-way cross/interaction terms x_i x_j x_k (defaults to false) + * @param hparam the hyper-parameters (use Regression.hp for default) + * @param terms custom terms to add into the model, e.g., Array ((0, 1.0), (1, -2.0)) + * adds x0 x1^(-2) + */ + def rescale2 (x: MatrixD, y: VectorD, fname: Array [String] = null, + powers: Set [Double] = null, intercept: Boolean = true, + cross: Boolean = true, cross3: Boolean = false, + hparam: HyperParameter = Regression.hp, + terms: Array [Xj2p]*): Regression = + val xn = scale (extreme (x)) (x) + debug ("rescale2", s"rescaled via min-max-transform: xn = $xn") + apply (xn, y, fname, powers, intercept, cross, cross3, hparam, terms*) + end rescale2 + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create all cross names for the 2-way interaction/cross terms: e.g., "name1_name2". + * @param nm the array of names to be crossed + */ + def crossNames (nm: Array [String]): Array [String] = + (for i <- nm.indices; j <- 0 until i yield s"${nm(i)}_${nm(j)}").toArray + end crossNames + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create all cross names for the 3-way interaction/cross terms: e.g., "name1_name2_name3". 
+ * @param nm the array of names to be crossed + */ + def crossNames3 (nm: Array [String]): Array [String] = + (for i <- nm.indices; j <- 0 until i; k <- 0 until j yield s"${nm(i)}_${nm(j)}_${nm(k)}").toArray + end crossNames3 + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `Regression` object that uses multiple regression to fit a quadratic + * surface to the data. For example in 2D, the quadratic regression equation is + * y = b dot x + e = [b_0, ... b_k] dot [1, x_0, x_0^2, x_1, x_1^2] + e + * @param x the initial data/input m-by-n matrix (before quadratic term expansion) + * must not include an intercept column of all ones + * @param y the response/output m-vector + * @param fname the feature/variable names (defaults to null) + * @param intercept whether to include the intercept term (column of ones) _1 (defaults to true) + * @param cross whether to include 2-way cross/interaction terms x_i x_j (defaults to false) + * @param hparam the hyper-parameters (defaults to Regression.hp) + */ + def quadratic (x: MatrixD, y: VectorD, fname: Array [String] = null, + intercept: Boolean = true, cross: Boolean = false, + hparam: HyperParameter = Regression.hp): Regression = + val mod = apply (x, y, fname, Set (1, 2), intercept, cross, false, hparam) + mod._modelName = "SymbolicRegression.quadratic" + (if cross then "X" else "") + mod + end quadratic + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `Regression` object that uses multiple regression to fit a cubic + * surface to the data. For example in 2D, the cubic regression equation is + * y = b dot x + e = [b_0, ... 
b_k] dot [1, x_0, x_0^2, x_0^3, + * x_1, x_1^2, x_1^3, + * x_0*x_1, x_0^2*x_1, x_0*x_1^2] + e + * @param x the initial data/input m-by-n matrix (before quadratic term expansion) + * must not include an intercept column of all ones + * @param y the response/output m-vector + * @param fname the feature/variable names (defaults to null) + * @param intercept whether to include the intercept term (column of ones) _1 (defaults to true) + * @param cross whether to include 2-way cross/interaction terms x_i x_j (defaults to false) + * @param cross3 whether to include 3-way cross/interaction terms x_i x_j x_k (defaults to false) + * @param hparam the hyper-parameters (defaults to Regression.hp) + */ + def cubic (x: MatrixD, y: VectorD, fname: Array [String] = null, + intercept: Boolean = true, cross: Boolean = false, cross3: Boolean = false, + hparam: HyperParameter = Regression.hp): Regression = + val mod = apply (x, y, fname, Set (1, 2, 3), intercept, cross, cross3, hparam) + mod._modelName = "SymbolicRegression.cubic" + (if cross then "X" else "") + + (if cross3 then "X" else "") + mod + end cubic + +end SymbolicRegression + +import Example_AutoMPG._ + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `symbolicRegressionTest` main function tests the `SymbolicRegression` + * object using the AutoMPG dataset. Assumes no missing values. + * It tests custom "Symbolic Regression", with powers specified in "Set (...)" and + * applies forward selection, backward elimination, or stepwise regression. 
+ * > runMain scalation.modeling.symbolicRegressionTest + */ +@main def symbolicRegressionTest (): Unit = + +// println (s"x = $x") +// println (s"y = $y") + + banner ("AutoMPG Symbolic Regression") + val mod = SymbolicRegression (x, y, x_fname, Set (-2, -1, 0.5, 2)) // add, intercept, cross-terms and given powers + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + for tech <- SelectionTech.values do + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv + val k = cols.size + println (s"k = $k, n = ${x.dim2}") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for Symbolic Regression with $tech", lines = true) + println (s"$tech: rSq = $rSq") + end for + + FitM.showQofStatTable (mod.crossValidate ()) // cross-validation for full model + val modBest = mod.getBest.mod // FIX - check this is really the best model + FitM.showQofStatTable (modBest.crossValidate ()) // cross-validation for best model + +end symbolicRegressionTest + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `symbolicRegressionTest2` main function tests the `SymbolicRegression` + * object using the AutoMPG dataset. Assumes no missing values. + * It tests "Quadratic Regression" (with cross = false) and + * applies forward selection, backward elimination, or stepwise regression. 
+ * > runMain scalation.modeling.symbolicRegressionTest2 + */ +@main def symbolicRegressionTest2 (): Unit = + +// println (s"x = $x") +// println (s"y = $y") + + banner ("AutoMPG Quadratic Regression") + val mod = SymbolicRegression.quadratic (x, y, x_fname) // add x^2 terms + // adds intercept by default + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + println (s"x_fname = ${stringOf (x_fname)}") + + for tech <- SelectionTech.values do + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv + val k = cols.size + println (s"k = $k, n = ${x.dim2}") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for Quadratic Regression with $tech", lines = true) + println (s"$tech: rSq = $rSq") + end for + +end symbolicRegressionTest2 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `symbolicRegressionTest3` main function tests the `SymbolicRegression` + * object using the AutoMPG dataset. Assumes no missing values. + * It tests "Quadratic X Regression" (with cross = true) and + * applies forward selection, backward elimination, or stepwise regression. 
+ * > runMain scalation.modeling.symbolicRegressionTest3 + */ +@main def symbolicRegressionTest3 (): Unit = + +// println (s"x = $x") +// println (s"y = $y") + + banner ("AutoMPG Quadratic X Regression") + val mod = SymbolicRegression.quadratic (x, y, x_fname, // add x^2 terms + true, true) // add intercept and cross terms + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + for tech <- SelectionTech.values do + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv + val k = cols.size + println (s"k = $k, n = ${x.dim2}") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for Quadratic X Regression with $tech", lines = true) + println (s"$tech: rSq = $rSq") + end for + +end symbolicRegressionTest3 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `symbolicRegressionTest4` main function tests the `SymbolicRegression` + * object using the AutoMPG dataset. Assumes no missing values. + * It tests "Cubic Regression" (with cross = false) and + * applies forward selection, backward elimination, or stepwise regression. 
+ * > runMain scalation.modeling.symbolicRegressionTest4 + */ +@main def symbolicRegressionTest4 (): Unit = + +// println (s"x = $x") +// println (s"y = $y") + + banner ("AutoMPG Cubic Regression") + val mod = SymbolicRegression.cubic (x, y, x_fname) // add x^2 and x^3 terms + // adds intercept by default + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + for tech <- SelectionTech.values do + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv + val k = cols.size + println (s"k = $k, n = ${x.dim2}") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for Cubic Regression with $tech", lines = true) + println (s"$tech: rSq = $rSq") + end for + +end symbolicRegressionTest4 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `symbolicRegressionTest5` main function tests the `SymbolicRegression` + * object using the AutoMPG dataset. Assumes no missing values. + * It tests "Cubic X Regression" (with cross = true) and + * applies forward selection, backward elimination, or stepwise regression. 
+ * > runMain scalation.modeling.symbolicRegressionTest5 + */ +@main def symbolicRegressionTest5 (): Unit = + +// println (s"x = $x") +// println (s"y = $y") + + banner ("AutoMPG Cubic X Regression") + val mod = SymbolicRegression.cubic (x, y, x_fname, // add x^2 and x^3 terms + true, true) // add intercept and cross terms + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + for tech <- SelectionTech.values do + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv + val k = cols.size + println (s"k = $k, n = ${x.dim2}") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for Cubic X Regression with $tech", lines = true) + println (s"$tech: rSq = $rSq") + end for + +end symbolicRegressionTest5 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `symbolicRegressionTest6` main function tests the `SymbolicRegression` + * object using the AutoMPG dataset. Assumes no missing values. + * It tests "Cubic XX Regression" (with cross, cross3 = true) and + * applies forward selection, backward elimination, or stepwise regression. + * WARNING: setting cross3 = true can lead to an explosion of terms. 
+ * > runMain scalation.modeling.symbolicRegressionTest6 + */ +@main def symbolicRegressionTest6 (): Unit = + +// println (s"x = $x") +// println (s"y = $y") + + banner ("AutoMPG Cubic XX Regression") + val mod = SymbolicRegression.cubic (x, y, x_fname, // add x^2 and x^3 terms + true, true, true) // add intercept, cross and cross3 terms + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + for tech <- SelectionTech.values do + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv + val k = cols.size + println (s"k = $k, n = ${x.dim2}") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for Cubic XX Regression with $tech", lines = true) + println (s"$tech: rSq = $rSq") + end for + +end symbolicRegressionTest6 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `symbolicRegressionTest7` main function tests the `SymbolicRegression` + * object using the AutoMPG dataset. Assumes no missing values. + * It tests custom "Symbolic Regression", with powers specified in "Set (...)" and + * applies forward selection, backward elimination, or stepwise regression. + * This test case performs data rescaling. 
+ * > runMain scalation.modeling.symbolicRegressionTest7 + */ +@main def symbolicRegressionTest7 (): Unit = + + val RESCALE = true + +// println (s"x = $x") +// println (s"y = $y") + + banner ("AutoMPG Symbolic Regression") + val mod = + if RESCALE then + SymRidgeRegression.rescale (x, y, x_fname, Set (-2, -1, 2)) // add cross-terms and given powers & rescale (0.5 -> NaN) + else + SymRidgeRegression (x, y, x_fname, Set (-2, -1, 0.5, 2)) // add cross-terms and given powers + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + for tech <- SelectionTech.values do + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv + val k = cols.size + println (s"k = $k, n = ${x.dim2}") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for Symbolic Regression with $tech", lines = true) + println (s"$tech: rSq = $rSq") + end for + +end symbolicRegressionTest7 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `symbolicRegressionTest8` main function tests the `SymbolicRegression` + * object using a simulated gravity dataset. 
+ * It tests custom "Symbolic Regression", with a custom term: x0 x1^(-2) + * FIX - acquire a real gravity dataset + * > runMain scalation.modeling.symbolicRegressionTest8 + */ +@main def symbolicRegressionTest8 (): Unit = + + import random.{Uniform, Normal} + + val noise = Normal (0, 10) // random noise + val rad = Uniform (6370, 7000) // distance from the center of the Earth in km + val mas = Uniform (50, 150) // mass of person + + val m1 = 5.97219E24 // mass of Earth in kg + val G = 6.67408E-11 // gravitational constant in m^3 kg^-1 s^-2 + + val xy = new MatrixD (100, 3) // simulated gravity data + for i <- xy.indices do + val m2 = mas.gen // unit of kilogram (kg) + val r = 1000 * rad.gen // unit of meter (m) + xy(i, 0) = m2 // mass of person + xy(i, 1) = r // radius/distance + xy(i, 2) = G * m1 * m2 / r~^2 + noise.gen // force of gravity + end for + + val fname = Array ("mass2", "radius") + + println (s"xy = $xy") + val (x, y) = (xy.not (?, 2), xy(?, 2)) + + banner ("Newton's Universal Gravity Symbolic Regression") + val mod = SymbolicRegression (x, y, fname, null, false, false, + terms = Array ((0, 1.0), (1, -2.0))) // add one custom term + + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + println (s"b =~ GM = ${G * m1}") // Gravitational Constant * Mass of the Earth + +end symbolicRegressionTest8 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `symbolicRegressionTest9` main function tests the `SymbolicRegression` + * object using a simple dataset to compare Regression, Quadratic Regression + * and Cubic Regression. 
+ * > runMain scalation.modeling.symbolicRegressionTest9 + */ +@main def symbolicRegressionTest9 (): Unit = + + val x = VectorD (1, 2, 3, 4, 5) + val y = VectorD (1, 3, 3, 5, 4) + val ox = MatrixD.one (x.dim) :^+ x + + banner ("Regression") + var mod = new Regression (ox, y) + mod.trainNtest ()() + println (mod.summary ()) + new Plot (null, y, mod.predict (mod.getX), s"${mod.modelName} y vs yp", lines = true) + + banner ("Quadratic Regression") + val fname = Array ("x") + mod = SymbolicRegression.quadratic (MatrixD (x).transpose, y, fname) + mod.trainNtest ()() + println (mod.summary ()) + new Plot (null, y, mod.predict (mod.getX), s"${mod.modelName} y vs yp", lines = true) + + banner ("Cubic Regression") + mod = SymbolicRegression.cubic (MatrixD (x).transpose, y, fname) + mod.trainNtest ()() + println (mod.summary ()) + new Plot (null, y, mod.predict (mod.getX), s"${mod.modelName} y vs yp", lines = true) + +end symbolicRegressionTest9 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `symbolicRegressionTest10` main function tests the `SymbolicRegression` + * object using the Forest Fires dataset. Assumes no missing values. + * It tests "Quadratic Regression" (with cross = false/true) and + * applies forward selection, backward elimination, or stepwise regression. 
+ * > runMain scalation.modeling.symbolicRegressionTest10 + */ +@main def symbolicRegressionTest10 (): Unit = + + val xy = MatrixD.load ("forestfires.csv", 1, 4) // skip columns 0, 1, 2, 3 + val resp = xy.dim2 - 1 + val y = xy(?, resp) // response - burned area + val x = xy.not (?, resp) + val fname = Array ("FFMC", "DMC", "DC", "ISI", "temp", "RH", "wind", "rain") + +// println (s"x = $x") + println (s"y = $y") + + banner ("Forest Fires Quadratic Regression") + val mod = SymbolicRegression.quadratic (x, y, fname, cross = true) // add x^2 terms, try cross false/true + // adds intercept by default + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + println (s"fname = ${stringOf (fname)}") + + for tech <- SelectionTech.values do + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv + val k = cols.size + println (s"k = $k, n = ${x.dim2}") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for Quadratic Regression with $tech", lines = true) + println (s"$tech: rSq = $rSq") + end for + +end symbolicRegressionTest10 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `symbolicRegressionTest11` main function tests the `SymbolicRegression` + * object using the Forest Fires dataset. Assumes no missing values. + * It tests "Cubic Regression" (with cross = false, the default) and + * applies forward selection, backward elimination, or stepwise regression. + * It illustrates the conversion of string columns into ORDINAL/integer columns. 
+ * > runMain scalation.modeling.symbolicRegressionTest11 + */ +@main def symbolicRegressionTest11 (): Unit = + + val month = VectorS ("jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec") + val day = VectorS ("sun", "mon", "tue", "wed", "thu", "fri", "sat") + + // loadStr: column 2 in file has month strings, column 3 has day strings + + val xy = MatrixD.loadStr ("forestfires.csv", 1, 0)(Set (2, 3), month, day) + val resp = xy.dim2 - 1 + val y = xy(?, resp) // response - burned area + val x = xy.not (?, resp) + val fname = Array ("X", "Y", "month", "day", "FFMC", "DMC", "DC", "ISI", "temp", "RH", "wind", "rain") + + println (s"x = $x") + println (s"y = $y") + + banner ("Forest Fires Cubic Regression with Log with Ordinal Values") + val mod = SymbolicRegression (x, y, fname, + Set (0, 1, 2, 3), cross = false) // use log(x), x, x^2 and x^3 terms + // adds intercept by default + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + println (s"fname = ${stringOf (fname)}") + +// for tech <- SelectionTech.values do + val tech = SelectionTech.Forward +// val tech = SelectionTech.Backward +// val tech = SelectionTech.Stepwise + + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv + val k = cols.size + println (s"k = $k, n = ${x.dim2}") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for Quadratic Regression with $tech", lines = true) + println (s"$tech: rSq = $rSq") + +// end for + +end symbolicRegressionTest11 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `symbolicRegressionTest12` main function tests the `SymbolicRegression` + * object using the Forest Fires dataset. Assumes no missing values. + * It tests "Cubic Regression" (with cross = false, the default) and + * applies forward selection, backward elimination, or stepwise regression. 
+ * It illustrates the conversion of string columns into ORDINAL/integer columns. + * Then the day ordinal column (3) is converted to six DUMMY VARIABLE columns + * that are NOT expanded. + * > runMain scalation.modeling.symbolicRegressionTest12 + */ +@main def symbolicRegressionTest12 (): Unit = + + val month = VectorS ("jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec") + val day = VectorS ("sun", "mon", "tue", "wed", "thu", "fri", "sat") + + // loadStr: column 2 in file has month strings, column 3 has day strings + + val xy = MatrixD.loadStr ("forestfires.csv", 1, 0)(Set (2, 3), month, day) + val resp = xy.dim2 - 1 + val y = xy(?, resp) // response - burned area + var x = xy.not (?, resp) + val fname = Array ("X", "Y", "month", "day", "FFMC", "DMC", "DC", "ISI", "temp", "RH", "wind", "rain") + val fnam2 = Array ("dv1", "dv2", "dv3", "dv4", "dv5", "dv6") + + // remove column 3 (day) from x and convert it to six dummy variable columns in matrix dv + + val day_col = x(?, 3).toInt + x = x.not (?, 3) + + println (s"day_col = $day_col") + val dv = RegressionCat.dummyVars (MatrixI (day_col).transpose) + + println (s"x = $x") // regular predictor variables + println (s"dv = $dv") // dummy variables + println (s"y = $y") // response variable + + banner ("Forest Fires Quadratic Regression with Ordinal Values") + val mod = SymbolicRegression.withDvars (x, dv, y, fname, fnam2, + Set (1, 2, 3), cross = false) // use x, x^2 and x^3 terms + // adds intercept by default + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + println (s"fname = ${stringOf (fname)}") + +// for tech <- SelectionTech.values do + val tech = SelectionTech.Forward +// val tech = SelectionTech.Backward +// val tech = SelectionTech.Stepwise + + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv + val k = cols.size + println (s"k = $k, n = 
${x.dim2}") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for Quadratic Regression with $tech", lines = true) + println (s"$tech: rSq = $rSq") + +// end for + +end symbolicRegressionTest12 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `symbolicRegressionTest13` main function tests the `SymbolicRegression` + * object using a simple dataset to compare Regression and Quadratic Regression. + * > runMain scalation.modeling.symbolicRegressionTest13 + */ +@main def symbolicRegressionTest13 (): Unit = + +/* + import neuralnet._ + import ActivationFun._ + import scala.math.tanh +*/ + + val x = VectorD (1, 2, 3, 4, 5, 6, 7, 8, 9) +// val y = VectorD (8, 6, 4, 2, 1, 3, 5, 9, 7) +// val y = VectorD (7, 8, 5, 3, 2, 1, 4, 6, 9) + val y = VectorD (7, 6, 5, 3, 2, 1, 4, 8, 9) + val ox = MatrixD.one (x.dim) :^+ x + + banner ("SimpleRegression") + val mod0 = SimpleRegression (x, y, null) + mod0.trainNtest ()() + println (mod0.summary ()) + new Plot (null, y, mod0.predict (mod0.getX), s"${mod0.modelName} y vs yp", lines = true) + + banner ("Regression") + var mod = new Regression (ox, y) + mod.trainNtest ()() + println (mod.summary ()) + new Plot (null, y, mod.predict (mod.getX), s"${mod.modelName} y vs yp", lines = true) + + banner ("Quadratic Regression") + val fname = Array ("x") + mod = SymbolicRegression.quadratic (MatrixD (x).transpose, y, fname) + mod.trainNtest ()() + println (mod.summary ()) + new Plot (null, y, mod.predict (mod.getX), s"${mod.modelName} y vs yp", lines = true) + +/* + banner ("NeuralNet 3-Layer") + Optimizer.hp("eta") = 0.3 + Optimizer.hp("bSize") = 2 + val mod2 = NeuralNet_3L.rescale (MatrixD (x).transpose, MatrixD (y).transpose, fname, nz = 3, f = f_sigmoid, f1 = f_id) + mod2.trainNtest2 ()() +// println (mod2.summary ()) + new Plot (null, y, (mod2.predict (mod2.getX))(?, 0), s"${mod2.modelName} y vs yp", lines = true) + + val yp = x.map { z => + 1.6366 * tanh(2.2771 * z - 1.3205) - + 
0.8194 * tanh(1.5244 * z + 1.0258) + + 0.5099 * tanh(-0.6860 * z - 0.6764) - + 0.8904 * tanh(1.2081 * z + 1.6912) + 1.7239 } + + println (MatrixD (y, yp).transpose) +*/ + +end symbolicRegressionTest13 + diff --git a/src/main/scala/scalation/modeling/TranRegression.scala b/src/main/scala/scalation/modeling/TranRegression.scala index b6f9a1f64..9e0edab48 100644 --- a/src/main/scala/scalation/modeling/TranRegression.scala +++ b/src/main/scala/scalation/modeling/TranRegression.scala @@ -7,11 +7,13 @@ * * @note Model: Transformed Multiple Linear Regression (Transforms y) * + * Delegates transform (y) via y.map (tran) to Regression + * * @see data.princeton.edu/wws509/notes/c2s10.html * @see scala-lang.org/api/3.x/scala/math.html * @see `scalation.CommonFunctions` * - * Common transformation pairs: (tran, itran) or reversed + * Common transformation pairs: (f, fi) or reversed * (log, exp), (log1p, expm1), (ihs, sinh), (cbrt, cb), (sqrt, sq), (box_cox, cox_box) */ @@ -19,6 +21,7 @@ package scalation package modeling import scala.collection.mutable.IndexedSeq +//import scala.math.{exp, expm1, log, log1p, sqrt} import scala.math.{exp, log, sqrt} import scalation.mathstat._ @@ -28,9 +31,9 @@ import scalation.random.Normal /** The `TranRegression` class supports transformed multiple linear regression. * In this case, 'x' is multi-dimensional [1, x_1, ... x_k]. Fit the parameter * vector 'b' in the transformed regression equation - * transform (y) = b dot x + e = b_0 + b_1 * x_1 + b_2 * x_2 ... b_k * x_k + e + * tran (y) = b dot x + e = b_0 + b_1 * x_1 + b_2 * x_2 ... b_k * x_k + e * where 'e' represents the residuals (the part not explained by the model) and - * 'transform' is the function (defaults to log) used to transform the response vector 'y'. + * 'tran' is the function (defaults to log1p) used to transform the response vector 'y'. * Common transforms include 'log (y)', 'sqrt (y)' when 'y > 0', or even 'sq (y)', 'exp (y)'. 
* More generally, a Box-Cox Transformation may be applied. * @see citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.469.7176&rep=rep1&type=pdf @@ -38,27 +41,24 @@ import scalation.random.Normal * Note: this class does not provide transformations on columns of matrix 'x'. * @see www.ams.sunysb.edu/~zhu/ams57213/Team3.pptx * @param x the data/input m-by-n matrix - * @param y the response/output m-vector + * @param y the un-transformed response/output m-vector * @param fname_ the feature/variable names (defaults to null) * @param hparam the hyper-parameters (defaults to Regression.hp) - * @param tran the transformation function (defaults to log) - * @param itran the inverse transformation function to rescale predictions to original y scale (defaults to exp) + * @param yℱ the transformation (f with inverse fi) applied to y (e.g., Boxcox) */ class TranRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, hparam: HyperParameter = Regression.hp, - tran: FunctionS2S = log, itran: FunctionS2S = exp) - extends Regression (x, y.map (tran), fname_, hparam): + yℱ: Transform = null) // null mean no transform + extends Regression (x, if yℱ == null then y else yℱ.f(y), // transform y and pass to `Regression` + fname_, hparam): private val debug = debugf ("TranRegression", true) // debug function private val flaw = flawf ("TranRegression") // flaw function private val inf = getY.findInfinity // infinite transformed response elements - modelName = s"TranRegression" + _modelName = "TranRegression" if ! inf.isEmpty then flaw ("init", s"the transformed response vector has infinite elements at $inf") - if ! y.isNonnegative then - throw new IllegalArgumentException ("y must be positive for transformed regression (log, sqrt)") - // FIX - may work for other transformations //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Test a predictive model y_ = f(x_) + e and return its QoF vector. 
@@ -68,10 +68,10 @@ class TranRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, * @param x_ the testing/full data/input matrix (defaults to full x) * @param y_ the testing/full response/output vector (defaults to full y) */ - def test0 (x_ : MatrixD = x, y_ : VectorD = getY): (VectorD, VectorD) = - val yp = x_ * b // make predictions + def test_ (x_ : MatrixD = x, y_ : VectorD = getY): (VectorD, VectorD) = + val yp = predict_ (x_) // make predictions on transformed values (yp, diagnose (y_, yp)) // return predictions and QoF vector - end test0 + end test_ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Test a predictive model y_ = f(x_) + e and return its QoF vector. @@ -82,7 +82,7 @@ class TranRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, * @param y_ the testing/full response/output vector (defaults to full y) */ override def test (x_ : MatrixD = x, y_ : VectorD = y): (VectorD, VectorD) = - val yp = (x_ * b).map (itran) // make predictions + val yp = predict (x_) // make predictions on original values (yp, diagnose (y_, yp)) // return predictions and QoF vector end test @@ -101,56 +101,88 @@ class TranRegression (x: MatrixD, y: VectorD, fname_ : Array [String] = null, debug ("trainNTest", s"b = $b") val (yp, qof) = test (xx, yy) println (report (qof)) - if DO_PLOT then - val (ryy, ryp) = orderByY (yy, yp) // order by yy - new Plot (null, ryy, ryp, s"$modelName: y actual, predicted", lines = true) - end if + Predictor.plotPrediction (yy, yp, modelName, false) + Predictor.plotPrediction (yy, yp, modelName) // reordered (yp, qof) end trainNtest //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict the value of y = f(z) by evaluating the formula y = b dot z, - * e.g., (b_0, b_1, b_2) dot (1, z_1, z_2). + /** Predict the value of y = f(z) by evaluating the formula y = b dot z. + * It works on transformed values. 
+ * @param z the new vector to predict + */ + def predict_ (z: VectorD): Double = b dot z + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of vector y = f(x_, b). It works on transformed values. + * @param x_ the matrix to use for making predictions, one for each row + */ + def predict_ (x_ : MatrixD): VectorD = x_ * b + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of y = f(z) by evaluating the formula y = b dot z. + * It is overridden to handle transformations. * @param z the new vector to predict */ - override def predict (z: VectorD): Double = itran (b dot z) + override def predict (z: VectorD): Double = + if yℱ == null then b dot z else yℱ.fi_(b dot z) + end predict //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict the value of y = f(z) by evaluating the formula y = b dot z for - * each row of matrix x_. - * @param z the new matrix to predict + /** Predict the value of vector y = f(x_, b). It is overridden to handle transformations. + * @param x_ the matrix to use for making predictions, one for each row */ - override def predict (x_ : MatrixD): VectorD = (x_ * b).map (itran) + override def predict (x_ : MatrixD): VectorD = + if yℱ == null then x_ * b else yℱ.fi(x_ * b) + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform In-Sample Testing, i.e., train and test on the full data set. + * Train on transformed y (via getY), test on original y. 
+ * @param skip the number of initial data points to skip (due to insufficient information) + * @param showYp whether to show the prediction vector + */ + override def inSample_Test (skip: Int = 0, showYp: Boolean = false): Unit = + val (x_, y__, y_) = (x.drop (skip), getY.drop (skip), y.drop (skip)) + val yp = trainNtest (x_, y__)(x_, y_)._1 + if showYp then + println (s"Final In-Sample Prediction Vector yp = $yp") + end inSample_Test //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /* Use validation to compute test Quality of Fit (QoF) measures by dividing - * the full dataset into a TESTING set and a TRAINING set. - * The test set is defined by idx and the rest of the data is the training set. - * FIX - currently must override if y is transformed, @see `Predictor` + * the full dataset into a TESTING-set and a TRAINING-set. + * The testing-set is defined by idx and the rest of the data is the training-set. + * @note: currently must override if y is transformed, @see `Predictor` + * @see `modeling.Predictor.validate` about the RANDOM, FIRST, and LAST options + * for selecting the testing-set. 
* @param rando flag indicating whether to use randomized or simple validation - * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) - * @param idx the prescribed TESTING set indices + * @param ratio the ratio of the TESTING-set to the full dataset (most common 70-30 (.3), 80-20 (.2)) + * @param idx the prescribed TESTING-set indices (default => generate) */ - override def validate (rando: Boolean = true, ratio: Double = 0.2) - (idx : IndexedSeq [Int] = testIndices ((ratio * y.dim).toInt, rando)): VectorD = + override def validate (rando: Boolean = true, ratio: Double = Model.TE_RATIO) +// (idx: IndexedSeq [Int] = testIndices ((ratio * y.dim).toInt, rando)): + (idx: IndexedSeq [Int] = testIndices (y.dim, (ratio * y.dim).toInt, rando)): + (VectorD, VectorD) = val (x_e, x_, y_e, y_) = TnT_Split (x, y, idx) // Test-n-Train Split - train (x_, y_.map (tran (_))) // train model on the training set - val qof = test (x_e, y_e)._2 // test on test-set and get QoF measures - if qof(QoF.sst.ordinal) <= 0.0 then // requires variation in test-set + val yy = if yℱ == null then y_ else yℱ.f (y_) + train (x_, yy) // train model on the TRAINING-set + val (yp, qof) = test (x_e, y_e) // test on TESTING-set and get QoF measures + if qof(QoF.sst.ordinal) <= 0.0 then // requires variation in TESTING-set flaw ("validate", "chosen testing set has no variability") - end if println (FitM.fitMap (qof, QoF.values.map (_.toString))) - qof + (yp, qof) end validate //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. 
* @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - override def buildModel (x_cols: MatrixD): Regression = + override def buildModel (x_cols: MatrixD, fname2: Array [String] = null): TranRegression = debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") - new TranRegression (x_cols, y, null, hparam, tran, itran) + new TranRegression (x_cols, y, fname2, hparam, yℱ) end buildModel end TranRegression @@ -158,107 +190,67 @@ end TranRegression //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `TranRegression` companion object provides transformation and inverse - * transformation function based on the parameter 'lambda'. - * It support the family of Box-Cox transformations. + * transformation function based on the parameter λ. + * It support the family of Box-Cox and Yeo-Johnson Transformations. */ object TranRegression: - private val debug = debugf ("TranRegression", false) // debug function - private var lambda = 0.5 // the power parameter for Box-Cox transformations + private var _λ = 0.4 // the power parameter for transformations - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the value for the 'lambda' parameter. Must be called before Box-Cox - * 'apply' method. - * @param lambda_ the new value for the 'lambda' parameter - */ - def setLambda (lambda_ : Double): Unit = lambda = lambda_ + inline def λ: Double = _λ // getter-setter for λ + def λ_= (λ_ : Double): Unit = _λ = λ_ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Transform y using the Box-Cox transformation. + /** Transform y using the Yeo-Johnson transformation. 
* @param y the value to be transformed */ - def box_cox (y: Double): Double = - if lambda == 0.0 then log (y) - else (y ~^ lambda - 1.0) / lambda - end box_cox + def yeo_john (y: Double): Double = + if y >= 0.0 then + if λ == 0.0 then log (1+y) + else ((1+y) ~^ λ - 1) / λ + else + if λ == 2.0 then -log (1-y) + else { val _2λ = 2-λ; -((1-y) ~^ _2λ - 1) / _2λ } + end yeo_john //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Inverse transform z using the Box-Cox transformation. + /** Inverse transform z using the Yeo-Johnson transformation. * @param z the value to be inverse transformed */ - def cox_box (z: Double): Double = - if lambda == 0.0 then exp (z) - else (lambda * z + 1.0) ~^ (1.0 / lambda) - end cox_box + def john_yeo (z: Double): Double = + if z >= 0.0 then + if λ == 0.0 then exp (z) - 1 + else (λ*z + 1) ~^ (1/λ) - 1 + else + if λ == 2.0 then 1 - exp (-z) + else { val _2λ = 2-λ; 1 - (-_2λ*z + 1) ~^ (1/_2λ) } + end john_yeo //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `TranRegression` object that uses a Box-Cox transformation. - * To change 'lambda' from its default value, call 'set_lambda' first. + /** Create a `TranRegression` object that uses a Yeo-Johnson Transformation. + * To change λ from its default value, call `set_lambda` first. * @param x the data/input matrix * @param y the response/output vector * @param fname the feature/variable names (defaults to null) * @param hparam the hyper-parameters (defaults to Regression.hp) - */ - def apply (x: MatrixD, y: VectorD, fname: Array [String] = null, + * + def app_yj (x: MatrixD, y: VectorD, fname: Array [String] = null, hparam: HyperParameter = Regression.hp): TranRegression = - new TranRegression (x, y, fname, hparam, box_cox, cox_box) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `TranRegression` with automatic rescaling from a combined data matrix. 
- * @param xy the combined data/input and response/output matrix - * @param fname the feature/variable names - * @param hparam the hyper-parameters - * @param tran the transformation function (defaults to log) - * @param itran the inverse transformation function to rescale predictions to original y scale - * @param bounds the bounds for rescaling + new TranRegression (x, y, fname, hparam, yeo_john, john_yeo) + end app_yj */ - def apply (xy: MatrixD, fname: Array [String], - hparam: HyperParameter, tran: FunctionS2S, itran: FunctionS2S, - bounds: (Double, Double)): TranRegression = - val hp2 = if hparam == null then Regression.hp else hparam - val (x, y) = (xy.not(?, xy.dim2-1), xy(?, xy.dim2-1)) - - val y_s = // scaled version of y - if bounds != null then // scale to bounds - val extrem = extreme (y) - scaleV (extrem, bounds)(y) - else // normalize - val (mu_y, sig_y) = (y.mean, y.stdev) - normalizeV (mu_y, sig_y)(y) - end if - - debug ("apply", s"scaled: scaled y = $y_s") - new TranRegression (x, y_s, fname, hp2, tran, itran) - end apply //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `TranRegression` with automatic rescaling from a data matrix and - * response vector. - * @param x the data/input matrix - * @param y the response/output vector + /** Create a `TranRegression` with from a combined matrix. 
+ * @param xy the combined data/input and response/output matrix * @param fname the feature/variable names * @param hparam the hyper-parameters - * @param tran the transformation function (defaults to log) - * @param itran the inverse transformation function to rescale predictions to original y scale - * @param bounds the bounds for rescaling + * @param yℱ the transformation applied to y (e.g., Boxcox) */ - def apply (x: MatrixD, y: VectorD, fname: Array [String], - hparam: HyperParameter, tran: FunctionS2S, itran: FunctionS2S, - bounds: (Double, Double)): TranRegression = - val hp2 = if hparam == null then Regression.hp else hparam - - val y_s = // scaled version of y - if bounds != null then // scale to bounds - val extrem = extreme (y) - scaleV (extrem, bounds)(y) - else // normalize - val (mu_y, sig_y) = (y.mean, y.stdev) - normalizeV (mu_y, sig_y)(y) - end if - - debug ("apply", s"scaled: scaled y = $y_s") - new TranRegression (x, y_s, fname, hp2, tran, itran) + def apply (xy: MatrixD, fname: Array [String] = null, hparam: HyperParameter = Regression.hp, + yℱ: Transform = null): TranRegression = + val (x, y) = (xy.not(?, xy.dim2-1), xy(?, xy.dim2-1)) + new TranRegression (x, y, fname, hparam, yℱ) end apply end TranRegression @@ -304,13 +296,13 @@ end TranRegressionEx println ("x = " + x) println ("y = " + y) - banner ("Parameter Estimation and Quality of Fit - log transform") + banner ("Parameter Estimation and Quality of Fit - no transform") val mod = new TranRegression (x, y) mod.trainNtest ()() // train and test the model println (mod.summary ()) // parameter/coefficient statistics banner ("Quality of Fit - based on transformed data") - println (mod.report (mod.test0 ()._2)) + println (mod.report (mod.test_ ()._2)) banner ("Prediction") val yp = mod.predict (x) @@ -343,12 +335,12 @@ end tranRegressionTest 1.0, 2.0, 3.0, 0.21, 1.0, 3.0, 3.0, 0.22) - val x_fname = Array ("intercept", "x1", "x2") + val x_fname = Array ("one", "x1", "x2") val (x, y) = (xy.not 
(?, 3), xy(?, 3)) - val xtx = x.transpose * x + val xtx = x.ᵀ * x val yy = y.map (sqrt) - val xtyy = x.transpose * yy + val xtyy = x.ᵀ * yy val b = new Fac_Cholesky (xtx).inverse * xtyy banner ("parameters") @@ -385,7 +377,7 @@ end tranRegressionTest println (s"rSq2 = $rSq2") banner ("Parameter Estimation and Quality of Fit") - val mod = new TranRegression (x, y, x_fname, Regression.hp, sqrt, sq) + val mod = new TranRegression (x, y, x_fname, yℱ = RootForm ()) mod.trainNtest ()() // train and test the model println (mod.summary ()) // parameter/coefficient statistics @@ -396,7 +388,7 @@ end tranRegressionTest2 /** The `tranRegressionTest3` main function tests `TranRegression` class using the * following regression equation and uses the simulated data in `TranRegressionEx`. * sqrt (y) = b dot x = b_0 + b_1*x_1 + b_2*x_2. - * @see 6.12.9 exercises 1, 2, and 3. + * @see 5.13.9 exercises 1, 2, and 3. * > runMain scalation.modeling.tranRegressionTest3 */ @main def tranRegressionTest3 (): Unit = @@ -443,8 +435,8 @@ end tranRegressionTest2 val ys2 = MatrixD (y2, yp2) val ys3 = MatrixD (y, yp3, yp) - new PlotM (null, ys2.transpose, null, "Transformed", lines = true) - new PlotM (null, ys3.transpose, null, "Tran-back", lines = true) + new PlotM (null, ys2.ᵀ, null, "Transformed", lines = true) + new PlotM (null, ys3.ᵀ, null, "Tran-back", lines = true) end tranRegressionTest3 @@ -468,12 +460,12 @@ end tranRegressionTest3 val e = y - yp banner ("TranRegression with sqrt") - val mod = new TranRegression (x, y, null, Regression.hp, sqrt, sq) + val mod = new TranRegression (x, y, yℱ = RootForm ()) mod.trainNtest ()() // train and test the model println (mod.summary ()) // parameter/coefficient statistics banner ("Quality of Fit - based on transformed data") - println (mod.report (mod.test0 ()._2)) // test on transformed data + println (mod.report (mod.test_ ()._2)) // test on transformed data val yp2 = mod.predict (x) val e2 = y - yp2 @@ -481,7 +473,7 @@ end tranRegressionTest3 
println (s"e2.dim = ${e2.dim}") val ys = MatrixD (y, yp, yp2) - new PlotM (null, ys.transpose, lines = true) + new PlotM (null, ys.ᵀ, lines = true) new Plot (null, e, null, "e vs. t", lines = true) new Plot (null, e2, null, "e2 vs. t", lines = true) @@ -494,7 +486,7 @@ end tranRegressionTest4 * following regression equation. * sigmoid^{-1} (y) = b dot x = b_0 + b_1*x_1 + b_2*x_2. * > runMain scalation.modeling.tranRegressionTest5 - */ + * @main def tranRegressionTest5 (): Unit = import Example_AutoMPG._ @@ -518,12 +510,12 @@ end tranRegressionTest4 banner ("TranRegression") // val mod = new Regression (ox, yy.map (f)) // rescale & transform - val mod = new TranRegression (ox, yy, ox_fname, Regression.hp, f, fi) // rescale + val mod = new TranRegression (ox, yy, ox_fname, f, fi) // rescale mod.trainNtest ()() // train and test the model println (mod.summary ()) // parameter/coefficient statistics banner ("Quality of Fit - based on transformed data") - println (mod.report (mod.test0 ()._2)) // test with transformed + println (mod.report (mod.test_ ()._2)) // test with transformed val yp2 = mod.predict (ox) val e2 = yy - yp2 @@ -537,12 +529,13 @@ end tranRegressionTest4 val ryp2 = yp2_.reorder (rnk) // TranRegression - blue val ys = MatrixD (ry, ryp, ryp2) - new PlotM (null, ys.transpose, lines = true) + new PlotM (null, ys.ᵀ, lines = true) new Plot (null, e, null, "e vs. t", lines = true) new Plot (null, e2, null, "e2 vs. 
t", lines = true) end tranRegressionTest5 + */ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -558,7 +551,7 @@ end tranRegressionTest5 val y = VectorD (14.0, 12.1, 10.9, 10.0, 9.3, 8.6, 8.0, 7.5, 7.0, 6.2, 5.5, 4.5, 3.5, 2.0, 0.9) val _1 = VectorD.one (x1.dim) - val x = MatrixD (_1, x1).transpose + val x = MatrixD (_1, x1).ᵀ banner ("SimpleRegression") val reg = new SimpleRegression (x, y) @@ -568,7 +561,7 @@ end tranRegressionTest5 new Plot (x1, y, yp, "SimpleRegression", lines = true) banner ("TranRegression (log)") - val mod = new TranRegression (x, y) + val mod = new TranRegression (x, y, yℱ = LogForm ()) mod.trainNtest ()() // train and test the model println (mod.summary ()) // parameter/coefficient statistics val yp2 = mod.predict (x) @@ -598,27 +591,31 @@ end tranRegressionTest6 // val f = (id, id, "id") // val f = (recip, recip, "recip") // val f = (log, exp, "log") - val f = (sqrt, sq, "sqrt") +// val f = (log1p, expm1, "log1p") +// val f = (sqrt, sq, "sqrt") // val f = (sq, sqrt, "sq") // val f = (exp, log, "exp") // import TranRegression.{box_cox, cox_box} -// TranRegression.setLambda (0.2); val f = (box_cox, cox_box, "box_cox") // try 0.2, 0.3, 0.4, 0.5, 0.6 +// TranRegression.λ = 0.4; val f = (box_cox, cox_box, "box_cox") // try 0.2, 0.3, 0.4, 0.5, 0.6 - banner (s"TranRegression with ${f._3} transform") - val mod = new TranRegression (ox, y, ox_fname, Regression.hp, f._1, f._2) + banner (s"TranRegression No transform vs. 
Boxcox transform") +// val mod = new TranRegression (ox, y, ox_fname) + val mod = new TranRegression (ox, y, ox_fname, yℱ = BoxcoxForm ()) +// val mod = new TranRegression (ox, y, ox_fname, Regression.hp, f._1, f._2) mod.trainNtest ()() // train and test the model println (mod.summary ()) // parameter/coefficient statistics banner ("Validation Test") mod.validate ()() +/* banner ("Forward Selection Test") val (cols, rSq) = mod.forwardSelAll () // R^2, R^2 bar, R^2 cv val k = cols.size println (s"k = $k, n = ${ox.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for TranRegression ${f._3}", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for TranRegression ${f._3}", lines = true) println (s"rSq = $rSq") +*/ end tranRegressionTest7 @@ -661,15 +658,15 @@ end tranRegressionTest7 val yp = reg.predict (ox) // y predicted for Regression println (s"predict = $yp") - banner ("Quadrastic Regression") + banner ("Quadratic Regression") val qrg = SymbolicRegression.quadratic (x, y, x_fname) // create a Quadratic Regression model qrg.trainNtest ()() // train and test the model println (qrg.summary ()) // parameter/coefficient statistics val yp2 = qrg.predict (qrg.getX) // y predicted for Quadratic Regression println (s"predict = $yp2") - banner ("Transformed Regression") - val mod = new TranRegression (ox, y, ox_fname, Regression.hp, sqrt, sq) // sqrt Transformed Regression model + banner ("Transformed Regression -- sqrt") + val mod = new TranRegression (ox, y, ox_fname, yℱ = RootForm ()) // sqrt Transformed Regression model mod.trainNtest ()() // train and test the model println (mod.summary ()) // parameter/coefficient statistics val yp3 = mod.predict (ox) // y predicted for Transformed Regression @@ -677,7 +674,8 @@ end tranRegressionTest7 val mat = MatrixD (y, yp, yp2, yp3) println (s"mat = $mat") - new PlotM (null, mat, null, "y vs. yp vs. yp2 vs. 
yp3", true) + new PlotM (null, mat, Array ("actual", "linear", "quad", "sqrt"), + "y vs. yp vs. yp2 vs. yp3", true) banner ("Expanded Form") println (s"expanded x = ${mod.getX}") @@ -685,3 +683,43 @@ end tranRegressionTest7 end tranRegressionTest8 + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tranRegressionTest9` main function tests the Box-Cox and Yeo-Johnson Transformations. + * > runMain scalation.modeling.tranRegressionTest9 + * +@main def tranRegressionTest9 (): Unit = + + import TranRegression._ + + banner ("Text Transformations on Positive Values") + var y = VectorD.range (1 until 11) + println (s"y = $y") + + banner ("Positive Values -- Box-Cox Transformation and Inverse: y -> z -> y'") + var z = y.map (box_cox (_)) + println (s"z = $z") + println (s"y' = ${z.map (cox_box (_))}") + + banner ("Positive Values -- Yeo-Johnson Transformation and Inverse: y -> z -> y'") + z = y.map (yeo_john (_)) + println (s"z = $z") + println (s"y' = ${z.map (john_yeo (_))}") + + banner ("Text Transformations on Negative Values") + y = -y + println (s"y = $y") + + banner ("Negative Values -- Box-Cox Transformation and Inverse: y -> z -> y'") + z = y.map (box_cox (_)) + println (s"z = $z") + println (s"y' = ${z.map (cox_box (_))}") + + banner ("Negative Values -- Yeo-Johnson Transformation and Inverse: y -> z -> y'") + z = y.map (yeo_john (_)) + println (s"z = $z") + println (s"y' = ${z.map (john_yeo (_))}") + +end tranRegressionTest9 + */ + diff --git a/src/main/scala/scalation/modeling/TranRegression2.scala b/src/main/scala/scalation/modeling/TranRegression2.scala new file mode 100644 index 000000000..a05b04407 --- /dev/null +++ b/src/main/scala/scalation/modeling/TranRegression2.scala @@ -0,0 +1,779 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller, Mustafa Nural + * @version 2.0 + * @date Sat Jan 20 15:41:27 EST 2018 + * @see LICENSE (MIT style license file). 
+ * + * @note Model: Transformed Multiple Linear Regression (Transforms y) + * + * Delegates transform (y) via y.map (tran) to Regression + * + * @see data.princeton.edu/wws509/notes/c2s10.html + * @see scala-lang.org/api/3.x/scala/math.html + * @see `scalation.CommonFunctions` + * + * Common transformation pairs: (tran, itran) or reversed + * (log, exp), (log1p, expm1), (ihs, sinh), (cbrt, cb), (sqrt, sq), (box_cox, cox_box) + */ + +package scalation +package modeling + +import scala.collection.mutable.IndexedSeq +import scala.math.{exp, expm1, log, log1p, sqrt} + +import scalation.mathstat._ +import scalation.random.Normal + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `TranRegression2` class supports transformed multiple linear regression. + * In this case, 'x' is multi-dimensional [1, x_1, ... x_k]. Fit the parameter + * vector 'b' in the transformed regression equation + * tran (y) = b dot x + e = b_0 + b_1 * x_1 + b_2 * x_2 ... b_k * x_k + e + * where 'e' represents the residuals (the part not explained by the model) and + * 'tran' is the function (defaults to log1p) used to transform the response vector 'y'. + * Common transforms include 'log (y)', 'sqrt (y)' when 'y > 0', or even 'sq (y)', 'exp (y)'. + * More generally, a Box-Cox Transformation may be applied. + * @see citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.469.7176&rep=rep1&type=pdf + * Use Least-Squares (minimizing the residuals) to fit the parameter vector 'b' + * Note: this class does not provide transformations on columns of matrix 'x'. 
+ * @see www.ams.sunysb.edu/~zhu/ams57213/Team3.pptx + * @param x the data/input m-by-n matrix + * @param y the response/output m-vector + * @param fname_ the feature/variable names (defaults to null) + * @param hparam the hyper-parameters (defaults to Regression.hp) + * @param tran the transformation function (defaults to log1p) + * @param itran the inverse transformation function to rescale predictions to original y scale (defaults to expm1) + */ +class TranRegression2 (x: MatrixD, y: VectorD, fname_ : Array [String] = null, + hparam: HyperParameter = Regression.hp, + tran: FunctionS2S = log1p, itran: FunctionS2S = expm1) // FIX - use Transform class + extends Regression (x, y.map (tran), fname_, hparam): // transform y and pass to `Regression` + + private val debug = debugf ("TranRegression2", true) // debug function + private val flaw = flawf ("TranRegression2") // flaw function + private val inf = getY.findInfinity // infinite transformed response elements + + _modelName = "TranRegression2" + + if ! inf.isEmpty then flaw ("init", s"the transformed response vector has infinite elements at $inf") + if ! y.isNonnegative then + throw new IllegalArgumentException ("y must be positive for transformed regression (log, sqrt)") + // FIX - may work for other transformations + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Test a predictive model y_ = f(x_) + e and return its QoF vector. + * Testing may be be in-sample (on the training set) or out-of-sample + * (on the testing set) as determined by the parameters passed in. + * Note: must call train before test. Test using the TRANSFORMED DATA. 
+ * @param x_ the testing/full data/input matrix (defaults to full x) + * @param y_ the testing/full response/output vector (defaults to full y) + */ + def test0 (x_ : MatrixD = x, y_ : VectorD = getY): (VectorD, VectorD) = + val yp = x_ * b // make predictions + (yp, diagnose (y_, yp)) // return predictions and QoF vector + end test0 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Test a predictive model y_ = f(x_) + e and return its QoF vector. + * Testing may be be in-sample (on the training set) or out-of-sample + * (on the testing set) as determined by the parameters passed in. + * Note: must call train before test. Test using the ORIGINAL DATA. + * @param x_ the testing/full data/input matrix (defaults to full x) + * @param y_ the testing/full response/output vector (defaults to full y) + */ + override def test (x_ : MatrixD = x, y_ : VectorD = y): (VectorD, VectorD) = + val yp = (x_ * b).map (itran) // make predictions + (yp, diagnose (y_, yp)) // return predictions and QoF vector + end test + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Train and test the predictive model y_ = f(x_) + e and report its QoF + * and plot its predictions. 
+ * Currently must override if y is transformed, @see `Predictor` + * @param x_ the training/full data/input matrix (defaults to full x) + * @param y_ the training/full response/output vector (defaults to full y) + * @param xx the testing/full data/input matrix (defaults to full x) + * @param yy the testing/full response/output vector (defaults to full y) + */ + override def trainNtest (x_ : MatrixD = x, y_ : VectorD = getY) + (xx: MatrixD = x, yy: VectorD = y): (VectorD, VectorD) = + train (x_, y_) + debug ("trainNTest", s"b = $b") + val (yp, qof) = test (xx, yy) + println (report (qof)) + Predictor.plotPrediction (yy, yp, modelName, false) + Predictor.plotPrediction (yy, yp, modelName) // reordered + (yp, qof) + end trainNtest + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of y = f(z) by evaluating the formula y = b dot z, + * e.g., (b_0, b_1, b_2) dot (1, z_1, z_2). + * @param z the new vector to predict + */ + override def predict (z: VectorD): Double = itran (b dot z) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of y = f(z) by evaluating the formula y = b dot z for + * each row of matrix x_. + * @param z the new matrix to predict + */ + override def predict (x_ : MatrixD): VectorD = (x_ * b).map (itran) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform In-Sample Testing, i.e., train and test on the full data set. + * Train on transformed y (via getY), test on original y. 
+ * @param skip the number of initial data points to skip (due to insufficient information) + * @param showYp whether to show the prediction vector + */ + override def inSample_Test (skip: Int = 0, showYp: Boolean = false): Unit = + val (x_, y__, y_) = (x.drop (skip), getY.drop (skip), y.drop (skip)) + val yp = trainNtest (x_, y__)(x_, y_)._1 + if showYp then + println (s"Final In-Sample Prediction Vector yp = $yp") + end inSample_Test + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /* Use validation to compute test Quality of Fit (QoF) measures by dividing + * the full dataset into a TESTING-set and a TRAINING-set. + * The testing-set is defined by idx and the rest of the data is the training-set. + * @note: currently must override if y is transformed, @see `Predictor` + * @see `modeling.Predictor.validate` about the RANDOM, FIRST, and LAST options + * for selecting the testing-set. + * @param rando flag indicating whether to use randomized or simple validation + * @param ratio the ratio of the TESTING-set to the full dataset (most common 70-30 (.3), 80-20 (.2)) + * @param idx the prescribed TESTING-set indices (default => generate) + */ + override def validate (rando: Boolean = true, ratio: Double = Model.TE_RATIO) +// (idx: IndexedSeq [Int] = testIndices ((ratio * y.dim).toInt, rando)): + (idx: IndexedSeq [Int] = testIndices (y.dim, (ratio * y.dim).toInt, rando)): + (VectorD, VectorD) = + val (x_e, x_, y_e, y_) = TnT_Split (x, y, idx) // Test-n-Train Split + + train (x_, y_.map (tran (_))) // train model on the TRAINING-set + val (yp, qof) = test (x_e, y_e) // test on TESTING-set and get QoF measures + if qof(QoF.sst.ordinal) <= 0.0 then // requires variation in TESTING-set + flaw ("validate", "chosen testing set has no variability") + println (FitM.fitMap (qof, QoF.values.map (_.toString))) + (yp, qof) + end validate + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a sub-model that 
is restricted to the given columns of the data matrix. + * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) + */ + override def buildModel (x_cols: MatrixD, fname2: Array [String] = null): TranRegression2 = + debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") + new TranRegression2 (x_cols, y, fname2, hparam, tran, itran) + end buildModel + +end TranRegression2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `TranRegression2` companion object provides transformation and inverse + * transformation function based on the parameter λ. + * It support the family of Box-Cox and Yeo-Johnson Transformations. + */ +object TranRegression2: + + private val debug = debugf ("TranRegression2", false) // debug function + private var _λ = 0.5 // the power parameter for transformations + + inline def λ: Double = _λ // getter-setter for λ + def λ_= (λ_ : Double): Unit = _λ = λ_ + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Transform y using the Box-Cox transformation. + * @param y the value to be transformed + */ + def box_cox (y: Double): Double = + if λ == 0.0 then log (y) + else (y ~^ λ - 1) / λ + end box_cox + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Inverse transform z using the Box-Cox transformation. + * @param z the value to be inverse transformed + */ + def cox_box (z: Double): Double = + if λ == 0.0 then exp (z) + else (λ * z + 1) ~^ (1 / λ) + end cox_box + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Transform y using the Yeo-Johnson transformation. 
+ * @param y the value to be transformed + */ + def yeo_john (y: Double): Double = + if y >= 0.0 then + if λ == 0.0 then log (1+y) + else ((1+y) ~^ λ - 1) / λ + else + if λ == 2.0 then -log (1-y) + else { val _2λ = 2-λ; -((1-y) ~^ _2λ - 1) / _2λ } + end yeo_john + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Inverse transform z using the Yeo-Johnson transformation. + * @param z the value to be inverse transformed + */ + def john_yeo (z: Double): Double = + if z >= 0.0 then + if λ == 0.0 then exp (z) - 1 + else (λ*z + 1) ~^ (1/λ) - 1 + else + if λ == 2.0 then 1 - exp (-z) + else { val _2λ = 2-λ; 1 - (-_2λ*z + 1) ~^ (1/_2λ) } + end john_yeo + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `TranRegression2` object that uses a Box-Cox Transformation. + * To change λ from its default value, call setter first. + * @param x the data/input matrix + * @param y the response/output vector + * @param fname the feature/variable names (defaults to null) + * @param hparam the hyper-parameters (defaults to Regression.hp) + */ + def apply (x: MatrixD, y: VectorD, fname: Array [String] = null, + hparam: HyperParameter = Regression.hp): TranRegression2 = + new TranRegression2 (x, y, fname, hparam, box_cox, cox_box) + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `TranRegression2` object that uses a Yeo-Johnson Transformation. + * To change λ from its default value, call `set_lambda` first. 
+ * @param x the data/input matrix + * @param y the response/output vector + * @param fname the feature/variable names (defaults to null) + * @param hparam the hyper-parameters (defaults to Regression.hp) + */ + def app_yj (x: MatrixD, y: VectorD, fname: Array [String] = null, + hparam: HyperParameter = Regression.hp): TranRegression2 = + new TranRegression2 (x, y, fname, hparam, yeo_john, john_yeo) + end app_yj + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `TranRegression2` with automatic rescaling from a combined data matrix. + * @param xy the combined data/input and response/output matrix + * @param fname the feature/variable names + * @param hparam the hyper-parameters + * @param tran the transformation function + * @param itran the inverse transformation function to rescale predictions to original y scale + * @param bounds the bounds for rescaling + */ + def apply (xy: MatrixD, fname: Array [String], + hparam: HyperParameter, tran: FunctionS2S, itran: FunctionS2S, + bounds: (Double, Double)): TranRegression2 = + val hp2 = if hparam == null then Regression.hp else hparam + val (x, y) = (xy.not(?, xy.dim2-1), xy(?, xy.dim2-1)) + + val y_s = // scaled version of y + if bounds != null then // scale to bounds + val extrem = extreme (y) + scaleV (extrem, bounds)(y) + else // normalize + val (mu_y, sig_y) = (y.mean, y.stdev) + normalizeV (mu_y, sig_y)(y) + + debug ("apply", s"scaled: scaled y = $y_s") + new TranRegression2 (x, y_s, fname, hp2, tran, itran) + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `TranRegression2` with automatic rescaling from a data matrix and + * response vector. 
+ * @param x the data/input matrix + * @param y the response/output vector + * @param fname the feature/variable names + * @param hparam the hyper-parameters + * @param tran the transformation function (defaults to log) + * @param itran the inverse transformation function to rescale predictions to original y scale + * @param bounds the bounds for rescaling + */ + def apply (x: MatrixD, y: VectorD, fname: Array [String], + hparam: HyperParameter, tran: FunctionS2S, itran: FunctionS2S, + bounds: (Double, Double)): TranRegression2 = + val hp2 = if hparam == null then Regression.hp else hparam + + val y_s = // scaled version of y + if bounds != null then // scale to bounds + val extrem = extreme (y) + scaleV (extrem, bounds)(y) + else // normalize + val (mu_y, sig_y) = (y.mean, y.stdev) + normalizeV (mu_y, sig_y)(y) + + debug ("apply", s"scaled: scaled y = $y_s") + new TranRegression2 (x, y_s, fname, hp2, tran, itran) + end apply + +end TranRegression2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `TranRegression2Ex` provides a sample dataset for testing purposes. + * Move the comments on the line used to generate the response y(k) to test + * 1D and 2D cases. + */ +object TranRegression2Ex: + + private val cap = 30 + private val rng = 0 until cap + private val (m, n) = (cap * cap, 3) + private val err = Normal (0, cap) + + val x = new MatrixD (m, n) + val y = new VectorD (m) + for i <- rng; j <- rng do x(cap * i + j) = VectorD (1, i, j) + for k <- y.indices do y(k) = sq (10 + 2 * x(k, 1) + err.gen) +// for k <- y.indices do y(k) = sq (10 + 2 * x(k, 1) + 0.3 * x(k, 2) + err.gen) + +end TranRegression2Ex + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tranRegression2Test` main function tests `TranRegression2` class using the + * following regression equation. + * log (y) = b dot x = b_0 + b_1*x_1 + b_2*x_2. 
+ * > runMain scalation.modeling.tranRegression2Test + */ +@main def tranRegression2Test (): Unit = + + val x = MatrixD ((5, 3), 1.0, 36.0, 66.0, // 5-by-3 matrix + 1.0, 37.0, 68.0, + 1.0, 47.0, 64.0, + 1.0, 32.0, 53.0, + 1.0, 1.0, 101.0) + val y = VectorD (745.0, 895.0, 442.0, 440.0, 1598.0) + val z = VectorD (1.0, 20.0, 80.0) + + println ("x = " + x) + println ("y = " + y) + + banner ("Parameter Estimation and Quality of Fit - log transform") + val mod = new TranRegression2 (x, y) + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + banner ("Quality of Fit - based on transformed data") + println (mod.report (mod.test0 ()._2)) + + banner ("Prediction") + val yp = mod.predict (x) + println (s"predict (x) = $yp") + + val yp2 = mod.predict (z) + println (s"predict ($z) = $yp2") + +end tranRegression2Test + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tranRegression2Test2` main function tests `TranRegression2` class using the + * following regression equation. + * sqrt (y) = b dot x = b_0 + b_1*x_1 + b_2*x_2. 
+ * > runMain scalation.modeling.tranRegression2Test2 + */ +@main def tranRegression2Test2 (): Unit = + + // 9 data points: Constant x1 x2 y + val xy = MatrixD ((9, 4), 1.0, 1.0, 1.0, 0.04, + 1.0, 2.0, 1.0, 0.05, + 1.0, 3.0, 1.0, 0.06, + + 1.0, 1.0, 2.0, 0.10, + 1.0, 2.0, 2.0, 0.11, + 1.0, 3.0, 2.0, 0.12, + + 1.0, 1.0, 3.0, 0.20, + 1.0, 2.0, 3.0, 0.21, + 1.0, 3.0, 3.0, 0.22) + + val x_fname = Array ("one", "x1", "x2") + + val (x, y) = (xy.not (?, 3), xy(?, 3)) + val xtx = x.ᵀ * x + val yy = y.map (sqrt) + val xtyy = x.ᵀ * yy + val b = new Fac_Cholesky (xtx).inverse * xtyy + + banner ("parameters") + println (s"xtx = $xtx") + println (s"xtyy = $xtyy") + println (s"b = $b") + + val yyp = x * b // transformed + val sst = (yy - yy.mean).normSq + val e = yy - yyp + val sse = e.normSq + val rSq = 1.0 - sse / sst + + banner ("transformed") + println (s"yy = $yy") + println (s"yyp = $yyp") + println (s"e = $e") + println (s"sst = $sst") + println (s"sse = $sse") + println (s"rSq = $rSq") + + banner ("original") + val yp = yyp.map (sq) // orginal + val sst2 = (y - y.mean).normSq + val e2 = y - yp + val sse2 = e2.normSq + val rSq2 = 1.0 - sse2 / sst2 + + println (s"y = $y") + println (s"yp = $yp") + println (s"e2 = $e2") + println (s"sst2 = $sst2") + println (s"sse2 = $sse2") + println (s"rSq2 = $rSq2") + + banner ("Parameter Estimation and Quality of Fit") + val mod = new TranRegression2 (x, y, x_fname, Regression.hp, sqrt, sq) + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + +end tranRegression2Test2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tranRegression2Test3` main function tests `TranRegression2` class using the + * following regression equation and uses the simulated data in `TranRegression2Ex`. + * sqrt (y) = b dot x = b_0 + b_1*x_1 + b_2*x_2. + * @see 5.13.9 exercises 1, 2, and 3. 
+ * > runMain scalation.modeling.tranRegression2Test3 + */ +@main def tranRegression2Test3 (): Unit = + + import TranRegression2Ex.{x, y} + + // Phase 1 ================================================================ + banner ("Regression prediction yp") + val reg = new Regression (x, y) + reg.trainNtest ()() // train and test the model + println (reg.summary ()) // parameter/coefficient statistics + + val sst = reg.fit (QoF.sst.ordinal) + + val yp = reg.predict (x) + val e = y - yp + + new Plot (null, y, yp, "Original Regression y and yp vs. t", lines = true) + new Plot (null, e, null, "Original e vs. t", lines = true) + + // Phase 2 ================================================================ + banner ("Transform y to y2") + val y2 = y.map (sqrt) + val trg = new Regression (x, y2) + trg.trainNtest ()() // train and test the model + println (trg.summary ()) // parameter/coefficient statistics + + val yp2 = trg.predict (x) + val e2 = y2 - yp2 + + new Plot (null, y2, yp2, "Transformed Regression y2 and yp2 vs. t", lines = true) + new Plot (null, e2, null, "Transformed e2 vs. t", lines = true) + + // Phase 3 ================================================================ + banner ("Inverse Transform yp2 to yp3") + val yp3 = yp2.map (sq) + val e3 = y - yp3 + + val sse = e3 dot e3 + println (s"R^2 = ${1 - (sse / sst)}") + + new Plot (null, y, yp3, "Tran-back Regression y and yp3 vs. t", lines = true) + new Plot (null, e3, null, "Tran-back e3 vs. t", lines = true) + + val ys2 = MatrixD (y2, yp2) + val ys3 = MatrixD (y, yp3, yp) + new PlotM (null, ys2.ᵀ, null, "Transformed", lines = true) + new PlotM (null, ys3.ᵀ, null, "Tran-back", lines = true) + +end tranRegression2Test3 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tranRegression2Test4` main function tests `TranRegression2` class using the + * following regression equation. + * sqrt (y) = b dot x = b_0 + b_1*x_1 + b_2*x_2. 
+ * > runMain scalation.modeling.tranRegression2Test4 + */ +@main def tranRegression2Test4 (): Unit = + + import TranRegression2Ex.{x, y} + + banner ("Regression") + val reg = new Regression (x, y) + reg.trainNtest ()() // train and test the model + println (reg.summary ()) // parameter/coefficient statistics + + val yp = reg.predict (x) + val e = y - yp + + banner ("TranRegression2 with sqrt") + val mod = new TranRegression2 (x, y, null, Regression.hp, sqrt, sq) + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + banner ("Quality of Fit - based on transformed data") + println (mod.report (mod.test0 ()._2)) // test on transformed data + + val yp2 = mod.predict (x) + val e2 = y - yp2 + + println (s"e2.dim = ${e2.dim}") + + val ys = MatrixD (y, yp, yp2) + new PlotM (null, ys.ᵀ, lines = true) + + new Plot (null, e, null, "e vs. t", lines = true) + new Plot (null, e2, null, "e2 vs. t", lines = true) + +end tranRegression2Test4 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tranRegression2Test5` main function tests `TranRegression2` class using the + * following regression equation. + * sigmoid^{-1} (y) = b dot x = b_0 + b_1*x_1 + b_2*x_2. 
+ * > runMain scalation.modeling.tranRegression2Test5 + */ +@main def tranRegression2Test5 (): Unit = + + import Example_AutoMPG._ + + def f (u: Double): Double = -log (1/u - 1) // transform + def fi (t: Double): Double = 1 / (1 + exp (-t)) // inverse transform + + val extrem = extreme (y) // (min, max) for y + val bounds = (0.01, 0.99) // transform function domain bounds + + val yy = scaleV (extrem, bounds)(y) // rescale to domain of transform + println (s"yy = $yy") + + banner ("Regression") + val reg = new Regression (ox, yy) + reg.trainNtest ()() // train and test the model + println (reg.summary ()) // parameter/coefficient statistics + + val yp = reg.predict (ox) + val e = yy - yp + + banner ("TranRegression2") +// val mod = new Regression (ox, yy.map (f)) // rescale & transform + val mod = new TranRegression2 (ox, yy, ox_fname, Regression.hp, f, fi) // rescale + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + banner ("Quality of Fit - based on transformed data") + println (mod.report (mod.test0 ()._2)) // test with transformed + + val yp2 = mod.predict (ox) + val e2 = yy - yp2 + + val yp_ = scaleV (bounds, extrem)(yp) + val yp2_ = scaleV (bounds, extrem)(yp2) + + val rnk = y.iqsort // rank order for vector y + val ry = y.reorder (rnk) // actual - red + val ryp = yp_.reorder (rnk) // Regression - green + val ryp2 = yp2_.reorder (rnk) // TranRegression2 - blue + + val ys = MatrixD (ry, ryp, ryp2) + new PlotM (null, ys.ᵀ, lines = true) + + new Plot (null, e, null, "e vs. t", lines = true) + new Plot (null, e2, null, "e2 vs. t", lines = true) + +end tranRegression2Test5 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tranRegression2Test6` main function tests `TranRegression2` class using the + * following regression equation on the beer foam dataset. 
+ * @see www.tf.uni-kiel.de/matwis/amat/iss/kap_2/articles/beer_article.pdf + * exp (y) = b dot x = b_0 + b_1*x_1. + * > runMain scalation.modeling.tranRegression2Test6 + */ +@main def tranRegression2Test6 (): Unit = + + val x1 = VectorD (0, 15, 30, 45, 60, 75, 90, 105, 120, 150, 180, 210, 250, 300, 360) + val y = VectorD (14.0, 12.1, 10.9, 10.0, 9.3, 8.6, 8.0, 7.5, + 7.0, 6.2, 5.5, 4.5, 3.5, 2.0, 0.9) + val _1 = VectorD.one (x1.dim) + val x = MatrixD (_1, x1).ᵀ + + banner ("SimpleRegression") + val reg = new SimpleRegression (x, y) + reg.trainNtest ()() // train and test the model + println (reg.summary ()) // parameter/coefficient statistics + val yp = reg.predict (x) + new Plot (x1, y, yp, "SimpleRegression", lines = true) + + banner ("TranRegression2 (log)") + val mod = new TranRegression2 (x, y) + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + val yp2 = mod.predict (x) + new Plot (x1, y, yp2, "TranRegression2", lines = true) + + banner ("ExpRegression") + val erg = new ExpRegression (x, y) + erg.trainNtest ()() // train and test the model + println (erg.summary ()) // parameter/coefficient statistics + val yp3 = erg.predict (x) + new Plot (x1, y, yp3, "ExpRegression", lines = true) + +end tranRegression2Test6 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tranRegression2Test7` main function tests the `TranRegression2` class using + * the AutoMPG dataset. It also combines feature selection with cross-validation + * and plots R^2, R^2 bar and R^2 cv vs. the instance index. 
+ * > runMain scalation.modeling.tranRegression2Test7 + */ +@main def tranRegression2Test7 (): Unit = + + import Example_AutoMPG._ + banner ("AutoMPG TranRegression2 feature selection") + +// val f = (id, id, "id") +// val f = (recip, recip, "recip") +// val f = (log, exp, "log") +// val f = (log1p, expm1, "log1p") +// val f = (sqrt, sq, "sqrt") +// val f = (sq, sqrt, "sq") +// val f = (exp, log, "exp") + import TranRegression2.{box_cox, cox_box} + TranRegression2.λ = 0.4; val f = (box_cox, cox_box, "box_cox") // try 0.2, 0.3, 0.4, 0.5, 0.6 + + banner (s"TranRegression2 with ${f._3} transform") + val mod = new TranRegression2 (ox, y, ox_fname, Regression.hp, f._1, f._2) + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + banner ("Validation Test") + mod.validate ()() + +/* + banner ("Forward Selection Test") + val (cols, rSq) = mod.forwardSelAll () // R^2, R^2 bar, R^2 cv + val k = cols.size + println (s"k = $k, n = ${ox.dim2}") + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for TranRegression2 ${f._3}", lines = true) + println (s"rSq = $rSq") +*/ + +end tranRegression2Test7 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tranRegression2Test8` main function tests and compares `Regression` vs. + * `SymbolicRegression.quadratic` vs. `TranRegression2`. + * using the following regression equations. 
+ * y = b dot x = b_0 + b_1*x + * y = b dot x' = b_0 + b_1*x + b_2*x^2 + * y = b dot x' = b_0 + b_1*x + b_2*x^2 + b_3*x^3 + * > runMain scalation.modeling.tranRegression2Test8 + */ +@main def tranRegression2Test8 (): Unit = + + // 8 data points: x y + val xy = MatrixD ((8, 2), 1, 2, // 8-by-2 matrix + 2, 5, + 3, 10, + 4, 15, + 5, 20, + 6, 30, + 7, 50, + 8, 60) + + val x_fname = Array ("x") // names of features/variables + val ox_fname = Array ("_1", "x") // names of features/variables + + println ("model: y = b0 + b1*x1 + b2*x1^2") + println (s"xy = $xy") + + val oxy = VectorD.one (xy.dim) +^: xy // combined data matrix with ones column prepended + val (ox, y) = (oxy.not(?, 2), oxy(?, 2)) // (data matrix, response column) + val x = xy.not(?, 1) // data matrix with no ones column + + banner ("Regression") + val reg = Regression (oxy, ox_fname)() // create a Regression model + reg.trainNtest ()() // train and test the model + println (reg.summary ()) // parameter/coefficient statistics + val yp = reg.predict (ox) // y predicted for Regression + println (s"predict = $yp") + + banner ("Quadratic Regression") + val qrg = SymbolicRegression.quadratic (x, y, x_fname) // create a Quadratic Regression model + qrg.trainNtest ()() // train and test the model + println (qrg.summary ()) // parameter/coefficient statistics + val yp2 = qrg.predict (qrg.getX) // y predicted for Quadratic Regression + println (s"predict = $yp2") + + banner ("Transformed Regression") + val mod = new TranRegression2 (ox, y, ox_fname, Regression.hp, sqrt, sq) // sqrt Transformed Regression model + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + val yp3 = mod.predict (ox) // y predicted for Transformed Regression + println (s"predict = $yp2") + + val mat = MatrixD (y, yp, yp2, yp3) + println (s"mat = $mat") + new PlotM (null, mat, Array ("actual", "linear", "quad", "sqrt"), + "y vs. yp vs. yp2 vs. 
yp3", true) + + banner ("Expanded Form") + println (s"expanded x = ${mod.getX}") + println (s"y = ${mod.getY}") + +end tranRegression2Test8 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tranRegression2Test9` main function tests the Box-Cox and Yeo-Johnson Transformations. + * > runMain scalation.modeling.tranRegression2Test9 + */ +@main def tranRegression2Test9 (): Unit = + + import TranRegression2._ + + banner ("Text Transformations on Positive Values") + var y = VectorD.range (1 until 11) + println (s"y = $y") + + banner ("Positive Values -- Box-Cox Transformation and Inverse: y -> z -> y'") + var z = y.map (box_cox (_)) + println (s"z = $z") + println (s"y' = ${z.map (cox_box (_))}") + + banner ("Positive Values -- Yeo-Johnson Transformation and Inverse: y -> z -> y'") + z = y.map (yeo_john (_)) + println (s"z = $z") + println (s"y' = ${z.map (john_yeo (_))}") + + banner ("Text Transformations on Negative Values") + y = -y + println (s"y = $y") + + banner ("Negative Values -- Box-Cox Transformation and Inverse: y -> z -> y'") + z = y.map (box_cox (_)) + println (s"z = $z") + println (s"y' = ${z.map (cox_box (_))}") + + banner ("Negative Values -- Yeo-Johnson Transformation and Inverse: y -> z -> y'") + z = y.map (yeo_john (_)) + println (s"z = $z") + println (s"y' = ${z.map (john_yeo (_))}") + +end tranRegression2Test9 + diff --git a/src/main/scala/scalation/modeling/TrigRegression.scala b/src/main/scala/scalation/modeling/TrigRegression.scala index 197641303..a6daedd99 100644 --- a/src/main/scala/scalation/modeling/TrigRegression.scala +++ b/src/main/scala/scalation/modeling/TrigRegression.scala @@ -38,16 +38,15 @@ class TrigRegression (t: MatrixD, y: VectorD, ord: Int, fname_ : Array [String] extends Regression (TrigRegression.allForms (t, ord), y, fname_, hparam): private val w = (2.0 * Pi) / (t.mmax - t.mmin) // base displacement angle in radians - private val n0 = 1 // number of terms/columns originally 
private val nt = TrigRegression.numTerms (ord) // number of terms/columns after expansion - modelName = "TrigRegression" + _modelName = s"TrigRegression_$ord" //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Expand the vector 'z' into a vector of that includes additional terms. * @param z the un-expanded vector */ - def expand (z: VectorD): VectorD = TrigRegression.forms (z, n0, nt, w) + def expand (z: VectorD): VectorD = TrigRegression.forms (z, nt, w) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Given the scalar 'z', expand it and predict the response value. @@ -96,7 +95,7 @@ object TrigRegression: def apply (t: VectorD, y: VectorD, ord: Int, fname: Array [String], hparam: HyperParameter): TrigRegression = val hp2 = if hparam == null then Regression.hp else hparam - new TrigRegression (MatrixD (t).transpose, y, ord, fname, hp2) + new TrigRegression (MatrixD (t).ᵀ, y, ord, fname, hp2) end apply //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -119,11 +118,10 @@ object TrigRegression: * forms/terms, returning them as a vector. * '[1, sin (wt), cos (wt), sin (2wt), cos (2wt), ...]'. 
* @param v the vector/point (i-th row of t) for creating forms/terms - * @param k number of features/predictor variables (not counting intercept) = 1 * @param nt the number of terms * @param w the base displacement angle in radians */ - def forms (v: VectorD, k: Int, nt: Int, w: Double): VectorD = + def forms (v: VectorD, nt: Int, w: Double): VectorD = val wt = w * v(0) val u = new VectorD (nt) u(0) = 1.0 @@ -148,11 +146,10 @@ object TrigRegression: def allForms (x: MatrixD, ord: Int): MatrixD = val t = x(?, 0) // first and only column of x val w = (2.0 * Pi) / (t.max - t.min) // base displacement angle in radians - val k = 1 val nt = numTerms (ord) - println (s"allForms: create expanded data matrix with nt = $nt columns from k = $k columns") + println (s"allForms: create expanded data matrix with nt = $nt columns") val xe = new MatrixD (x.dim, nt) - for i <- x.indices do xe(i) = forms (x(i), k, nt, w) // vector with values for all forms/terms + for i <- x.indices do xe(i) = forms (x(i), nt, w) // vector with values for all forms/terms debug ("allForms", s"expanded data matrix xe = $xe") xe // expanded matrix end allForms @@ -197,7 +194,6 @@ end TrigRegression if harmonics == 16 then val stats = trg.crossValidate () FitM.showQofStatTable (stats) - end if end for end trigRegressionTest @@ -241,7 +237,6 @@ end trigRegressionTest if harmonics == 16 then val stats = trg.crossValidate () FitM.showQofStatTable (stats) - end if end for end trigRegressionTest2 diff --git a/src/main/scala/scalation/modeling/autograd/Adam.scala b/src/main/scala/scalation/modeling/autograd/Adam.scala new file mode 100644 index 000000000..4ce5b5080 --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/Adam.scala @@ -0,0 +1,88 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri April 25 20:01:00 EDT 2025 + * @see LICENSE (MIT style license file). 
+ * + * @note Autograd: Adam Optimizer for Parameter Updates + */ + +package scalation +package modeling +package autograd + +import scalation.mathstat.TensorD + +// FIX -- switch to using hyper-parameters + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Adam` class implements the Adam optimization algorithm for updating model parameters. + * The Adam optimizer (Kingma & Ba, 2015) with optional L2 weight decay maintains + * first (m) and second (v) moment estimates and applies bias correction. + * Classical (non-decoupled) weight decay is applied by adding weightDecay * param to the raw gradient. + * @note Call zeroGrad() before backward + step. + * @see https://arxiv.org/abs/1412.6980 + * @param parameters indexed sequence of Variables representing model parameters. + * @param lr base Learning rate for updating the parameters. + * @param beta1 exponential decay rate for the first moment estimates. + * @param beta2 exponential decay rate for the second moment estimates. + * @param weightDecay L2 regularization coefficient (0.0 to disable) + * @param eps small constant added for numerical stability. + */ +case class Adam (parameters: IndexedSeq [Variabl], lr: Double = 0.001, + beta1: Double = 0.9, beta2: Double = 0.999, + weightDecay: Double = 0.0, eps: Double = 1e-8) + extends Optimizer (parameters, lr): + + /** First moment estimates for each parameter, initialized to zeros with the same shape as the parameter data. + */ + private val m = parameters.map (p => TensorD.zerosLike (p.data)) + + /** Second moment estimates for each parameter, initialized to zeros with the same shape as the parameter data. + */ + private val v = parameters.map (p => TensorD.zerosLike (p.data)) + + /** Time step counter that tracks the number of updates made. + */ + private var t = 0 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Performs a single optimization step using the Adam algorithm. 
+ * The step method increments the time step counter, then for each parameter: + * - Updates the biased first moment estimate. + * - Updates the biased second moment estimate. + * - Computes bias-corrected moment estimates. + * - Updates the parameter data using the computed moments. + */ + override def step (): Unit = + t += 1 // Increment time step + val beta1_t = (1 - beta1~^t) + val beta2_t = (1 - beta2~^t) + + for i <- parameters.indices do + val p = parameters(i) + var m_i = m(i) + var v_i = v(i) + + if p.grad != null then + val grad = p.grad + + // Apply weight decay if specified (L2 regularization) + val gradReg = if weightDecay > 0.0 then grad + p.data * weightDecay else grad + + m_i *= beta1 // Update biased first moment estimate + m_i += gradReg * (1 - beta1) + + v_i *= beta2 // Update biased second moment estimate + v_i += gradReg * gradReg * (1 - beta2) + + val biasCorr1 = m_i / beta1_t // Compute bias-corrected moment estimates + val biasCorr2 = v_i / beta2_t + + p.data -= biasCorr1 * learningRate / (biasCorr2.map_ (math.sqrt) + eps) // Update the parameter with the computed moments + end for + end step + +end Adam + diff --git a/src/main/scala/scalation/modeling/autograd/AutogradOps.scala b/src/main/scala/scalation/modeling/autograd/AutogradOps.scala new file mode 100644 index 000000000..f9f2de40d --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/AutogradOps.scala @@ -0,0 +1,630 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri April 25 19:44:13 EDT 2025 + * @see LICENSE (MIT style license file). 
+ * + * @note Autograd: Core Operations for Automatic Differentiation + */ + +package scalation +package modeling +package autograd + +import scalation.mathstat.{TensorD, tensorize} +import scalation.modeling.ActivationFun + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `AutogradOps` trait defines the core operations needed for automatic differentiation. + * It separates the mathematical operations on tensors (TensorD) + * from the autograd system (Variable, Function), allowing flexible extension. + * This trait is backed by a default implementation (see AutogradOps.default) + * using TensorD methods. + */ +trait AutogradOps: + + // ---------- Arithmetic operations ---------- + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the sign of each element in tensor x. + */ + def sign (x: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the absolute value of each element in tensor x. + */ + def abs (x: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the negation of tensor x. + */ + def neg (x: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the square root of each element in tensor x. + */ + def sqrt (x: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the natural logarithm of each element in tensor x. + */ + def log (x: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the logarithm of tensor x with the specified base. + */ + def logBase (x: TensorD, base: Double): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the reciprocal of each element in tensor x. 
+ */ + def reciprocal (x: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Clips the elements of tensor x to be within the range [min, max]. + */ + def clipByValue (x: TensorD, min: Double, max: Double): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Clips the elements of tensor x to have a maximum norm of maxNorm. + */ + def clipByNorm (x: TensorD, maxNorm: Double): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the element-wise maximum of tensors x and y. + */ + def max (x: TensorD, y: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the element-wise maximum between tensor x and scalar s. + */ + def maxScalar (x: TensorD, s: Double): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the maximum value in tensor x. + */ + def maxValue (x: TensorD): Double + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the element-wise minimum of tensors x and y. + */ + def min (x: TensorD, y: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the element-wise minimum between tensor x and scalar s. + */ + def minScalar (x: TensorD, s: Double): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the minimum value in tensor x. + */ + def minValue (x: TensorD): Double + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the mean of all elements in tensor x. + */ + def mean (x: TensorD): Double + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the mean along the specified axis of tensor x. 
+ */ + def meanAlongAxis (x: TensorD, axis: Int): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the Frobenius norm of tensor x. + */ + def normF (x: TensorD): Double + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the Frobenius norm squared of tensor x. + */ + def normFSq (x: TensorD): Double + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the variance of all elements in tensor x. + */ + def variance (x: TensorD): Double + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the variance along the specified axis of tensor x. + */ + def varianceAlongAxis (x: TensorD, axis: Int): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the standard deviation of tensor x. + */ + def std (x: TensorD): Double + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the standard deviation along the specified axis of tensor x. + */ + def stdAlongAxis (x: TensorD, axis: Int): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Standardizes tensor x along the specified axis. + */ + def standardize (x: TensorD, axis: Int): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Rounds each element in tensor x to the nearest integer. + */ + def round (x: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Applies ceiling to each element in tensor x. + */ + def ceil (x: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Applies floor to each element in tensor x. 
+ */ + def floor (x: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns element-wise addition of tensors x and y. + */ + def add (x: TensorD, y: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Adds scalar s to each element in tensor x. + */ + def addScalar (x: TensorD, s: Double): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns element-wise subtraction of tensor y from tensor x. + */ + def sub (x: TensorD, y: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Subtracts scalar s from each element in tensor x. + */ + def subScalar (x: TensorD, s: Double): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns element-wise multiplication of tensors x and y. + */ + def mul (x: TensorD, y: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Multiplies each element in tensor x by scalar s. + */ + def mulScalar (x: TensorD, s: Double): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns element-wise division of tensor x by tensor y. + */ + def div (x: TensorD, y: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Divides each element in tensor x by scalar s. + */ + def divScalar (x: TensorD, s: Double): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Raises each element in tensor x to the power of s. + */ + def pow (x: TensorD, s: Int): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the exponential of each element in tensor x. 
+ */ + def exp (x: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the sum of all elements in tensor x. + */ + def sum (x: TensorD): Double + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the sum along the specified axis of tensor x. + */ + def sumAlongAxis (x: TensorD, axis: Int): TensorD + + // ---------- Tensor operations ---------- + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Creates a tensor consisting of scalar value s. + */ + def scalar (s: Double): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Creates a tensor with the same shape as x filled with zeros. + */ + def zerosLike (x: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Creates a tensor with the same shape as x filled with ones. + */ + def onesLike (x: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Creates a tensor with the same shape as t filled with the specified value. + */ + def fullLike (t: TensorD, value: Double): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the shape of tensor x as a list of dimension sizes. + */ + def shape (x: TensorD): List [Int] + + def getSlice (x: TensorD, r0: Range, r1: Range, r2: Range): TensorD + + def setSlice (x: TensorD, value: TensorD, r0: Range, r1: Range, r2: Range): TensorD + + def concat (tensors: Seq [TensorD], axis: Int): TensorD + + def reshape (x: TensorD, newShape: Seq [Int]): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Permutes the axes of tensor x according to the specified order. 
+ */ + def permute (x: TensorD, axes: Seq [Int]): TensorD + + // FIX: Gelu doesn't pass the test + // ---------- Activation functions ---------- + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Identity activation function. + */ + def id_ (yp: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Rectified Linear Unit (ReLU) activation function. + */ + def reLU_ (yp: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Leaky ReLU activation function with an optional alpha parameter. + */ + def lreLU_ (yp: TensorD, alpha: Double = 0.2): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Exponential Linear Unit (ELU) activation function with an optional alpha parameter. + */ + def eLU_ (yp: TensorD, alpha: Double = 1.0): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Hyperbolic tangent (tanh) activation function. + */ + def tanh_ (yp: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Sigmoid activation function. + */ + def sigmoid_ (yp: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Gaussian activation function. + */ + def gaussian_ (yp: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Gaussian Error Linear Unit (GeLU) activation function. + */ + def geLU_ (yp: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Softmax activation function. + */ + def softmax_ (yp: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Logit activation function. 
+ */ + def logit_ (yp: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Logistic activation function with parameters a, b, and c. + */ + def logistic_ (yp: TensorD, a: Double = 1.0, b: Double = 1.0, c: Double = 1.0): TensorD + + // ---------- Activation Function Derivatives ---------- + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Derivative of the identity activation function. + */ + def idD_ (yp: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Derivative of the ReLU activation function. + */ + def reLUD_ (yp: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Derivative of the Leaky ReLU activation function. + */ + def lreLUD_ (yp: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Derivative of the ELU activation function with an optional alpha parameter. + */ + def eLUD_ (yp: TensorD, alpha: Double = 1.0): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Derivative of the tanh activation function. + */ + def tanhD_ (yp: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Derivative of the sigmoid activation function. + */ + def sigmoidD_ (yp: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Derivative of the GeLU activation function. + */ + def geLUD_ (yp: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Derivative of the softmax activation function. 
+ */ + def softmaxD_ (yp: TensorD): TensorD + + // ---------- Loss functions ---------- + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the Sum of Squared Errors (SSE) loss between the prediction and target tensors. + */ + def sseLoss (pred: TensorD, target: TensorD): Double + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the Mean Squared Error (MSE) loss between the prediction and target tensors. + */ + def mseLoss (pred: TensorD, target: TensorD): Double + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the Mean Absolute Error (MAE) loss between the prediction and target tensors. + */ + def maeLoss (pred: TensorD, target: TensorD): Double + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the binary cross entropy loss between the prediction and target tensors. + */ + def binaryCrossEntropy (pred: TensorD, target: TensorD): Double + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the categorical cross entropy loss between the prediction and target tensors. + */ + def categoricalCrossEntropy (pred: TensorD, target: TensorD): Double + + // ---------- Matrix-like operations ---------- + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Transposes tensor x by swapping the specified axes i and j. + */ + def transpose (x: TensorD, i: Int, j: Int): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the dot product of tensors x and y. + */ + def dot (x: TensorD, y: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Performs matrix multiplication of tensors x and y. 
+ */ + def matmul (x: TensorD, y: TensorD): TensorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Performs batched matrix multiplication of tensors x and y. + */ + def bmm (x: TensorD, y: TensorD): TensorD + +end AutogradOps + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Companion object for AutogradOps that provides a default implementation. + */ +object AutogradOps: + + /** Default instance of AutogradOps. + */ + given default: AutogradOps with + + // ---------- Arithmetic Operations ---------- + + def sign (x: TensorD): TensorD = x.sign + + def abs (x: TensorD): TensorD = x.abs + + def neg (x: TensorD): TensorD = -x + + def sqrt (x: TensorD): TensorD = x.sqrt + + def log (x: TensorD): TensorD = x.log + + def logBase (x: TensorD, base: Double): TensorD = x.logBase (base) + + def reciprocal (x: TensorD): TensorD = x.reciprocal + + def clipByValue (x: TensorD, min: Double, max: Double): TensorD = x.clipByValue (min, max) + + def clipByNorm (x: TensorD, maxNorm: Double): TensorD = x.clipByNorm (maxNorm) + + def max (x: TensorD, y: TensorD): TensorD = TensorD.max (x, y) + + def maxScalar (x: TensorD, s: Double): TensorD = x.maxScalar (s) + + def maxValue(x: TensorD): Double = x.maxValue + + def min (x: TensorD, y: TensorD): TensorD = TensorD.min (x, y) + + def minScalar (x: TensorD, s: Double): TensorD = x.minScalar (s) + + def minValue (x: TensorD): Double = x.minValue + + def mean (x: TensorD): Double = x.mean + + def meanAlongAxis (x: TensorD, axis: Int): TensorD = x.meanAlongAxis (axis) + + def normF (x: TensorD): Double = x.normF + + def normFSq (x: TensorD): Double = x.normFSq + + def variance (x: TensorD): Double = x.variance + + def varianceAlongAxis (x: TensorD, axis: Int): TensorD = x.varianceAlongAxis (axis) + + def std (x: TensorD): Double = x.std + + def stdAlongAxis (x: TensorD, axis: Int): TensorD = x.stdAlongAxis (axis) + + def standardize (x: TensorD, axis: 
Int): TensorD = x.standardize (axis) + + def round (x: TensorD): TensorD = x.round + + def ceil (x: TensorD): TensorD = x.ceil + + def floor (x: TensorD): TensorD = x.floor + + def add (x: TensorD, y: TensorD): TensorD = x + y + + def addScalar (x: TensorD, s: Double): TensorD = x + s + + def sub (x: TensorD, y: TensorD): TensorD = x - y + + def subScalar (x: TensorD, s: Double): TensorD = x - s + + def mul (x: TensorD, y: TensorD): TensorD = x * y + + def mulScalar (x: TensorD, s: Double): TensorD = x * s + + def div (x: TensorD, y: TensorD): TensorD = x / y + + def divScalar (x: TensorD, s: Double): TensorD = x / s + + def pow (x: TensorD, s: Int): TensorD = x ~^ s + + def exp (x: TensorD): TensorD = x.exp + + def sum (x: TensorD): Double = x.sum + + def sumAlongAxis (x: TensorD, axis: Int): TensorD = x.sumAlongAxis (axis) + + // ---------- Tensor Operations ---------- + + def scalar (s: Double): TensorD = TensorD.scalar (s) + + def zerosLike (x: TensorD): TensorD = x.zerosLike + + def onesLike (x: TensorD): TensorD = x.onesLike + + def fullLike (t: TensorD, value: Double): TensorD = t.fullLike (value) + + def shape (x: TensorD): List [Int] = x.shape + + def getSlice (x: TensorD, r0: Range, r1: Range, r2: Range): TensorD = x(r0, r1, r2) + + def setSlice (x: TensorD, value: TensorD, r0: Range, r1: Range, r2: Range): TensorD = + x(r0, r1, r2) = value + x + end setSlice + + def concat (tensors: Seq [TensorD], axis: Int): TensorD = TensorD.concat (tensors, axis) + + def reshape (x: TensorD, newShape: Seq [Int]): TensorD = x.reshape (newShape) + + def permute (x: TensorD, axes: Seq [Int]): TensorD = x.permute (axes) + + // ---------- Activation Functions ---------- + + def id_ (yp: TensorD): TensorD = yp.id + + def reLU_ (yp: TensorD): TensorD = yp.reLU + + def lreLU_ (yp: TensorD, alpha: Double = 0.2): TensorD = yp.lreLU (alpha) + + def eLU_ (yp: TensorD, alpha: Double = 1.0): TensorD = yp.eLU (alpha) + + def tanh_ (yp: TensorD): TensorD = yp.tanh + + def sigmoid_ 
(yp: TensorD): TensorD = yp.sigmoid + + def gaussian_ (yp: TensorD): TensorD = yp.gaussian + + def geLU_ (yp: TensorD): TensorD = yp.geLU + + def softmax_ (yp: TensorD): TensorD = yp.softmax + + def logit_ (yp: TensorD): TensorD = yp.logit + + def logistic_ (yp: TensorD, a: Double = 1.0, b: Double = 1.0, c: Double = 1.0): TensorD = + yp.logistic (a, b, c) + + // ---------- Activation Function Derivatives ---------- + + def idD_ (yp: TensorD): TensorD = yp.onesLike + + def reLUD_ (yp: TensorD): TensorD = tensorize (ActivationFun.reLUD)(yp) + + def lreLUD_ (yp: TensorD): TensorD = tensorize (ActivationFun.lreLUD)(yp) + + def eLUD_ (yp: TensorD, alpha: Double = 1.0): TensorD = tensorize (ActivationFun.eLUD)(yp) + + def tanhD_ (yp: TensorD): TensorD = tensorize (ActivationFun.tanhD)(yp) + + def sigmoidD_ (yp: TensorD): TensorD = tensorize (ActivationFun.sigmoidD)(yp) + + def geLUD_ (yp: TensorD): TensorD = tensorize (ActivationFun.geLUD)(yp) + + def softmaxD_ (yp: TensorD): TensorD = tensorize (ActivationFun.softmaxD)(yp) + + // ---------- Loss Functions ---------- + + def sseLoss (pred: TensorD, target: TensorD): Double = + val e = pred - target + (e ~^ 2).sum + + def mseLoss (pred: TensorD, target: TensorD): Double = + val e = pred - target + (e ~^ 2).mean + + def maeLoss (pred: TensorD, target: TensorD): Double = + val e = pred - target + e.abs.mean + + def binaryCrossEntropy (pred: TensorD, target: TensorD): Double = + val eps = 1e-15 + val predSafe = pred.clipByValue (eps, 1.0 - eps) + val loss = TensorD.zerosLike (pred) + cfor (pred.indices) { i => + cfor (pred.indices2) { j => + cfor (pred.indices3) { k => + loss(i, j, k) = + -target(i, j, k) * math.log (predSafe(i, j, k)) - + (1.0 - target(i, j, k)) * math.log (1.0 - predSafe(i, j, k)) + } // cfor + } // cfor + } // cfor + loss.sum / (pred.dim * pred.dim2 * pred.dim3) + + def categoricalCrossEntropy (pred: TensorD, target: TensorD): Double = + val eps = 1e-15 + val predSafe = pred.clipByValue (eps, 1.0 - eps) + 
var totalLoss = 0.0 + cfor (pred.indices) { i => + cfor (pred.indices2) { j => + var sampleLoss = 0.0 + cfor (pred.indices3) { k => + sampleLoss += -target(i, j, k) * math.log (predSafe(i, j, k)) + } // cfor + totalLoss += sampleLoss + } // cfor + } // cfor + totalLoss / (pred.dim * pred.dim2) + + // ---------- Matrix-like Operations ---------- + + def transpose (x: TensorD, i: Int, j: Int): TensorD = x.transpose(i, j) + + def dot (x: TensorD, y: TensorD): TensorD = x dot y + + def matmul (x: TensorD, y: TensorD): TensorD = x matmul y + + def bmm (x: TensorD, y: TensorD): TensorD = x bmm y + + end default + +end AutogradOps + diff --git a/src/main/scala/scalation/modeling/autograd/AutogradTest.scala b/src/main/scala/scalation/modeling/autograd/AutogradTest.scala new file mode 100644 index 000000000..490422556 --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/AutogradTest.scala @@ -0,0 +1,1189 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri April 25 19:40:13 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Unit Tests for Autograd Functionality + */ + +package scalation +package modeling +package autograd + +import scala.language.implicitConversions +import scala.math.ceil + +import scalation.mathstat.{MatrixD, TensorD, TnT_Split, VectorD} +import scalation.modeling.neuralnet._ + +import AutogradOps.given +import Example_AutoMPG.{x, y} + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `AutogradTest` object contains various @main tests for autograd functionality. + * The tests validate basic arithmetic, complex expressions, activation functions, + * loss functions, and neural network layers with backpropagation. 
+ */ +object AutogradTest: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `autogradTest0` main function tests basic unary operations in the + * Autograd system. These include absolute value, negation, floor, ceil, + * rounding, sign, and clipping. For each operator, the test reports the + * forward results and performs gradient checking using `GradCheck.gradCheck`. + * This ensures that each scalar/tensor unary operation is correctly + * implemented in both the forward and backward passes. + * > runMain scalation.modeling.autograd.AutogradTest.autogradTest0 + */ + @main def autogradTest0 (): Unit = + + val R = TestReport () + + R.record ("Absolute Value") { + val data = TensorD ((2, 2, 1), -1.0, -2.0, 3.0, 4.0) + val x = Variabl (data, name = Some ("x")) + println (s"x: ${x.data}") + println (s"x.abs: ${x.abs.data}") + GradCheck.gradCheck (x, () => x.abs, quiet = true) + } + R.record ("Negation") { + val data = TensorD ((2, 2, 1), 1.0, -2.0, 3.0, -4.0) + val x = Variabl (data, name = Some ("x")) + println (s"x: ${x.data}") + println (s"x.neg: ${-x.data}") + GradCheck.gradCheck (x, () => -x, quiet = true) + } + R.record ("Floor") { + val x = Variabl (TensorD ((2, 2, 1), -1.2, -2.8, 3.4, 4.9)) + println (s"x: ${x.data}") + println (s"x.floor: ${x.floor.data}") + GradCheck.gradCheck (x, () => x.floor, quiet = true) + } + R.record ("Ceil") { + val x = Variabl (TensorD ((2, 2, 1), -1.2, -2.8, 3.4, 4.9)) + println (s"x: ${x.data}") + println (s"x.ceil: ${x.ceil.data}") + GradCheck.gradCheck (x, () => x.ceil, quiet = true) + } + R.record ("Round") { + val x = Variabl (TensorD ((2, 2, 1), -1.2, -2.3, 3.4, 4.7)) // not half-integers (since grad is undefined there) + println (s"x: ${x.data}") + println (s"x.round: ${x.round.data}") + GradCheck.gradCheck (x, () => x.round, quiet = true) + } + R.record ("Sign") { + val x = Variabl (TensorD ((2, 2, 1), -1.0,10.0, 2.0, -3.0)) // avoid zero (since grad is undefined there) + println 
(s"x: ${x.data}") + println (s"x.sign: ${x.sign.data}") + GradCheck.gradCheck (x, () => x.sign, quiet = true) + } + R.record ("Clip") { + val x = Variabl (TensorD ((2, 2, 1), -2.0, -0.5, 0.5, 3.0)) + println (s"x: ${x.data}") + println (s"x.clip(-1.0, 1.0): ${x.clip (-1.0, 1.0).data}") + GradCheck.gradCheck (x, () => x.clip (-1.0, 1.0), quiet = true) + } + R.summary ("Autograd Basic Operations - Test 0") + + end autogradTest0 + + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `autogradTest1` main function tests basic binary arithmetic operations + * in the Autograd system. These include addition, subtraction, element-wise + * multiplication, and element-wise division between two tensors, as well as + * unary operations involving constants. For each operation, the test applies + * `GradCheck.gradCheck` or `gradCheckAll` to verify that both the forward + * and backward passes are implemented correctly. + * > runMain scalation.modeling.autograd.AutogradTest.autogradTest1 + */ + @main def autogradTest1 (): Unit = + + val R = TestReport () + + R.record ("Addition") { + val data1 = TensorD ((2, 2, 1), 1.0, 2.0, 3.0, 4.0) + val data2 = TensorD ((2, 2, 1), 5.0, 6.0, 7.0, 8.0) + val x = Variabl (data1, name = Some ("x")) + val y = Variabl (data2, name = Some ("y")) + GradCheck.gradCheckAll (Seq (x, y), () => x + y, quiet = true) + } + R.record ("Subtraction") { + val data1 = TensorD ((2, 2, 1), 1.0, 2.0, 3.0, 4.0) + val data2 = TensorD ((2, 2, 1), 5.0, 6.0, 7.0, 8.0) + val x = Variabl (data1, name = Some ("x")) + val y = Variabl (data2, name = Some ("y")) + GradCheck.gradCheckAll (Seq (x, y), () => x - y, quiet = true) + } + R.record ("Multiplication") { + val data1 = TensorD ((2, 2, 1), 1.0, 2.0, 3.0, 4.0) + val data2 = TensorD ((2, 2, 1), 5.0, 6.0, 7.0, 8.0) + val x = Variabl (data1, name = Some ("x")) + val y = Variabl (data2, name = Some ("y")) + GradCheck.gradCheckAll (Seq (x, y), () => x * y, quiet = true) + } + R.record 
("Division") { + val data1 = TensorD ((2, 2, 1), 1.0, 2.0, 3.0, 4.0) + val data2 = TensorD ((2, 2, 1), 5.0, 6.0, 7.0, 8.0) + val x = Variabl (data1, name = Some ("x")) + val y = Variabl (data2, name = Some ("y")) + GradCheck.gradCheckAll (Seq (x, y), () => x / y, quiet = true) + } + // Unary ops + R.record ("Add constant") { + val data1 = TensorD ((2, 2, 1), 1.0, 2.0, 3.0, 4.0) + val x = Variabl (data1, name = Some ("x")) + GradCheck.gradCheck (x, () => x + 10.0, quiet = true) + } + R.record ("Sub constant") { + val data1 = TensorD ((2, 2, 1), 1.0, 2.0, 3.0, 4.0) + val x = Variabl (data1, name = Some ("x")) + GradCheck.gradCheck (x, () => x - 5.0, quiet = true) + } + R.record ("Mul constant") { + val data1 = TensorD ((2, 2, 1), 1.0, 2.0, 3.0, 4.0) + val x = Variabl (data1, name = Some ("x")) + GradCheck.gradCheck (x, () => x * 2.0, quiet = true) + } + R.record ("Div constant") { + val data1 = TensorD ((2, 2, 1), 1.0, 2.0, 3.0, 4.0) + val x = Variabl (data1, name = Some ("x")) + GradCheck.gradCheck (x, () => x / 2.0, quiet = true) + } + R.summary (title = "Autograd Basic Operations - Test 1") + + end autogradTest1 + + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `autogradTest2` main function tests a more complex expression formed + * by combining several elementary operations, namely element-wise + * z = (x * y) + (x / y) - y. + * It evaluates the forward computation and then uses + * `GradCheck.gradCheckAll` to verify correct gradient propagation through + * the composite computation graph. The test also exports the resulting + * computation graph in DOT format for visualization. 
+ * > runMain scalation.modeling.autograd.AutogradTest.autogradTest2 + */ + @main def autogradTest2 (): Unit = + + val data1 = TensorD ((2, 2, 1), 1.0, 2.0, 3.0, 4.0) + val data2 = TensorD ((2, 2, 1), 5.0, 6.0, 7.0, 8.0) + val x = Variabl (data1, name = Some ("x")) + val y = Variabl (data2, name = Some ("y")) + + // Complex operation: z = (x * y) + (x / y) - y + println ("\nTesting Complex Expression: z = (x * y) + (x / y) - y") + val zFinal = (x * y) + (x / y) - y + println (s"zFinal ((x * y) + (x / y) - y): $zFinal") + + val lossFunc = () => ((x * y) + (x / y) - y) + val R = TestReport () + + R.record ("Complex Expression") { + GradCheck.gradCheckAll (Seq (x, y), lossFunc, quiet = false, debug = true) + } + R.summary (title = "Autograd Complex Operations - Test 2") + + // Export computation graph (after grad checks so gradients populated) + val outPath = "target/autograd/visualization/computation_graph_test.dot" + GraphExporter.writeDot (zFinal, outPath, renderSvg = true) + println (s"Computation graph DOT written to $outPath") + + end autogradTest2 + + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `autogradTest3` main function tests a broad range of mathematical + * operations supported by the Autograd system. These include scalar + * operations (power, square root, logarithm, reciprocal, exponential), + * global reductions (mean, sum, variance, standard deviation), axis-wise + * reductions, and extrema extraction. For each operation, the forward + * result is displayed and `GradCheck.gradCheck` is applied to verify + * the correctness of the backward pass. 
+ * > runMain scalation.modeling.autograd.AutogradTest.autogradTest3 + */ + @main def autogradTest3 (): Unit = + + banner ("Autograd Math Operations - Test 3") + + val R = TestReport () + + // ----------------------------------------------------------------- + // Scalar math + // ----------------------------------------------------------------- + + R.record ("Power") { + val x = Variabl (TensorD ((2, 2, 1), 1.0, 2.0, 3.0, 4.0), name = Some ("x")) + println (s"x: ${x.data}") + println (s"x ~^ 2: ${(x ~^ 2).data}") + GradCheck.gradCheck (x, () => x ~^ 2, quiet = true) + } + R.record ("Square Root") { + val x = Variabl (TensorD ((2, 2, 1), 1.0, 2.0, 4.0, 8.0), name = Some ("x")) + println (s"x: ${x.data}") + println (s"x.sqrt: ${x.sqrt.data}") + GradCheck.gradCheck (x, () => x.sqrt, quiet = true) + } + R.record ("Logarithm") { + val x = Variabl (TensorD ((2, 2, 1), 1.0, 2.0, 4.0, 8.0), name = Some ("x")) + println (s"x: ${x.data}") + println (s"x.log: ${x.log.data}") + GradCheck.gradCheck (x, () => x.log, quiet = true) + } + R.record ("Logarithm Base y") { + val x = Variabl (TensorD ((2, 2, 1), 1.0, 2.0, 4.0, 8.0), name = Some ("x")) + val y = 2.0 + println (s"x: ${x.data}") + println (s"x.logBase($y): ${x.logBase(y).data}") + GradCheck.gradCheck (x, () => x.logBase(y), quiet = true) + } + R.record ("Reciprocal") { + val x = Variabl (TensorD ((2, 2, 1), 1.0, 2.0, 4.0, 8.0), name = Some ("x")) + println (s"x: ${x.data}") + println (s"x.reciprocal: ${x.reciprocal.data}") + GradCheck.gradCheck (x, () => x.reciprocal, quiet = true) + } + R.record ("Exponential") { + val x = Variabl (TensorD ((2, 2, 1), 0.0, 1.0, 2.0, 3.0), name = Some ("x")) + println (s"x: ${x.data}") + println (s"x.exp: ${x.exp.data}") + GradCheck.gradCheck (x, () => x.exp, quiet = true) + } + + // ----------------------------------------------------------------- + // Reductions (global) + // ----------------------------------------------------------------- + + R.record ("Mean") { + val x = Variabl 
(TensorD ((2, 2, 1), 1.0, 2.0, 4.0, 8.0), name = Some ("x")) + println (s"x: ${x.data}") + println (s"x.mean: ${x.mean.data}") + GradCheck.gradCheck (x, () => x.mean, quiet = true) + } + R.record ("Sum") { + val x = Variabl (TensorD ((2, 2, 1), 1.0, 2.0, 4.0, 8.0), name = Some ("x")) + println (s"x: ${x.data}") + println (s"x.sum: ${x.sum.data}") + GradCheck.gradCheck (x, () => x.sum, quiet = true) + } + R.record ("Variance") { + val x = Variabl (TensorD ((2, 2, 1), 1.0, 2.0, 4.0, 8.0), name = Some ("x")) + println (s"x: ${x.data}") + println (s"x.variance: ${x.variance.data}") + GradCheck.gradCheck (x, () => x.variance, quiet = true) + } + R.record ("Standard Deviation") { + val x = Variabl (TensorD ((2, 2, 1), 1.0, 2.0, 4.0, 8.0), name = Some ("x")) + println (s"x: ${x.data}") + println (s"x.std: ${x.std.data}") + GradCheck.gradCheck (x, () => x.std, quiet = true) + } + + // ----------------------------------------------------------------- + // Axis-wise reductions + // ----------------------------------------------------------------- + + R.record ("Mean Along Axis 1") { + val x = Variabl (TensorD ((2, 2, 1), 1.0, 2.0, 3.0, 4.0), name = Some ("x")) + println (s"x: ${x.data}") + println (s"x.meanAxis(1): ${x.meanAxis(1).data}") + GradCheck.gradCheck (x, () => x.meanAxis(1), quiet = true) + } + R.record ("Variance Along Axis 1") { + val x = Variabl (TensorD ((2, 2, 1), 1.0, 2.0, 3.0, 4.0), name = Some ("x")) + println (s"x: ${x.data}") + println (s"x.varAxis(1): ${x.varAxis(1).data}") + GradCheck.gradCheck (x, () => x.varAxis(1), quiet = true) + } + R.record ("Std Along Axis 1") { + val x = Variabl (TensorD ((2, 2, 1), 1.0, 2.0, 3.0, 4.0), name = Some ("x")) + println (s"x: ${x.data}") + println (s"x.stdAxis(1): ${x.stdAxis(1).data}") + GradCheck.gradCheck (x, () => x.stdAxis(1), quiet = true) + } + + // ----------------------------------------------------------------- + // Extrema reductions + // ----------------------------------------------------------------- + 
+ R.record ("Max Value") { + val x = Variabl (TensorD ((2, 2, 1), -1.0, 5.0, 3.0, 2.0), name = Some ("x")) + println (s"x: ${x.data}") + println (s"x.maxValue: ${x.maxValue.data}") + GradCheck.gradCheck (x, () => x.maxValue, quiet = true) + } + R.record ("Min Value") { + val x = Variabl (TensorD ((2, 2, 1), -1.0, 5.0, 3.0, 2.0), name = Some ("x")) + println (s"x: ${x.data}") + println (s"x.minValue: ${x.minValue.data}") + GradCheck.gradCheck (x, () => x.minValue, quiet = true) + } + R.summary (title = "Autograd Math Operations - Test 3") + + end autogradTest3 + + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `autogradTest4` main function tests a variety of activation functions + * supported by the Autograd system, including ReLU, Sigmoid, Tanh, GeLU, + * Softmax, Identity, LeakyReLU, and ELU. Each activation is applied to a + * representative set of inputs to check behavior across different regions + * (e.g., saturation, symmetry, negative/positive domains). Gradient checking + * via `GradCheck.gradCheck` is performed to ensure correct backward + * propagation through each nonlinearity. Note that GeLU and Softmax forward + * values are currently inconsistent with PyTorch and are marked for revision. 
+ * > runMain scalation.modeling.autograd.AutogradTest.autogradTest4 + */ + @main def autogradTest4 (): Unit = + + banner ("Autograd Activation Functions - Test 4") + + val R = TestReport () + + R.record ("ReLU") { + val t = VectorD (-5.0, -2.0, -1.0, -0.5, 0.5, 2.0, 5.0) // skip 0 for gradCheck + val data = TensorD.fromVector (t) + val x = Variabl (data, name = Some ("x")) + println (s"x.relu: ${x.relu.data}") + GradCheck.gradCheck (x, () => x.relu, quiet = true) + } + R.record ("Sigmoid") { + val t = VectorD (-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0) // test saturation + center + val data = TensorD.fromVector (t) + val x = Variabl (data, name = Some ("x")) + println (s"x.sigmoid: ${x.sigmoid.data}") + GradCheck.gradCheck (x, () => x.sigmoid, quiet = true) + } + R.record ("Tanh") { + val t = VectorD (-5.0, -2.0, -1.0, 0.0, 1.0, 2.0, 5.0) // symmetric coverage + val data = TensorD.fromVector (t) + val x = Variabl (data, name = Some ("x")) + println (s"x.tanh: ${x.tanh.data}") + GradCheck.gradCheck (x, () => x.tanh, quiet = true) + } + // TODO fix: Forward values are wrong compared to PyTorch + R.record ("GeLU") { + val t = VectorD (-5.0, -2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0, 5.0) // around 0 is key + val data = TensorD.fromVector (t) + val x = Variabl (data, name = Some ("x")) + println (s"x.gelu: ${x.gelu.data}") + GradCheck.gradCheck (x, () => x.gelu, quiet = true) + } + R.record ("Softmax") { + val t = VectorD (-2.0, -1.0, 0.0, 1.0, 2.0) // vector input, tests relative scale + val data = TensorD.fromVector (t) + val x = Variabl (data, name = Some ("x")) + println (s"x.softmax: ${x.softmax.data}") + GradCheck.gradCheck (x, () => x.softmax, quiet = true) + } + R.record ("Identity") { + val t = VectorD (-5.0, -1.0, 0.0, 1.0, 5.0) // trivial, but still cover range + val data = TensorD.fromVector (t) + val x = Variabl (data, name = Some ("x")) + println (s"x.id: ${x.id.data}") + GradCheck.gradCheck (x, () => x.id, quiet = true) + } + R.record ("Leaky ReLU") { + val t = 
VectorD (-5.0, -2.0, -1.0, -0.5, 0.5, 2.0, 5.0) // skip 0 for gradCheck + val data = TensorD.fromVector (t) + val x = Variabl (data, name = Some ("x")) + println (s"x.leakyReLU: ${x.leakyReLU ().data}") + GradCheck.gradCheck (x, () => x.leakyReLU (), quiet = true) + } + R.record ("ELU") { + val t = VectorD (-5.0, -2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0, 5.0) // negative + positive + val data = TensorD.fromVector (t) + val x = Variabl (data, name = Some ("x")) + println (s"x.elu: ${x.elu ().data}") + GradCheck.gradCheck (x, () => x.elu (), quiet = true) + } + R.summary (title = "Autograd Activation Functions - Test 4") + + end autogradTest4 + + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `autogradTest5` main function tests core regression loss functions + * supported by the Autograd system: SSE, MSE, and MAE. Each loss is evaluated + * on simple predicted and target tensors, and `GradCheck.gradCheck` is used + * to validate correctness of the backward pass. These losses form the basis + * of most regression models, so ensuring accurate gradients is essential. 
+ * > runMain scalation.modeling.autograd.AutogradTest.autogradTest5 + */ + @main def autogradTest5 (): Unit = + + banner ("Autograd Loss Functions - Test 5") + val predData = TensorD ((2, 2, 1), 0.9, 0.1, 0.8, 0.2) + val targetData = TensorD ((2, 2, 1), 1.0, 0.0, 1.0, 0.0) + val pred = Variabl (predData, name = Some ("pred")) + val target = Variabl (targetData, name = Some ("target")) + + println ("\nTesting Loss Functions") + + val R = TestReport () + + R.record ("SSE Loss") { + println (s"sseLoss (pred, target): ${sseLoss (pred, target).data}") + GradCheck.gradCheck (pred, () => sseLoss (pred, target), quiet = true) + } + R.record ("MSE Loss") { + println (s"mseLoss (pred, target): ${mseLoss (pred, target).data}") + GradCheck.gradCheck (pred, () => mseLoss (pred, target), quiet = true) + } + R.record ("MAE Loss") { + println (s"maeLoss (pred, target): ${maeLoss (pred, target).data}") + GradCheck.gradCheck (pred, () => maeLoss (pred, target), quiet = true) + } + R.summary (title = "Autograd Loss Functions - Test 5") + + end autogradTest5 + + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `autogradTest6` main function verifies tensor-level linear algebra + * operations, including transpose, permutation, reshape, slice, concat, dot product, + * matrix multiplication, and batched matrix multiplication (BMM). + * For each operation, forward correctness is checked and gradient correctness + * is validated using `GradCheck.gradCheck` or `gradCheckAll`. + * BMM is also compared against a manually computed expected tensor to ensure + * alignment with the Autograd system’s shape conventions. 
+ * > runMain scalation.modeling.autograd.AutogradTest.autogradTest6 + */ + @main def autogradTest6 (): Unit = + + banner ("Autograd Tensor Operations - Test 6") + + val R = TestReport () + + // Transpose + R.record ("Transpose") { + val C = MatrixD ((2, 3), 1, 2, 3, 4, 5, 6) + val mat = Variabl (TensorD.fromMatrix (C, Some ((1, 2, 3))), name = Some ("mat")) + println (s"mat: ${mat.data}") + println (s"mat.transpose (1,2): ${mat.transpose (1, 2).data}") + GradCheck.gradCheck (mat, () => mat.transpose (1, 2), quiet = true) + } + // Permute + R.record ("Permute") { + val data = TensorD ((2, 3, 1), 1.0, 2.0, 3.0, 4.0, 5.0, 6.0) + val x = Variabl (data, name = Some ("x")) + println (s"x: ${x.data}") + println (s"x.permute (Seq (1,0,2)): ${x.permute (Seq (1, 0, 2)).data}") + GradCheck.gradCheck (x, () => x.permute (Seq (1, 0, 2)), quiet = true) + } + // Reshape + R.record ("Reshape") { + val data = TensorD ((2, 3, 1), 1.0, 2.0, 3.0, 4.0, 5.0, 6.0) + val x = Variabl (data, name = Some ("x")) + println (s"x: ${x.data}") + println (s"x.reshape(Seq (6,1,1)): ${x.reshape (Seq (6, 1, 1)).data}") + GradCheck.gradCheck (x, () => x.reshape (Seq (6, 1, 1)), quiet = true) + } + // Slice + R.record ("Slice") { + val data = TensorD ((2, 3, 1), 1.0, 2.0, 3.0, 4.0, 5.0, 6.0) + val x = Variabl (data, name = Some ("x")) + println (s"x: ${x.data}") + println (s"x(?, 1 until 3, ?): ${x(?, 1 until 3, ?).data}") + GradCheck.gradCheck (x, () => x(?, 1 until 3, ?), quiet = true) + } + // Concat + R.record ("Concat") { + val dataA = TensorD ((2, 2, 1), 1.0, 2.0, 3.0, 4.0) + val dataB = TensorD ((2, 2, 1), 5.0, 6.0, 7.0, 8.0) + val x = Variabl (dataA, name = Some ("x")) + val y = Variabl (dataB, name = Some ("y")) + println (s"x: ${x.data}") + println (s"y: ${y.data}") + println (s"x.concat (y, axis=1): ${concat (Seq (x, y), axis=1).data}") + GradCheck.gradCheckAll (Seq (x, y), () => concat (Seq (x, y), axis=1), quiet = true) + } + // Dot product + R.record ("Dot Product") { + val vecA = 
Variabl (TensorD.fromVector (VectorD (1, 2, 3)), name = Some ("vecA")) + val vecB = Variabl (TensorD.fromVector (VectorD (4, 5, 6)), name = Some ("vecB")) + GradCheck.gradCheckAll (Seq (vecA, vecB), () => vecA.dot (vecB), quiet = true) + } + // Matrix multiplication + R.record ("Matrix Multiplication") { + val C = MatrixD ((2, 3), 1, 2, 3, 4, 5, 6) + val D = MatrixD ((3, 2), 7, 8, 9, 10, 11, 12) + val matA = Variabl (TensorD.fromMatrix (C, Some ((1, 2, 3))), name = Some ("matA")) + val matB = Variabl (TensorD.fromMatrix (D, Some ((1, 3, 2))), name = Some ("matB")) + GradCheck.gradCheckAll (Seq (matA, matB), () => matA.matmul (matB), quiet = true) + } + // Batched matrix multiplication + R.record ("Batched MatMul (BMM)") { + // batchedA: shape (2, 3, 1) + val batchedA = Variabl ( + TensorD ((2, 3, 1), + 1.0, 2.0, 3.0, // batch 0 + 4.0, 5.0, 6.0), // batch 1 + name = Some ("bmmA")) + + // batchedB: shape (2, 1, 2) + val batchedB = Variabl ( + TensorD ((2, 1, 2), + 10.0, 20.0, // first column (k=0) for batch0, batch1 + 30.0, 40.0), // second column (k=1) for batch0, batch1 + name = Some ("bmmB")) + + val result = batchedA.bmm (batchedB) + + val expected = TensorD ((2, 3, 2), + // k=0 column then k=1 column (TensorD’s fill order is k,i,j) + 10.0, 20.0, 30.0, 80.0, 100.0, 120.0, + 30.0, 60.0, 90.0, 160.0, 200.0, 240.0 + ) + println (s"result: ${result.data}") + println (s"expected: $expected") + assert (result.data == expected, s"BMM result ${result.data} did not match expected $expected") + + GradCheck.gradCheckAll (Seq (batchedA, batchedB), () => batchedA.bmm(batchedB), quiet = true) + } + // Final summary + R.summary (title = "Autograd Tensor Operations - Test 6") + + end autogradTest6 + + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `autogradTest7` main function tests autograd through a small two-layer + * fully connected neural network. 
A Linear → ReLU → Linear architecture is + * constructed, and gradients are validated for inputs, weights, and biases + * using `GradCheck.gradCheck`. This test ensures end-to-end correctness of + * forward propagation, backward propagation, and parameter gradient tracking. + * A computation graph for the network output is also exported for debugging. + * > runMain scalation.modeling.autograd.AutogradTest.autogradTest7 + */ + @main def autogradTest7 (): Unit = + + banner ("Autograd 2-Layer Net - Test 7") + + val inFeatures = 4 + val hiddenUnits = 5 + val outFeatures = 3 + + // Two layers + val fc1 = Linear (inFeatures, hiddenUnits) + val fc2 = Linear (hiddenUnits, outFeatures) + + // Dummy batch of 2 examples, each of size 4 + val inputData = TensorD ((2, 4, 1), + 1.0, 2.0, 3.0, 4.0, + 5.0, 6.0, 7.0, 8.0) + val inputVar = Variabl (inputData, name = Some ("input")) + + def net (x: Variabl): Variabl = + x ~> fc1 ~> relu ~> fc2 + + val R = TestReport () + + // Forward sanity check + R.record ("2-Layer Net - Forward") { + val out = net (inputVar) + println (s"Forward output: ${out.data}") + true + } + // GradCheck: input + R.record ("2-Layer Net - Input GradCheck") { + GradCheck.gradCheck (inputVar, () => net (inputVar).sum, quiet = true) + } + // GradCheck: fc1 weights + R.record ("2-Layer Net - FC1 Weight GradCheck") { + GradCheck.gradCheck (fc1.weight, () => net (inputVar).sum, quiet = true) + } + // GradCheck: fc1 bias + R.record ("2-Layer Net - FC1 Bias GradCheck") { + GradCheck.gradCheck (fc1.bias, () => net (inputVar).sum, quiet = true) + } + // GradCheck: fc2 weights + R.record ("2-Layer Net - FC2 Weight GradCheck") { + GradCheck.gradCheck (fc2.weight, () => net (inputVar).sum, quiet = true) + } + // GradCheck: fc2 bias + R.record ("2-Layer Net - FC2 Bias GradCheck") { + GradCheck.gradCheck (fc2.bias, () => net (inputVar).sum, quiet = true) + } + // Final summary + R.summary (title = "Autograd 2-Layer Net - Test 7") + + val outPath = 
"target/autograd/visualization/computation_graph_2layer_net.dot" + GraphExporter.writeDot (net (inputVar), outPath, renderSvg = true) + println (s"Computation graph DOT written to $outPath") + + end autogradTest7 + + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `autogradTest8` main function trains a single-layer neural network on + * the AutoMPG regression dataset using Autograd. The dataset is standardized, + * fed through a Linear ~> Identity model, and optimized using stochastic + * gradient descent. This test validates that the autograd engine supports + * full training loops, gradient accumulation, parameter updates, and evaluation + * against regression metrics. + * > runMain scalation.modeling.autograd.AutogradTest.autogradTest8 + */ + @main def autogradTest8 (): Unit = + + banner ("Autograd 1 Layer Net with AutoMPG (Testing against Regression) - Test 8") + + println (s"x.shape: ${x.dims}") + println (s"y.shape: ${y.dim}") + val tensorX = TensorD.fromMatrix (x).permute (Seq (1, 2, 0)) + val tensorY = TensorD.fromVector (y) +// val meanX = TensorD.meanAlongAxis (tensorX, axis=0) +// val stdX = TensorD.stdAlongAxis (tensorX, axis=0) + val meanY = TensorD.meanAlongAxis (tensorY, axis=0) + val stdY = TensorD.stdAlongAxis (tensorY, axis=0) + val norm_x = TensorD.standardize (tensorX, axis=0) + val norm_y = TensorD.standardize (tensorY, axis=0) + + println (s"norm_x.shape: ${norm_x.shape}") + println (s"norm_y.shape: ${norm_y.shape}") + val input = Variabl (norm_x) + var y_actual = Variabl (norm_y) + + case class Net () extends Module with Fit (x.dim2 - 1, x.dim2 - x.dim): + val nf1: Int = tensorX.dim2 + val outputNodes: Int = tensorY.dim2 + val fc1: Linear = Linear (nf1, outputNodes) + + override def forward (x: Variabl): Variabl = x ~> fc1 ~> identity + end Net + + object Net: + def apply (): Net = new Net () + end Net + + val net = Net () + val optimizer = SGD (parameters = net.parameters, lr = 0.01, momentum = 
0.9) + val permGen = new Optimizer_SGDM {}.permGenerator (norm_x.shape(0)) + val batchSize = 64 + val nB = norm_x.shape(0) / batchSize + + for j <- 0 to 1000 do + optimizer.zeroGrad () + val batches = permGen.igen.chop (nB) + var totalLoss = 0.0 + var batchCount = 0 + for ib <- batches do + val inputBatch = Variabl (norm_x(ib)) + val yBatch = Variabl (norm_y(ib)) + val output = net (inputBatch) + val loss = mseLoss (output, yBatch) + totalLoss += loss.data(0)(0)(0) + batchCount += 1 + loss.backward () + optimizer.step () + end for + val avgLoss = totalLoss / batchCount + if j % 100 == 0 then println (s"Epoch $j: Loss = $avgLoss") + end for + + println (s"Model structure: $net") + println (s"Weight shape: ${net.fc1.weight.shape}") + println (s"Bias shape: ${net.fc1.bias.shape}") + + val outputFinal = net (input) + val y_pred = outputFinal * Variabl (stdY) + Variabl (meanY) + y_actual = y_actual * Variabl (stdY) + Variabl (meanY) + println (s"y_pred shape: ${y_pred.shape}") + val qof = net.diagnose (y_actual.data.flattenToVector, y_pred.data.flattenToVector) + println (FitM.fitMap (qof, qoF_names)) + println (s"grad after convergence: ${net.fc1.weight.grad}") + println (s"weights after convergence: ${net.fc1.weight.data}") + end autogradTest8 + + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `autogradTest9` main function trains a two-layer neural network on the + * AutoMPG regression dataset. The architecture uses a hidden layer with a + * sigmoid activation followed by a linear output layer. + * Optimization uses the Adam optimizer, demonstrating compatibility between + * Autograd parameter graphs and adaptive optimizers. + * The test evaluates gradient flow, training stability, and final regression + * quality after rescaling predictions back to the original domain. 
+ * > runMain scalation.modeling.autograd.AutogradTest.autogradTest9 + */ + @main def autogradTest9 (): Unit = + + banner ("Autograd 2 Layer Net with AutoMPG (Testing against NeuralNet_3L - Test 9") + + println (s"x.shape: ${x.shape}") + println (s"y.shape: ${y.dim}") + val tensorX = TensorD.fromMatrix (x).permute (Seq (1, 2, 0)) + val tensorY = TensorD.fromVector (y) +// val meanX = TensorD.meanAlongAxis (tensorX, axis = 0) +// val stdX = TensorD.stdAlongAxis (tensorX, axis = 0) + val meanY = TensorD.meanAlongAxis (tensorY, axis = 0) + val stdY = TensorD.stdAlongAxis (tensorY, axis = 0) + val norm_x = TensorD.standardize (tensorX, axis = 0) + val norm_y = TensorD.standardize (tensorY, axis = 0) + + println (s"norm_x.shape: ${norm_x.shape}") + println (s"norm_y.shape: ${norm_y.shape}") + val input = Variabl (norm_x) + var y_actual = Variabl (norm_y) + + case class Net () extends Module with Fit (x.dim2 - 1, x.dim - x.dim2): + val nf1: Int = tensorX.dim2 + val hiddenNodes: Int = 2 * nf1 + 1 + val outputNodes: Int = tensorY.dim2 + val fc1: Linear = Linear (nf1, hiddenNodes) + val fc2: Linear = Linear (hiddenNodes, outputNodes) + + override def forward (x: Variabl): Variabl = x ~> fc1 ~> sigmoid ~> fc2 ~> identity + end Net + + object Net: + def apply (): Net = new Net () + end Net + + val net = Net () + val optimizer = Adam (parameters = net.parameters, lr = 0.002, beta1 = 0.9, beta2 = 0.999) + val batchSize = 20 + val nB = norm_x.shape(0) / batchSize + + for j <- 0 to 400 do + val permGen = new Optimizer_SGDM {}.permGenerator (norm_x.shape(0)) + val batches = permGen.igen.chop (nB) + var totalLoss = 0.0 + var totalElem = 0 + for ib <- batches do + optimizer.zeroGrad() + val inputBatch = Variabl (norm_x(ib)) + val yBatch = Variabl (norm_y(ib)) + val output = net (inputBatch) + val loss = mseLoss (output, yBatch) + val numel = yBatch.shape.product + totalLoss += loss.data(0)(0)(0) * numel + totalElem += numel + loss.backward () + optimizer.step () + end for + val 
avgLoss = totalLoss / totalElem + if j % 100 == 0 then println (s"Epoch $j: Loss = $avgLoss") + end for + + println (s"Model structure: $net") + println (s"Weight shape: ${net.fc1.weight.shape}") + println (s"Bias shape: ${net.fc1.bias.shape}") + + val outputFinal = net (input) + val y_pred = outputFinal * Variabl (stdY) + Variabl (meanY) + y_actual = y_actual * Variabl (stdY) + Variabl (meanY) + println (s"y_pred shape: ${y_pred.shape}") + val qof = net.diagnose (y_actual.data.flattenToVector, y_pred.data.flattenToVector) + println (FitM.fitMap (qof, qoF_names)) + println (s"grad after convergence: ${net.fc1.weight.grad}") + println (s"weights after convergence: ${net.fc1.weight.data}") + + end autogradTest9 + + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `autogradTest10` main function trains a three-layer neural network on + * the AutoMPG dataset using SGD with momentum and an early-stopping rule. + * Multiple nonlinearities (tanh and sigmoid) are used to exercise diverse + * gradient shapes. + * The test demonstrates the autograd system’s support for: multi-layer models, + * training loops, early stopping, loss monitoring, and post-training evaluation + * with rescaled predictions. 
+ * > runMain scalation.modeling.autograd.AutogradTest.autogradTest10 + */ + @main def autogradTest10 (): Unit = + + banner ("Autograd 3 Layer Net with AutoMPG (Testing against NeuralNet_3L - Test 10") + println (s"x.shape: ${x.shape}") + println (s"y.shape: ${y.dim}") + val tensorX = TensorD.fromMatrix (x).permute (Seq (1, 2, 0)) + val tensorY = TensorD.fromVector (y) +// val meanX = TensorD.meanAlongAxis (tensorX, axis = 0) +// val stdX = TensorD.stdAlongAxis (tensorX, axis = 0) + val meanY = TensorD.meanAlongAxis (tensorY, axis = 0) + val stdY = TensorD.stdAlongAxis (tensorY, axis = 0) + val norm_x = TensorD.standardize (tensorX, axis = 0) + val norm_y = TensorD.standardize (tensorY, axis = 0) + + println (s"norm_x.shape: ${norm_x.shape}") + println (s"norm_y.shape: ${norm_y.shape}") + val input = Variabl (norm_x) + var y_actual = Variabl (norm_y) + + case class Net () extends Module with Fit (x.dim2 - 1, x.dim - x.dim2): + val nf1: Int = tensorX.dim2 + val hiddenNodes: Int = 2 * nf1 + 1 + val outputNodes: Int = tensorY.dim2 + val fc1: Linear = Linear (nf1, hiddenNodes) + val fc2: Linear = Linear (hiddenNodes, hiddenNodes) + val fc3: Linear = Linear (hiddenNodes, outputNodes) + + override def forward (x: Variabl): Variabl = + val h1 = x ~> fc1 ~> tanh + val h2 = h1 ~> fc2 ~> sigmoid + val out = h2 ~> fc3 ~> identity + out + end Net + + object Net: + def apply (): Net = new Net () + end Net + + val net = Net () + val optimizer = SGD (parameters = net.parameters, lr = 0.25, momentum = 0.90) + val batchSize = 20 + val nB = ceil (norm_x.shape(0).toDouble / batchSize).toInt + println (s"nB: $nB") + + object monitor extends MonitorLoss + object opti extends Optimizer_SGDM + object EarlyStopper extends StoppingRule + val limit = 15 + var stopTraining = false + + for j <- 0 to 400 if ! 
stopTraining do + val permGen = opti.permGenerator (norm_x.shape(0)) + val batches = permGen.igen.chop (nB) + var totalLoss = 0.0 + var totalElem = 0 + for ib <- batches do + optimizer.zeroGrad () + val inputBatch = Variabl (norm_x(ib)) + val yBatch = Variabl (norm_y(ib)) + val output = net (inputBatch) + val loss = mseLoss (output, yBatch) + val numel = yBatch.shape.product + totalLoss += loss.data(0)(0)(0) * numel + totalElem += numel + loss.backward () + optimizer.step () + end for + val avgLoss = totalLoss / totalElem + monitor.collectLoss (avgLoss) + if j % 100 == 0 then println (s"Epoch $j: Loss = $avgLoss") + val (stopParams, currentBestLoss) = EarlyStopper.stopWhenContinuous (net.parameters, avgLoss, limit) + if stopParams != null then + println (s"Early stopping triggered at epoch $j with best loss $currentBestLoss") + net.setParameters (stopParams) + stopTraining = true + end if + end for + + monitor.plotLoss ("NeuralNet4L") + val varData = y.variance + val bestLoss = monitor.getBestLoss * varData + println (s"Best Loss Unscaled: $bestLoss") + println (s"Best Loss Scaled: ${monitor.getBestLoss}") + println (s"Model structure: $net") + println (s"Weight shape: ${net.fc1.weight.shape}") + println (s"Bias shape: ${net.fc1.bias.shape}") + + val outputFinal = net (input) + val y_pred = outputFinal * Variabl (stdY) + Variabl (meanY) + y_actual = y_actual * Variabl (stdY) + Variabl (meanY) + println (s"y_pred shape: ${y_pred.shape}") + val qof = net.diagnose(y_actual.data.flattenToVector, y_pred.data.flattenToVector) + println (FitM.fitMap (qof, qoF_names)) + println (s"grad after convergence: ${net.fc1.weight.grad}") + println (s"weights after convergence: ${net.fc1.weight.data}") + + end autogradTest10 + + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `autogradTest11` main function tests a three-layer neural network on + * a train/test split of the AutoMPG dataset. 
The network uses sigmoid and + * ReLU activations and trains with SGD + momentum and an early stopping rule + * based on validation loss. + * This test validates the autograd system’s ability to handle data splitting, + * model evaluation on unseen data, and training control flow involving + * patience-based stopping. + * > runMain scalation.modeling.autograd.AutogradTest.autogradTest11 + */ + @main def autogradTest11 (): Unit = + + banner ("Autograd 3 Layer Net with AutoMPG (Testing against NeuralNet_XL - Test 11") + + val permGenSplit = TnT_Split.makePermGen (x.dim) + val n_test = (0.4 * x.dim).toInt + val splitIdx = TnT_Split.testIndices (permGenSplit, n_test) + val (x_test, x_train, y_test, y_train) = TnT_Split (x, y, splitIdx) + println (s"x.shape: ${x.shape}") + println (s"y.shape: ${y.dim}") + val tensorX = TensorD.fromMatrix (x_train).permute (Seq (1, 2, 0)) + val tensorY = TensorD.fromVector (y_train) + val meanX = TensorD.meanAlongAxis (tensorX, axis = 0) + val stdX = TensorD.stdAlongAxis (tensorX, axis = 0) + val meanY = TensorD.meanAlongAxis (tensorY, axis = 0) + val stdY = TensorD.stdAlongAxis (tensorY, axis = 0) + val norm_x = TensorD.standardize (tensorX, axis = 0) + val norm_y = TensorD.standardize (tensorY, axis = 0) + println (s"norm_x.shape: ${norm_x.shape}") + println (s"norm_y.shape: ${norm_y.shape}") + val input = Variabl (norm_x) + var y_actual = Variabl (norm_y) + + case class Net () extends Module with Fit (x.dim2 - 1, x.dim - x.dim2): + val nf1: Int = tensorX.dim2 + val hiddenNodes: Int = 2 * nf1 + 1 + val outputNodes: Int = tensorY.dim2 + val fc1: Linear = Linear (nf1, hiddenNodes) + val fc2: Linear = Linear (hiddenNodes, hiddenNodes) + val fc3: Linear = Linear (hiddenNodes, outputNodes) + + override def forward (x: Variabl): Variabl = + val h1 = x ~> fc1 ~> sigmoid + val h2 = h1 ~> fc2 ~> relu + val out = h2 ~> fc3 ~> identity + out + end Net + + object Net: + def apply (): Net = new Net () + end Net + + val net = Net () + val optimizer 
= SGD (parameters = net.parameters, lr = 0.1, momentum = 0.90) + val batchSize = 20 + val nB = ceil (norm_x.shape(0).toDouble / batchSize).toInt + println (net.parameters.toString ()) + println (s"nB: $nB") + + object monitor extends MonitorLoss + object opti extends Optimizer_SGDM + object EarlyStopper extends StoppingRule + val limit = 40 + var stopTraining = false + + for j <- 0 to 400 if ! stopTraining do + val permGen = opti.permGenerator (norm_x.shape(0)) + val batches = permGen.igen.chop (nB) + var totalLoss = 0.0 + var totalElem = 0 + for ib <- batches do + optimizer.zeroGrad () + val inputBatch = Variabl (norm_x(ib)) + val yBatch = Variabl (norm_y(ib)) + val output = net (inputBatch) + val loss = mseLoss (output, yBatch) + val numel = yBatch.shape.product + totalLoss += loss.data(0)(0)(0) * numel + totalElem += numel + loss.backward () + optimizer.step () + end for + val avgLoss = totalLoss / totalElem + monitor.collectLoss (avgLoss) + if j % 100 == 0 then println (s"Epoch $j: Loss = $avgLoss") + val (stopParams, currentBestLoss) = EarlyStopper.stopWhenPatience (net.parameters, avgLoss, limit) + if stopParams != null then + println (s"Early stopping triggered at epoch $j with best loss $currentBestLoss") + net.setParameters (stopParams) + stopTraining = true + end if + end for + + monitor.plotLoss ("NeuralNet3L") + val varData = y.variance + val bestLoss = monitor.getBestLoss * varData + println (s"Best Loss Unscaled: $bestLoss") + println (s"Best Loss Scaled: ${monitor.getBestLoss}") + println (s"Model structure: $net") + println (s"Weight shape: ${net.fc1.weight.shape}") + println (s"Bias shape: ${net.fc1.bias.shape}") + + val outputFinal = net (input) + val y_pred = outputFinal * Variabl (stdY) + Variabl (meanY) + y_actual = y_actual * Variabl (stdY) + Variabl (meanY) + println (s"y_pred shape: ${y_pred.shape}") + banner ("Final Train Statistics") + val qof = net.diagnose (y_actual.data.flattenToVector, y_pred.data.flattenToVector) + println 
(FitM.fitMap (qof, qoF_names)) + val testX = TensorD.fromMatrix (x_test).permute (Seq (1, 2, 0)) + val testY = TensorD.fromVector (y_test) + val testNormX = (testX - meanX)/stdX + val testNormY = (testY - meanY)/stdY + val testInput = Variabl (testNormX) + val testActual = Variabl (testNormY) + val testOutput = net (testInput) + val testPred = testOutput * Variabl (stdY) + Variabl (meanY) + val testActualRescaled = testActual * Variabl (stdY) + Variabl (meanY) + + println (s"test_pred shape: ${testPred.shape}") + println (s"Final Test Loss: ${mseLoss (testOutput, testActual).data(0)(0)(0)}") + banner ("Final Test Statistics") + val testQoF = net.diagnose (testActualRescaled.data.flattenToVector, testPred.data.flattenToVector) + println (FitM.fitMap (testQoF, qoF_names)) + + end autogradTest11 + + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `autogradTest12` main function provides an alternative training pipeline + * for a three-layer neural network on AutoMPG, using randomized train/test + * splitting. + * It tests the autograd engine under variations in data sampling, training + * strategy, and network depth, while also exercising patience-based early + * stopping and full regression evaluation on both training and test sets. 
+ * > runMain scalation.modeling.autograd.AutogradTest.autogradTest12 + */ + @main def autogradTest12 (): Unit = + + banner ("Autograd 3 Layer Net with AutoMPG (Testing against NeuralNet_XL - Test 12") + + val permGenSplit = TnT_Split.makePermGen (x.dim) + val rando = true + val n_test = (0.2 * x.dim).toInt + val splitIdx = TnT_Split.testIndices (permGenSplit, n_test, rando = rando) + val (x_test, x_train, y_test, y_train) = TnT_Split (x, y, splitIdx) + println (s"x.shape: ${x.shape}") + println (s"y.shape: ${y.dim}") + val tensorX = TensorD.fromMatrix (x_train).permute (Seq (1, 2, 0)) + val tensorY = TensorD.fromVector (y_train) + val meanX = TensorD.meanAlongAxis (tensorX, axis = 0) + val stdX = TensorD.stdAlongAxis (tensorX, axis = 0) + val meanY = TensorD.meanAlongAxis (tensorY, axis = 0) + val stdY = TensorD.stdAlongAxis( tensorY, axis = 0) + val norm_x = TensorD.standardize (tensorX, axis = 0) + val norm_y = TensorD.standardize (tensorY, axis = 0) + + println (s"norm_x.shape: ${norm_x.shape}") + println (s"norm_y.shape: ${norm_y.shape}") + val input = Variabl (norm_x) + var y_actual = Variabl (norm_y) + + case class Net () extends Module with Fit (x.dim2 - 1, x.dim - x.dim2): + val nf1: Int = tensorX.dim2 + val hiddenNodes: Int = 2 * nf1 + 1 + val outputNodes: Int = tensorY.dim2 + val fc1: Linear = Linear (nf1, hiddenNodes) + val fc2: Linear = Linear (hiddenNodes, hiddenNodes) + val fc3: Linear = Linear (hiddenNodes, outputNodes) + + override def forward (x: Variabl): Variabl = + val h1 = x ~> fc1 ~> sigmoid + val h2 = h1 ~> fc2 ~> relu + val out = h2 ~> fc3 ~> identity + out + end Net + + object Net: + def apply (): Net = new Net () + end Net + + val net = Net () + val optimizer = SGD (parameters = net.parameters, lr = 0.25, momentum = 0.90) + val batchSize = 20 + val nB = ceil (norm_x.shape(0).toDouble / batchSize).toInt + println (net.parameters.toString ()) + println (s"nB: $nB") + + object monitor extends MonitorLoss + object opti extends Optimizer_SGDM 
+ object EarlyStopper extends StoppingRule + val limit = 40 + var stopTraining = false + + for j <- 0 to 400 if !stopTraining do + val permGen = opti.permGenerator (norm_x.shape(0), rando=true) + val batches = permGen.igen.chop (nB) + var totalLoss = 0.0 + var totalElem = 0 + for ib <- batches do + optimizer.zeroGrad () + val inputBatch = Variabl (norm_x(ib)) + val yBatch = Variabl (norm_y(ib)) + val output = net (inputBatch) + val loss = mseLoss (output, yBatch) + val numel = yBatch.shape.product + totalLoss += loss.data(0)(0)(0) * numel + totalElem += numel + loss.backward () + optimizer.step () + end for + val avgLoss = totalLoss / totalElem + monitor.collectLoss (avgLoss) + if j % 100 == 0 then println (s"Epoch $j: Loss = $avgLoss") + val (stopParams, currentBestLoss) = EarlyStopper.stopWhenPatience (net.parameters, avgLoss, limit) + if stopParams != null then + println (s"Early stopping triggered at epoch $j with best loss $currentBestLoss") + net.setParameters (stopParams) + stopTraining = true + end if + end for + + monitor.plotLoss ("NeuralNet3L") + val varData = y.variance + val bestLoss = monitor.getBestLoss * varData + println (s"Best Loss Unscaled: $bestLoss") + println (s"Best Loss Scaled: ${monitor.getBestLoss}") + println (s"Model structure: $net") + println (s"Weight shape: ${net.fc1.weight.shape}") + println (s"Bias shape: ${net.fc1.bias.shape}") + + val outputFinal = net (input) + val y_pred = outputFinal * Variabl (stdY) + Variabl (meanY) + y_actual = y_actual * Variabl (stdY) + Variabl (meanY) + println (s"y_pred shape: ${y_pred.shape}") + banner ("Final Train Statistics") + val qof = net.diagnose (y_actual.data.flattenToVector, y_pred.data.flattenToVector) + println (FitM.fitMap (qof, qoF_names)) + val testX = TensorD.fromMatrix (x_test).permute (Seq (1, 2, 0)) + val testY = TensorD.fromVector (y_test) + val testNormX = (testX - meanX) / stdX + val testNormY = (testY - meanY) / stdY + val testInput = Variabl (testNormX) + val testActual = 
Variabl (testNormY) + val testOutput = net (testInput) + val testPred = testOutput * Variabl (stdY) + Variabl (meanY) + val testActualRescaled = testActual * Variabl (stdY) + Variabl (meanY) + + println (s"test_pred shape: ${testPred.shape}") + println (s"Final Test Loss: ${mseLoss (testOutput, testActual).data(0)(0)(0)}") + banner ("Final Test Statistics") + val testQoF = net.diagnose (testActualRescaled.data.flattenToVector, testPred.data.flattenToVector) + println (FitM.fitMap (testQoF, qoF_names)) + + end autogradTest12 + + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `autogradTestAll` main function sequentially runs the core autograd unit + * tests (0–7) to verify correctness across basic ops, math operations, + * activations, losses, tensor algebra, and small neural networks. + * Longer AutoMPG training tests (8–11) are provided but commented out to avoid + * extended runtime by default. + * > runMain scalation.modeling.autograd.AutogradTest.autogradTestAll + */ + @main def autogradTestAll (): Unit = + + banner ("All Autograd Tests Starting") + + // Run core autograd unit tests to verify correctness + autogradTest0 () + autogradTest1 () + autogradTest2 () + autogradTest3 () + autogradTest4 () + autogradTest5 () + autogradTest6 () + autogradTest7 () + + // The following tests are more time-consuming due to training loops + // and are commented out by default. 
+ // + // autogradTest8 () + // autogradTest9 () + // autogradTest10 () + // autogradTest11 () + + banner ("All Autograd Tests Completed") + + end autogradTestAll + +end AutogradTest + diff --git a/src/main/scala/scalation/modeling/autograd/AutogradTest.scala.bak b/src/main/scala/scalation/modeling/autograd/AutogradTest.scala.bak new file mode 100644 index 000000000..25110dd21 --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/AutogradTest.scala.bak @@ -0,0 +1,867 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri April 25 19:40:13 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Unit Tests for Autograd Functionality + */ + +package scalation +package modeling +package autograd + +import scala.language.implicitConversions +import scala.math.ceil + +import scalation.calculus.Differential +import scalation.mathstat.{MatrixD, TensorD, TnT_Split, VectorD} +import scalation.modeling.neuralnet._ + +import AutogradOps.given +import Example_AutoMPG.{x, y} + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `AutogradTest` object contains various @main tests for autograd functionality. + * The tests validate basic arithmetic, complex expressions, activation functions, + * loss functions, and neural network layers with backpropagation. + */ +object AutogradTest: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `autogradTest0` main function tests basic unary operations in the + * Autograd system. These include absolute value, negation, floor, ceil, + * rounding, sign, and clipping. For each operator, the test reports the + * forward results and performs gradient checking using `Differential.gradCheck`. + * This ensures that each scalar/tensor unary operation is correctly + * implemented in both the forward and backward passes. 
+ * > runMain scalation.modeling.autograd.AutogradTest.autogradTest0 + */ + @main def autogradTest0 (): Unit = + + val R = TestReport () + + R.record ("Absolute Value") { + val data = TensorD ((2, 2, 1), -1.0, -2.0, 3.0, 4.0) + val x = Variabl (data, name = Some("x")) + println (s"x: ${x.data}") + println (s"x.abs: ${x.abs.data}") + Differential.gradCheck (x, () => x.abs, quiet = true) + } + R.record ("Negation") { + val data = TensorD ((2, 2, 1), 1.0, -2.0, 3.0, -4.0) + val x = Variabl (data, name = Some("x")) + println (s"x: ${x.data}") + println (s"x.neg: ${-x.data}") + Differential.gradCheck (x, () => -x, quiet = true) + } + R.record ("Floor") { + val x = Variabl (TensorD ((2, 2, 1), -1.2, -2.8, 3.4, 4.9)) + println (s"x: ${x.data}") + println (s"x.floor: ${x.floor.data}") + Differential.gradCheck (x, () => x.floor, quiet = true) + } + R.record ("Ceil") { + val x = Variabl (TensorD ((2, 2, 1), -1.2, -2.8, 3.4, 4.9)) + println (s"x: ${x.data}") + println (s"x.ceil: ${x.ceil.data}") + Differential.gradCheck (x, () => x.ceil, quiet = true) + } + R.record ("Round") { + val x = Variabl (TensorD ((2, 2, 1), -1.2, -2.3, 3.4, 4.7)) // not half-integers (since grad is undefined there) + println (s"x: ${x.data}") + println (s"x.round: ${x.round.data}") + Differential.gradCheck (x, () => x.round, quiet = true) + } + R.record ("Sign") { + val x = Variabl (TensorD ((2, 2, 1), -1.0,10.0, 2.0, -3.0)) // avoid zero (since grad is undefined there) + println (s"x: ${x.data}") + println (s"x.sign: ${x.sign.data}") + Differential.gradCheck (x, () => x.sign, quiet = true) + } + R.record ("Clip") { + val x = Variabl (TensorD ((2, 2, 1), -2.0, -0.5, 0.5, 3.0)) + println (s"x: ${x.data}") + println (s"x.clip(-1.0, 1.0): ${x.clip(-1.0, 1.0).data}") + Differential.gradCheck (x, () => x.clip(-1.0, 1.0), quiet = true) + } + R.summary ("Autograd Basic Operations - Test 0") + + end autogradTest0 + + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** 
Basic operations test (Addition, Subtraction, Multiplication, Division). + * > runMain scalation.modeling.autograd.AutogradTest.autogradTest1 + */ + @main def autogradTest1 (): Unit = + + banner ("Autograd Basic Operations - Test 1") + // Initial setup with two tensors and wrapping in Variabls. + val data1 = TensorD ((2, 2, 1), 1.0, 2.0, 3.0, 4.0) + val data2 = TensorD ((2, 2, 1), 5.0, 6.0, 7.0, 8.0) + val x = Variabl (data1, name = Some("x")) + val y = Variabl (data2, name = Some("y")) + + // Test Addition: z = x + y. + println ("Testing Addition") + val addOp = Add (x, y) + val z = addOp.forward () + println (s"z (x + y): $z") + z.backward () + println (s"x.grad after addition: ${x.grad}") + println (s"y.grad after addition: ${y.grad}") + + // Test Addition with constant. + println ("\nTesting Addition with Constant") + val addConstOp = AddConstant (x, 10.0) + val zConst = addConstOp.forward () + println (s"z (x + 10.0): $zConst") + zConst.backward () + println (s"x.grad after constant addition: ${x.grad}") + + // Reset gradients for subtraction tests. + x.grad = TensorD.zerosLike (x.data) + y.grad = TensorD.zerosLike (y.data) + + // Test Subtraction: z = x - y. + println ("\nTesting Subtraction") + val subOp = Sub (x, y) + val zSub = subOp.forward () + println (s"z (x - y): $zSub") + zSub.backward () + println (s"x.grad after subtraction: ${x.grad}") + println (s"y.grad after subtraction: ${y.grad}") + + // Test Subtraction with constant. + println ("\nTesting Subtraction with Constant") + val subConstOp = SubConstant (x, 5.0) + val zSubConst = subConstOp.forward () + println (s"z (x - 5.0): $zSubConst") + zSubConst.backward () + println (s"x.grad after constant subtraction: ${x.grad}") + + // Reset gradients for multiplication tests. + x.grad = TensorD.zerosLike (x.data) + y.grad = TensorD.zerosLike (y.data) + + // Test Multiplication: z = x * y. 
+ println ("\nTesting Multiplication") + val mulOp = Mul (x, y) + val zMul = mulOp.forward () + println (s"z (x * y): $zMul") + zMul.backward () + println (s"x.grad after multiplication: ${x.grad}") + println (s"y.grad after multiplication: ${y.grad}") + + // Test Multiplication with constant. + println ("\nTesting Multiplication with Constant") + val mulConstOp = MulConstant (x, 2.0) + val zMulConst = mulConstOp.forward () + println (s"z (x * 2.0): $zMulConst") + zMulConst.backward () + println (s"x.grad after constant multiplication: ${x.grad}") + + // Reset gradients for division tests. + x.grad = TensorD.zerosLike (x.data) + y.grad = TensorD.zerosLike (y.data) + + // Test Division: z = x / y. + println ("\nTesting Division") + val divOp = Div (x, y) + val zDiv = divOp.forward () + println (s"z (x / y): $zDiv") + zDiv.backward () + println (s"x.grad after division: ${x.grad}") + println (s"y.grad after division: ${y.grad}") + + // Test Division with constant. + println ("\nTesting Division with Constant") + val divConstOp = DivConstant (x, 2.0) + val zDivConst = divConstOp.forward () + println (s"z (x / 2.0): $zDivConst") + zDivConst.backward () + println (s"x.grad after constant division: ${x.grad}") + + println ("\nAll tests completed.") + end autogradTest1 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Complex expression test combining multiple operations. 
+ * > runMain scalation.modeling.autograd.AutogradTest.autogradTest2 + */ + @main def autogradTest2 (): Unit = + banner ("Autograd Complex Operations - Test 2") + val data1 = TensorD ((2, 2, 1), 1.0, 2.0, 3.0, 4.0) + val data2 = TensorD ((2, 2, 1), 5.0, 6.0, 7.0, 8.0) + val x = Variabl (data1, name = Some ("x")) + val y = Variabl (data2, name = Some ("y")) + + banner ("Autograd Complex Expression Test") + // Complex operation: z = (x * y) + (x / y) - y + println ("\nTesting Complex Expression: z = (x * y) + (x / y) - y") + val zFinal = (x * y) + (x / y) - y + println (s"zFinal ((x * y) + (x / y) - y): $zFinal") + zFinal.backward () + println (s"x.grad after complex operation: ${x.grad}") + println (s"y.grad after complex operation: ${y.grad}") + end autogradTest2 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Test math operations: square root, logarithm, reciprocal, and mean. + * > runMain scalation.modeling.autograd.AutogradTest.autogradTest3 + */ + @main def autogradTest3 (): Unit = + banner (" Autograd Math Operations - Test 3") + val data = TensorD ((2, 2, 1), 1.0, 2.0, 4.0, 8.0) + val x = Variabl (data, name = Some("x")) + + println ("\nTesting Exponentiation and Logarithms") + // Test Sqrt. + println ("Testing Sqrt") + val sqrtOp = Sqrt (x) + val zSqrt = sqrtOp.forward () + println (s"z (sqrt(x)): $zSqrt") + zSqrt.backward () + println (s"x.grad after sqrt: ${x.grad}") + + // Reset gradient. + x.grad = TensorD.zerosLike (x.data) + + // Test Log. + println ("\nTesting Logarithm") + val logOp = Log (x) + val zLog = logOp.forward () + println (s"z (log(x)): $zLog") + zLog.backward () + println (s"x.grad after log: ${x.grad}") + + // Reset gradient. + x.grad = TensorD.zerosLike (x.data) + + // Test Reciprocal. 
+ println ("\nTesting Reciprocal") + val reciprocalOp = Reciprocal (x) + val zReciprocal = reciprocalOp.forward () + println (s"z (1/x): $zReciprocal") + zReciprocal.backward () + println (s"x.grad after reciprocal: ${x.grad}") + + // Reset gradient. + x.grad = TensorD.zerosLike (x.data) + + // Test Mean. + println ("\nTesting Mean") + val meanOp = Mean (x) + val zMean = meanOp.forward () + println (s"z (mean(x)): $zMean") + zMean.backward () + println (s"x.grad after mean: ${x.grad}") + + println ("\nAll tests in Test 3 completed.") + end autogradTest3 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Test activation functions, including ReLU, Sigmoid, Tanh, GeLU, Softmax, Identity, + * LeakyReLU, and ELU. + * > runMain scalation.modeling.autograd.AutogradTest.autogradTest4 + */ + @main def autogradTest4 (): Unit = + banner (" Autograd Activation Functions - Test 4") + val t = VectorD.range (-50, 50) / 10.0 + val data = TensorD.fromVector (t) + val x = Variabl (data, name = Some ("x")) + + println ("\nTesting Activation Functions") + // Test ReLU. + println ("Testing ReLU") + val reluOp = ReLU (x) + val zReLU = reluOp.forward () + println (s"z (ReLU(x)): $zReLU") + zReLU.backward () + println (s"x.grad after ReLU: ${x.grad}") + + // Reset gradient. + x.grad = TensorD.zerosLike (x.data) + + // Test Sigmoid. + println ("\nTesting Sigmoid") + val sigmoidOp = Sigmoid (x) + val zSigmoid = sigmoidOp.forward () + println (s"z (sigmoid(x)): $zSigmoid") + zSigmoid.backward () + println (s"x.grad after sigmoid: ${x.grad}") + + // Reset gradient. + x.grad = TensorD.zerosLike (x.data) + + // Test Tanh. + println ("\nTesting Tanh") + val tanhOp = Tanh (x) + val zTanh = tanhOp.forward () + println (s"z (tanh(x)): $zTanh") + zTanh.backward () + println (s"x.grad after tanh: ${x.grad}") + + // Reset gradient. + x.grad = TensorD.zerosLike (x.data) + + // Test GeLU. 
+ println ("\nTesting GeLU") + val geluOp = GeLU (x) + val zGeLU = geluOp.forward () + println (s"z (GeLU(x)): $zGeLU") + zGeLU.backward () + println (s"x.grad after GeLU: ${x.grad}") + + // Reset gradient. + x.grad = TensorD.zerosLike (x.data) + + // Test Softmax. + println ("\nTesting Softmax") + val softmaxOp = Softmax (x) + val zSoftmax = softmaxOp.forward () + println (s"z (Softmax(x)): $zSoftmax") + zSoftmax.backward () + println (s"x.grad after Softmax: ${x.grad}") + + // Reset gradient. + x.grad = TensorD.zerosLike (x.data) + + // Test Identity. + println ("\nTesting Identity") + val identityOp = Identity (x) + val zIdentity = identityOp.forward () + println (s"z (Identity(x)): $zIdentity") + zIdentity.backward () + println (s"x.grad after Identity: ${x.grad}") + + // Reset gradient. + x.grad = TensorD.zerosLike (x.data) + + // Test LeakyReLU. + println ("\nTesting LeakyReLU") + val leakyReLUOp = LeakyReLU (x) + val zLeakyReLU = leakyReLUOp.forward () + println (s"z (LeakyReLU(x)): $zLeakyReLU") + zLeakyReLU.backward () + println (s"x.grad after LeakyReLU: ${x.grad}") + + // Reset gradient. + x.grad = TensorD.zerosLike (x.data) + + // Test ELU. + println ("\nTesting ELU") + val eluOp = ELU (x) + val zELU = eluOp.forward () + println (s"z (ELU(x)): $zELU") + zELU.backward () + println (s"x.grad after ELU: ${x.grad}") + + println ("\nAll tests in Test 4 completed.") + end autogradTest4 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Tests the loss functions: SSE, MSE, and MAE. + * > runMain scalation.modeling.autograd.AutogradTest.autogradTest5 + */ + @main def autogradTest5 (): Unit = + banner (" Autograd Loss Functions - Test 5") + val predData = TensorD ((2, 2, 1), 0.9, 0.1, 0.8, 0.2) + val targetData = TensorD ((2, 2, 1), 1.0, 0.0, 1.0, 0.0) + val pred = Variabl (predData, name = Some ("pred")) + val target = Variabl (targetData, name = Some ("target")) + + println ("\nTesting Loss Functions") + // Test SSE Loss. 
+ println ("Testing SSE Loss") + val sseLossOp = SSELoss (pred, target) + val zSSE = sseLossOp.forward () + println (s"z (SSE Loss): $zSSE") + zSSE.backward () + println (s"pred.grad after SSE Loss: ${pred.grad}") + + // Reset gradients. + pred.grad = TensorD.zerosLike (pred.data) + + // Test MSE Loss. + println ("\nTesting MSE Loss") + val mseLossOp = MSELoss (pred, target) + val zMSE = mseLossOp.forward () + println (s"z (MSE Loss): $zMSE") + zMSE.backward () + println (s"pred.grad after MSE Loss: ${pred.grad}") + + // Reset gradients. + pred.grad = TensorD.zerosLike (pred.data) + + // Test MAE Loss. + println ("\nTesting MAE Loss") + val maeLossOp = MAELoss (pred, target) + val zMAE = maeLossOp.forward () + println (s"z (MAE Loss): $zMAE") + zMAE.backward () + println (s"pred.grad after MAE Loss: ${pred.grad}") + + println ("\nAll tests in Test 5 completed.") + end autogradTest5 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Tests tensor operations including dot product, matrix multiplication, and batched + * matrix multiplication. + * > runMain scalation.modeling.autograd.AutogradTest.autogradTest6 + */ + @main def autogradTest6 (): Unit = + banner ("Autograd Tensor Operations - Test 6") + // Dot product test. + banner ("Testing Dot Product") + val vectorA = TensorD.fromVector (VectorD(1, 2, 3)) + val vectorB = TensorD.fromVector (VectorD(4, 5, 6)) + val varVecA = Variabl (vectorA, name = Some ("vecA")) + val varVecB = Variabl (vectorB, name = Some ("vecB")) + val dotResult = varVecA.dot (varVecB) + println (s"Dot product result: $dotResult.data") + dotResult.backward () + println (s"vecA.grad after dot product: ${varVecA.grad}") + println (s"vecB.grad after dot product: ${varVecB.grad}") + println ("✅ Dot product forward & backward test passed!") + + // Matrix multiplication test. 
+ banner ("Testing Matrix Multiplication (batch‑first, depth=1)") + val C = MatrixD ((2, 3), 1, 2, 3, 4, 5, 6) + val D = MatrixD ((3, 2), 7, 8, 9, 10, 11, 12) + println (s"C :\n$C") + println (s"D :\n$D") + val matA = TensorD.fromMatrix (C, Some ((1, 2, 3))) + val matB = TensorD.fromMatrix (D, Some ((1, 3, 2))) + println (s"matA shape: ${matA.shape}") + println (s"matB shape: ${matB.shape}") + val varA = Variabl (matA, name = Some ("matA")) + val varB = Variabl (matB, name = Some ("matB")) + val out = varA.matmul (varB) + println (s"Raw TensorD result:\n${out.data}") + val matRes = out.data(0) + println (s"As MatrixD:\n$matRes") + val expected = TensorD.fromMatrix (MatrixD((2, 2), 58, 64, 139, 154)) + assert (out.data == expected, s"Forward failed, expected\n$expected\nbut got\n$out") + out.backward () + println (s"A.grad:\n${varA.grad}") + println (s"B.grad:\n${varB.grad}") + println ("✅ MatMul forward & backward test passed!") + + // Batched Matrix Multiplication test. + banner ("Testing Batched Matrix Multiplication") + val A0 = MatrixD ((3, 1), 1.0, 2.0, 3.0) + val A1 = MatrixD ((3, 1), 4.0, 5.0, 6.0) + val batchedA = TensorD (A0, A1) + val B0 = MatrixD((1, 2), 10.0, 20.0) + val B1 = MatrixD((1, 2), 30.0, 40.0) + val batchedB = TensorD (B0, B1) + val varBmmA = Variabl (batchedA, name = Some ("bmmA")) + val varBmmB = Variabl (batchedB, name = Some ("bmmB")) + println (s"BMM A shape: ${varBmmA.shape}") + println (s"BMM B shape: ${varBmmB.shape}") + val bmmResult = varBmmA.bmm (varBmmB) + println (s"BMM result:\n${bmmResult.data}") + bmmResult.backward () + println (s"bmmA.grad after BMM:\n${varBmmA.grad}") + println (s"bmmB.grad after BMM:\n${varBmmB.grad}") + println ("✅ BMM forward & backward test passed!") + end autogradTest6 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Tests the Linear layer with autograd. 
+ * > runMain scalation.modeling.autograd.AutogradTest.autogradTest7 + */ + @main def autogradTest7 (): Unit = + banner ("Autograd Linear Layer - Test 7") + val inFeatures = 4 + val outFeatures = 3 + val linear = Linear (inFeatures, outFeatures) + val inputData = TensorD ((2, 4, 1), 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0) + val inputVar = Variabl (inputData, name = Some ("input")) + val output = linear (inputVar) + println (s"Linear layer output: $output") + println (s"Weight before: ${linear.weight.data}") + println (s"Bias before: ${linear.bias.data}") + output.backward () + println (s"Input gradient: ${inputVar.grad}") + println (s"Weight gradient: ${linear.weight.grad}") + println (s"Bias gradient: ${linear.bias.grad}") + end autogradTest7 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Tests a single-layer network on the AutoMPG regression dataset. + * > runMain scalation.modeling.autograd.AutogradTest.autogradTest8 + */ + @main def autogradTest8 (): Unit = + banner ("Autograd 1 Layer Net with AutoMPG (Testing against Regression) - Test 8") + println (s"x.shape: ${x.dims}") + println (s"y.shape: ${y.dim}") + val tensorX = TensorD.fromMatrix (x).permute (Seq(1, 2, 0)) + val tensorY = TensorD.fromVector (y) +// val meanX = TensorD.meanAlongAxis (tensorX, axis=0) +// val stdX = TensorD.stdAlongAxis (tensorX, axis=0) + val meanY = TensorD.meanAlongAxis (tensorY, axis=0) + val stdY = TensorD.stdAlongAxis (tensorY, axis=0) + val norm_x = TensorD.standardize (tensorX, axis=0) + val norm_y = TensorD.standardize (tensorY, axis=0) + println (s"norm_x.shape: ${norm_x.shape}") + println (s"norm_y.shape: ${norm_y.shape}") + val input = Variabl (norm_x) + var y_actual = Variabl (norm_y) + + case class Net() extends Module with Fit(x.dim2 - 1, x.dim2 - x.dim): + val nf1: Int = tensorX.dim2 + val outputNodes: Int = tensorY.dim2 + val fc1: Linear = Linear (nf1, outputNodes) + override def forward (x: Variabl): Variabl = x ~> fc1 ~> 
identity + end Net + + object Net: + def apply (): Net = new Net () + end Net + + val net = Net () + val optimizer = SGD (parameters = net.parameters, lr = 0.01, momentum = 0.9) + val permGen = new Optimizer_SGDM {}.permGenerator (norm_x.shape(0)) + val batchSize = 64 + val nB = norm_x.shape(0) / batchSize + + for j <- 0 to 1000 do + optimizer.zeroGrad () + val batches = permGen.igen.chop (nB) + var totalLoss = 0.0 + var batchCount = 0 + for ib <- batches do + val inputBatch = Variabl (norm_x(ib)) + val yBatch = Variabl (norm_y(ib)) + val output = net (inputBatch) + val loss = mseLoss (output, yBatch) + totalLoss += loss.data(0)(0)(0) + batchCount += 1 + loss.backward () + optimizer.step () + end for + val avgLoss = totalLoss / batchCount + if j % 100 == 0 then println (s"Epoch $j: Loss = $avgLoss") + end for + + println (s"Model structure: $net") + println (s"Weight shape: ${net.fc1.weight.shape}") + println (s"Bias shape: ${net.fc1.bias.shape}") + + val outputFinal = net (input) + val y_pred = outputFinal * Variabl (stdY) + Variabl (meanY) + y_actual = y_actual * Variabl (stdY) + Variabl (meanY) + println (s"y_pred shape: ${y_pred.shape}") + val qof = net.diagnose (y_actual.data.flattenToVector, y_pred.data.flattenToVector) + println (FitM.fitMap (qof, qoF_names)) + println (s"grad after convergence: ${net.fc1.weight.grad}") + println (s"weights after convergence: ${net.fc1.weight.data}") + end autogradTest8 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Tests a two-layer network. 
+ * > runMain scalation.modeling.autograd.AutogradTest.autogradTest9 + */ + @main def autogradTest9 (): Unit = + banner ("Autograd 2 Layer Net with AutoMPG (Testing against NeuralNet_3L - Test 9") + println (s"x.shape: ${x.shape}") + println (s"y.shape: ${y.dim}") + val tensorX = TensorD.fromMatrix (x).permute (Seq(1, 2, 0)) + val tensorY = TensorD.fromVector (y) +// val meanX = TensorD.meanAlongAxis (tensorX, axis = 0) +// val stdX = TensorD.stdAlongAxis (tensorX, axis = 0) + val meanY = TensorD.meanAlongAxis (tensorY, axis = 0) + val stdY = TensorD.stdAlongAxis (tensorY, axis = 0) + val norm_x = TensorD.standardize (tensorX, axis = 0) + val norm_y = TensorD.standardize (tensorY, axis = 0) + println (s"norm_x.shape: ${norm_x.shape}") + println (s"norm_y.shape: ${norm_y.shape}") + val input = Variabl (norm_x) + var y_actual = Variabl (norm_y) + + case class Net () extends Module with Fit (x.dim2 - 1, x.dim - x.dim2): + val nf1: Int = tensorX.dim2 + val hiddenNodes: Int = 2 * nf1 + 1 + val outputNodes: Int = tensorY.dim2 + val fc1: Linear = Linear (nf1, hiddenNodes) + val fc2: Linear = Linear (hiddenNodes, outputNodes) + override def forward (x: Variabl): Variabl = x ~> fc1 ~> tanh ~> fc2 ~> identity + end Net + + object Net: + def apply (): Net = new Net () + end Net + + val net = Net () + val optimizer = Adam (parameters = net.parameters, lr = 0.002, beta1 = 0.9, beta2 = 0.999) + val batchSize = 20 + val nB = norm_x.shape(0) / batchSize + + for j <- 0 to 400 do + val permGen = new Optimizer_SGDM {}.permGenerator (norm_x.shape(0)) + val batches = permGen.igen.chop (nB) + var totalLoss = 0.0 + var batchCount = 0 + for ib <- batches do + optimizer.zeroGrad () + val inputBatch = Variabl (norm_x(ib)) + val yBatch = Variabl (norm_y(ib)) + val output = net (inputBatch) + val loss = mseLoss (output, yBatch) + totalLoss += loss.data(0)(0)(0) + batchCount += 1 + loss.backward () + optimizer.step () + end for + val avgLoss = totalLoss / batchCount + if j % 100 == 0 then 
println (s"Epoch $j: Loss = $avgLoss") + end for + + println (s"Model structure: $net") + println (s"Weight shape: ${net.fc1.weight.shape}") + println (s"Bias shape: ${net.fc1.bias.shape}") + + val outputFinal = net(input) + val y_pred = outputFinal * Variabl (stdY) + Variabl (meanY) + y_actual = y_actual * Variabl (stdY) + Variabl (meanY) + println (s"y_pred shape: ${y_pred.shape}") + val qof = net.diagnose (y_actual.data.flattenToVector, y_pred.data.flattenToVector) + println (FitM.fitMap (qof, qoF_names)) + println (s"grad after convergence: ${net.fc1.weight.grad}") + println (s"weights after convergence: ${net.fc1.weight.data}") + end autogradTest9 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Tests a three-layer network with early stopping. + * > runMain scalation.modeling.autograd.AutogradTest.autogradTest10 + */ + @main def autogradTest10 (): Unit = + banner ("Autograd 3 Layer Net with AutoMPG (Testing against NeuralNet_3L - Test 10") + println (s"x.shape: ${x.shape}") + println (s"y.shape: ${y.dim}") + val tensorX = TensorD.fromMatrix (x).permute (Seq(1, 2, 0)) + val tensorY = TensorD.fromVector (y) +// val meanX = TensorD.meanAlongAxis (tensorX, axis = 0) +// val stdX = TensorD.stdAlongAxis (tensorX, axis = 0) + val meanY = TensorD.meanAlongAxis (tensorY, axis = 0) + val stdY = TensorD.stdAlongAxis (tensorY, axis = 0) + val norm_x = TensorD.standardize (tensorX, axis = 0) + val norm_y = TensorD.standardize (tensorY, axis = 0) + println (s"norm_x.shape: ${norm_x.shape}") + println (s"norm_y.shape: ${norm_y.shape}") + val input = Variabl (norm_x) + var y_actual = Variabl (norm_y) + + case class Net () extends Module with Fit(x.dim2 - 1, x.dim - x.dim2): + val nf1: Int = tensorX.dim2 + val hiddenNodes: Int = 2 * nf1 + 1 + val outputNodes: Int = tensorY.dim2 + val fc1: Linear = Linear (nf1, hiddenNodes) + val fc2: Linear = Linear (hiddenNodes, hiddenNodes) + val fc3: Linear = Linear (hiddenNodes, outputNodes) + 
override def forward (x: Variabl): Variabl = + val h1 = x ~> fc1 ~> tanh + val h2 = h1 ~> fc2 ~> sigmoid + val out = h2 ~> fc3 ~> identity + out + end Net + + object Net: + def apply (): Net = new Net () + end Net + + val net = Net () + val optimizer = SGD (parameters = net.parameters, lr = 0.25, momentum = 0.90) + val batchSize = 20 + val nB = ceil (norm_x.shape(0).toDouble / batchSize).toInt + println (s"nB: $nB") + + object monitor extends MonitorLoss + object opti extends Optimizer_SGDM + object EarlyStopper extends StoppingRule + val limit = 15 + var stopTraining = false + + for j <- 0 to 400 if ! stopTraining do + val permGen = opti.permGenerator (norm_x.shape(0)) + val batches = permGen.igen.chop (nB) + var totalLoss = 0.0 + var batchCount = 0 + for ib <- batches do + optimizer.zeroGrad () + val inputBatch = Variabl (norm_x(ib)) + val yBatch = Variabl (norm_y(ib)) + val output = net (inputBatch) + val loss = mseLoss (output, yBatch) + totalLoss += loss.data(0)(0)(0) + batchCount += 1 + loss.backward () + optimizer.step () + end for + val avgLoss = totalLoss / batchCount + monitor.collectLoss (avgLoss) + if j % 100 == 0 then println (s"Epoch $j: Loss = $avgLoss") + val (stopParams, currentBestLoss) = EarlyStopper.stopWhenContinuous (net.parameters, avgLoss, limit) + if stopParams != null then + println (s"Early stopping triggered at epoch $j with best loss $currentBestLoss") + net.setParameters (stopParams) + stopTraining = true + end for + + monitor.plotLoss ("NeuralNet4L") + val varData = y.variance + val bestLoss = monitor.getBestLoss * varData + println (s"Best Loss Unscaled: $bestLoss") + println (s"Best Loss Scaled: ${monitor.getBestLoss}") + println (s"Model structure: $net") + println (s"Weight shape: ${net.fc1.weight.shape}") + println (s"Bias shape: ${net.fc1.bias.shape}") + + val outputFinal = net(input) + val y_pred = outputFinal * Variabl (stdY) + Variabl (meanY) + y_actual = y_actual * Variabl (stdY) + Variabl (meanY) + println (s"y_pred shape: 
${y_pred.shape}") + val qof = net.diagnose(y_actual.data.flattenToVector, y_pred.data.flattenToVector) + println (FitM.fitMap (qof, qoF_names)) + println (s"grad after convergence: ${net.fc1.weight.grad}") + println (s"weights after convergence: ${net.fc1.weight.data}") + end autogradTest10 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Tests a three-layer network with early stopping on train/test split. + * > runMain scalation.modeling.autograd.AutogradTest.autogradTest11 + */ + @main def autogradTest11 (): Unit = + banner ("Autograd 3 Layer Net with AutoMPG (Testing against NeuralNet_XL - Test 11") + val permGenSplit = TnT_Split.makePermGen (x.dim) + val n_test = (0.4 * x.dim).toInt + val splitIdx = TnT_Split.testIndices (permGenSplit, n_test) + val (x_test, x_train, y_test, y_train) = TnT_Split (x, y, splitIdx) + println (s"x.shape: ${x.shape}") + println (s"y.shape: ${y.dim}") + val tensorX = TensorD.fromMatrix (x_train).permute(Seq(1, 2, 0)) + val tensorY = TensorD.fromVector (y_train) + val meanX = TensorD.meanAlongAxis (tensorX, axis = 0) + val stdX = TensorD.stdAlongAxis (tensorX, axis = 0) + val meanY = TensorD.meanAlongAxis (tensorY, axis = 0) + val stdY = TensorD.stdAlongAxis (tensorY, axis = 0) + val norm_x = TensorD.standardize (tensorX, axis = 0) + val norm_y = TensorD.standardize (tensorY, axis = 0) + println (s"norm_x.shape: ${norm_x.shape}") + println (s"norm_y.shape: ${norm_y.shape}") + val input = Variabl (norm_x) + var y_actual = Variabl (norm_y) + + case class Net () extends Module with Fit (x.dim2 - 1, x.dim - x.dim2): + val nf1: Int = tensorX.dim2 + val hiddenNodes: Int = 2 * nf1 + 1 + val outputNodes: Int = tensorY.dim2 + val fc1: Linear = Linear (nf1, hiddenNodes) + val fc2: Linear = Linear (hiddenNodes, hiddenNodes) + val fc3: Linear = Linear (hiddenNodes, outputNodes) + override def forward (x: Variabl): Variabl = + val h1 = x ~> fc1 ~> tanh + val h2 = h1 ~> fc2 ~> sigmoid + val out = h2 ~> fc3 
~> identity + out + end Net + + object Net: + def apply (): Net = new Net () + end Net + + val net = Net () + val optimizer = SGD (parameters = net.parameters, lr = 0.25, momentum = 0.90) + val batchSize = 20 + val nB = ceil (norm_x.shape(0).toDouble / batchSize).toInt + println (net.parameters.toString ()) + println (s"nB: $nB") + + object monitor extends MonitorLoss + object opti extends Optimizer_SGDM + object EarlyStopper extends StoppingRule + val limit = 20 + var stopTraining = false + + for j <- 0 to 400 if ! stopTraining do + val permGen = opti.permGenerator (norm_x.shape(0)) + val batches = permGen.igen.chop (nB) + var totalLoss = 0.0 + var batchCount = 0 + for ib <- batches do + optimizer.zeroGrad () + val inputBatch = Variabl (norm_x(ib)) + val yBatch = Variabl (norm_y(ib)) + val output = net (inputBatch) + val loss = mseLoss (output, yBatch) + totalLoss += loss.data(0)(0)(0) + batchCount += 1 + loss.backward () + optimizer.step () + end for + val avgLoss = totalLoss / batchCount + monitor.collectLoss (avgLoss) + if j % 100 == 0 then println (s"Epoch $j: Loss = $avgLoss") + val (stopParams, currentBestLoss) = EarlyStopper.stopWhenPatience (net.parameters, avgLoss, limit) + if stopParams != null then + println (s"Early stopping triggered at epoch $j with best loss $currentBestLoss") + net.setParameters (stopParams) + stopTraining = true + end for + + monitor.plotLoss("NeuralNet3L") + val varData = y.variance + val bestLoss = monitor.getBestLoss * varData + println (s"Best Loss Unscaled: $bestLoss") + println (s"Best Loss Scaled: ${monitor.getBestLoss}") + println (s"Model structure: $net") + println (s"Weight shape: ${net.fc1.weight.shape}") + println (s"Bias shape: ${net.fc1.bias.shape}") + + val outputFinal = net (input) + val y_pred = outputFinal * Variabl (stdY) + Variabl (meanY) + y_actual = y_actual * Variabl (stdY) + Variabl (meanY) + println (s"y_pred shape: ${y_pred.shape}") + banner ("Final Train Statistics") + val qof = net.diagnose 
(y_actual.data.flattenToVector, y_pred.data.flattenToVector) + println (FitM.fitMap (qof, qoF_names)) + val testX = TensorD.fromMatrix (x_test).permute (Seq(1, 2, 0)) + val testY = TensorD.fromVector (y_test) + val testNormX = (testX - meanX)/stdX + val testNormY = (testY - meanY)/stdY + val testInput = Variabl (testNormX) + val testActual = Variabl (testNormY) + val testOutput = net (testInput) + val testPred = testOutput * Variabl (stdY) + Variabl (meanY) + val testActualRescaled = testActual * Variabl (stdY) + Variabl (meanY) + println (s"test_pred shape: ${testPred.shape}") + println (s"Final Test Loss: ${mseLoss (testOutput, testActual).data(0)(0)(0)}") + banner ("Final Test Statistics") + val testQoF = net.diagnose (testActualRescaled.data.flattenToVector, testPred.data.flattenToVector) + println (FitM.fitMap (testQoF, qoF_names)) + end autogradTest11 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `autogradTestAll` Main method runs all autograd tests sequentially. + */ + @main def autogradTestAll (): Unit = + autogradTest1 () + autogradTest2 () + autogradTest3 () + autogradTest4 () + autogradTest5 () + autogradTest6 () + autogradTest7 () + autogradTest8 () + autogradTest9 () + autogradTest10 () + autogradTest11 () + banner ("All Autograd Tests Completed") + end autogradTestAll + +end AutogradTest + diff --git a/src/main/scala/scalation/modeling/autograd/Function.scala b/src/main/scala/scalation/modeling/autograd/Function.scala new file mode 100644 index 000000000..59e6cd88c --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/Function.scala @@ -0,0 +1,1853 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri April 25 19:46:13 EDT 2025 + * @see LICENSE (MIT style license file). 
+ * + * @note Autograd: Base Trait and Operations for Differentiable Functions + */ + +package scalation +package modeling +package autograd + +import scala.compiletime.uninitialized + +import scalation.mathstat.TensorD + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** A utility object for generating unique numeric IDs for `Function` nodes. + * This is primarily used for debugging and visualization purposes in the autograd system. + */ +private [autograd] object FunctionIdGen: + + /** Counter to keep track of the current ID. + */ + private var c = 0 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Generates the next unique ID by incrementing the counter. + * @return the next unique integer ID + */ + def next (): Int = { c += 1; c } + +end FunctionIdGen + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Function` base trait for all differentiable operations in the autograd system. + * A Function encapsulates both the forward computation (producing outputs) + * and the backward computation (propagating gradients). + * It also provides utility methods for handling unbroadcasting of shapes + * during the backward pass, ensuring correct gradient flow. + * Every custom operation should extend this trait and implement `forward` and `backward`. + */ +trait Function (using ops: AutogradOps): + + /** Unique numeric ID for this Function node (for graph viz/debugging). + */ + val id: Int = FunctionIdGen.next () + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Human-readable name of this op (defaults to simple class name). + */ + def opName: String = + val n = this.getClass.getSimpleName + if n.endsWith ("$") then n.dropRight (1) else n + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Map of attributes for visualization/debugging (default: empty). 
+ */ + def attributes: Map [String, String] = Map.empty + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the input variables to this Function. + * This works automatically for all case-class ops by iterating over their + * constructor fields and collecting those of type `Variabl`. + * @see Case classes and Product: https://scala-lang.org/api/3.x/scala/Product.html + */ + def inputs: Seq [Variabl] = this match + case p: Product => + p.productIterator.flatMap { + case v: Variabl => Seq (v) + case s: Seq[?] => s.collect { case v: Variabl => v } + case _ => Seq.empty + }.toSeq + case _ => Seq.empty + end inputs + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Performs the forward pass to compute the output variable. + * @return a Variabl containing the output data and gradient function. + */ + def forward (): Variabl + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Performs the backward pass given the upstream gradient. + * @param gradOutput the gradient tensor from the next layer. + */ + def backward (gradOutput: TensorD): Unit + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backpropagates gradients for functions with two inputs. + * @param v1 the first input variable. + * @param v2 the second input variable. + * @param gradOutput the upstream gradient tensor. + * @param computeGrad1 function to compute the gradient for v1. + * @param computeGrad2 function to compute the gradient for v2. + */ + def backpropForTwoInputs (v1: Variabl, v2: Variabl, gradOutput: TensorD, + computeGrad1: TensorD => TensorD, computeGrad2: TensorD => TensorD): Unit = + v1.backward (unbroadcast (computeGrad1 (gradOutput), v1.shape)) + v2.backward (unbroadcast (computeGrad2 (gradOutput), v2.shape)) + end backpropForTwoInputs + + // Possible improvements: Add requiresGrad and retainGraph options... 
+ + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Unbroadcasts a variable's tensor data to a specified old shape. + * @param v the variable to unbroadcast. + * @param oldShape the target shape. + * @return a new Variabl with data unbroadcasted. + */ + def unbroadcast (v: Variabl, oldShape: List [Int]): Variabl = + Variabl (unbroadcast (data = v.data, oldShape = oldShape)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Unbroadcasts a tensor to a given shape by summing across reduced dimensions. + * @param data the tensor data. + * @param oldShape the original shape. + * @return a TensorD with shape adjusted to oldShape. + * @throws Exception if unbroadcasting is not feasible. + */ + def unbroadcast (data: TensorD, oldShape: List [Int]): TensorD = + val currentShape = ops.shape(data) + var cur = data + for i <- oldShape.indices do + val (oldDim, newDim) = (oldShape(i), currentShape(i)) + if oldDim == newDim then {} // no change if dimensions match + else if oldDim == 1 then + cur = ops.sumAlongAxis (cur, i) // reduce dimension i by summing + else if oldDim != newDim then + throw new Exception ( + s"Cannot unbroadcast from shape $currentShape to $oldShape at axis $i") + cur + end unbroadcast + +end Function + +// ----------------------------------------------------------------------- +// ------------------------- ARITHMETIC FUNCTIONS ------------------------ +// ----------------------------------------------------------------------- + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the element-wise absolute value of a variable. + * @param v the input variable. + */ +case class Abs (v: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes the absolute value of v. + * @return a new Variabl containing |v|. 
+ */ + override def forward (): Variabl = Variabl (ops.abs (v.data), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: applies the chain rule using the sign of v. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = v.backward (ops.mul (gradOutput, ops.sign (v.data))) +end Abs + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the negation of a variable. + * @param v the input variable. + */ +case class Neg (v: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes the negation of v. + * @return a Variabl containing -v. + */ + override def forward (): Variabl = Variabl (ops.neg (v.data), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates the negated gradient. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = v.backward (ops.neg (gradOutput)) +end Neg + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the floor of a variable (element-wise). + * @param v the input variable. + */ +case class Floor (v: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes floor(v). + * @return a Variabl with floor applied. + */ + override def forward (): Variabl = Variabl (ops.floor (v.data), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: gradient is zero almost everywhere (undefined at integers). + * @note: The derivative of floor is zero almost everywhere + * @param gradOutput the upstream gradient. 
+ */ + override def backward (gradOutput: TensorD): Unit = v.backward (ops.zerosLike (v.data)) +end Floor + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the ceil of a variable (element-wise). + * @param v the input variable. + */ +case class Ceil (v: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes ceil(v). + * @return a Variabl with ceil applied. + */ + override def forward (): Variabl = Variabl (ops.ceil (v.data), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: gradient is zero almost everywhere (undefined at integers). + * @note: The derivative of ceil is zero almost everywhere + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = v.backward (ops.zerosLike (v.data)) +end Ceil + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the round of a variable (element-wise). + * @param v the input variable. + */ +case class Round (v: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes round(v). + * @return a Variabl with round applied. + */ + override def forward (): Variabl = Variabl (ops.round (v.data), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: gradient is zero almost everywhere (undefined at half-integers). + * @note: The derivative of round is zero almost everywhere + * @param gradOutput the upstream gradient. 
+ */ + override def backward (gradOutput: TensorD): Unit = v.backward (ops.zerosLike (v.data)) +end Round + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Clips the elements of a variable to the range [min, max] (element-wise). + * Gradient is 1 for elements strictly inside (min, max), 0 for clipped ones (ties get 0.25 via mask product heuristic). + * @param v the input variable. + * @param min lower bound. + * @param max upper bound. + */ +case class Clip (v: Variabl, min: Double, max: Double)(using ops: AutogradOps) extends Function: + + override def forward (): Variabl = Variabl (ops.clipByValue (v.data, min, max), gradFn = Some (this)) + + override def backward (gradOutput: TensorD): Unit = + // diffLow > 0 and diffHigh > 0 indicates interior points + val diffLow = ops.subScalar (v.data, min) // v - min + val diffHigh = ops.sub (ops.fullLike(v.data, max), v.data) // max - v + val signLow = ops.sign (diffLow) + val signHigh = ops.sign (diffHigh) + val maskLow = ops.mulScalar (ops.addScalar (signLow, 1.0), 0.5) // 1 where v>min + val maskHigh = ops.mulScalar (ops.addScalar (signHigh, 1.0), 0.5) // 1 where vv2, 0 if v1v2, 0.5 tie + val mask2 = ops.sub (ones, mask1) + val g1 = unbroadcast (ops.mul (gradOutput, mask1), v1.shape) + val g2 = unbroadcast (ops.mul (gradOutput, mask2), v2.shape) + v1.backward (g1) + v2.backward (g2) +end Min + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Element-wise maximum between a variable and a scalar. + * Gradient is 1 where v > s; 0 where v < s; 0.5 where equal. + * @param v the input variable. + * @param s the scalar. 
+ */ +case class MaxScalar (v: Variabl, s: Double)(using ops: AutogradOps) extends Function: + + override def forward (): Variabl = Variabl (ops.maxScalar (v.data, s), gradFn = Some (this)) + + override def backward (gradOutput: TensorD): Unit = + val diff = ops.subScalar (v.data, s) // v - s + val mask = ops.mulScalar (ops.addScalar (ops.sign(diff), 1.0), 0.5) + v.backward (ops.mul (gradOutput, mask)) +end MaxScalar + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Element-wise minimum between a variable and a scalar. + * Gradient is 1 where v < s; 0 where v > s; 0.5 where equal. + * @param v the input variable. + * @param s the scalar. + */ +case class MinScalar (v: Variabl, s: Double)(using ops: AutogradOps) extends Function: + + override def forward (): Variabl = Variabl (ops.minScalar (v.data, s), gradFn = Some (this)) + + override def backward (gradOutput: TensorD): Unit = + val diff = ops.subScalar (v.data, s) // v - s + val signD = ops.sign (diff) + val ones = ops.onesLike (signD) + val mask = ops.mulScalar (ops.sub(ones, signD), 0.5) // (1 - sign)/2 + v.backward (ops.mul (gradOutput, mask)) +end MinScalar + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the maximum value in a variable (reduces to a scalar). + * Gradient is distributed equally among all elements achieving the max (handles ties). + * @param v the input variable. + */ +case class MaxValue (v: Variabl)(using ops: AutogradOps) extends Function: + + private var maxVal: Double = Double.NaN + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes max(v) -> scalar tensor. + * @return a scalar Variabl containing the maximum value. 
+ */ + override def forward (): Variabl = + maxVal = ops.maxValue (v.data) + Variabl (ops.scalar (maxVal), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: gradient flows to max positions; ties split 1/k. + * @param gradOutput upstream scalar gradient. + */ + override def backward (gradOutput: TensorD): Unit = + // sign(v - max) gives 0 at max positions, negative elsewhere. + val diff = ops.subScalar (v.data, maxVal) + val signDiff = ops.sign (diff) + val absSign = ops.abs (signDiff) + val ones = ops.onesLike (absSign) + val mask = ops.sub (ones, absSign) // 1 at maxima, 0 elsewhere + val k = ops.sum (mask) match + case 0.0 => 1.0 // safety (should not happen) + case kk => kk + val normMask = ops.divScalar (mask, k) + val scaled = ops.mulScalar (normMask, gradOutput(0)(0)(0)) + v.backward (scaled) +end MaxValue + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the minimum value in a variable (reduces to a scalar). + * Gradient is distributed equally among all elements achieving the min (handles ties). + * @param v the input variable. + */ +case class MinValue (v: Variabl)(using ops: AutogradOps) extends Function: + + private var minVal: Double = Double.NaN + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes min(v) -> scalar tensor. + * @return a scalar Variabl containing the minimum value. + */ + override def forward (): Variabl = + minVal = ops.minValue (v.data) + Variabl (ops.scalar (minVal), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: gradient flows to min positions; ties split 1/k. + * + * @param gradOutput upstream scalar gradient. 
+ */ + override def backward (gradOutput: TensorD): Unit = + val diff = ops.subScalar (v.data, minVal) // 0 at minima, positive elsewhere + val signDiff = ops.sign (diff) // 0 at minima, 1 elsewhere + val absSign = ops.abs (signDiff) + val ones = ops.onesLike (absSign) + val mask = ops.sub (ones, absSign) // 1 at minima + val k = ops.sum (mask) match + case 0.0 => 1.0 + case kk => kk + val normMask = ops.divScalar (mask, k) + val scaled = ops.mulScalar (normMask, gradOutput(0)(0)(0)) + v.backward (scaled) +end MinValue + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the square root of a variable. + * @param v the input variable. + */ +case class Sqrt (v: Variabl)(using ops: AutogradOps) extends Function: + + private var sqrtCache: Option [TensorD] = None + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes the square root of v. + * @return a Variabl containing sqrt(v). + */ + override def forward (): Variabl = + sqrtCache = Some (ops.sqrt (v.data)) + Variabl (sqrtCache.get, gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates gradient using the derivative of sqrt. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + v.backward (ops.div (gradOutput, ops.mulScalar (sqrtCache.get, 2.0))) +end Sqrt + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the natural logarithm of a variable. + * @param v the input variable. + */ +case class Log (v: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes log(v). + * @return a Variabl containing log(v). 
+ */ + override def forward (): Variabl = Variabl (ops.log (v.data), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: applies the derivative 1/v. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + v.backward (ops.div (gradOutput, v.data)) +end Log + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the reciprocal of a variable. + * @param v the input variable. + */ +case class Reciprocal (v: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes the reciprocal of v. + * @return a Variabl containing 1/v. + */ + override def forward (): Variabl = Variabl (ops.reciprocal (v.data), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: computes the derivative -1/v^2. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + v.backward (ops.div (ops.neg (gradOutput), ops.pow(v.data, 2))) + +end Reciprocal + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the logarithm of a variable with a specified base. + * @param v the input variable. + * @param base the base for the logarithm. + */ +case class LogBase (v: Variabl, base: Double)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes log base 'base' of v. + * @return a Variabl containing log_base(v). + */ + override def forward (): Variabl = Variabl (ops.logBase (v.data, base), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: adjusts the gradient by dividing by (v * log(base)). 
+ * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + val denominator = ops.mulScalar (v.data, math.log (base)) + v.backward (ops.div (gradOutput, denominator)) + +end LogBase + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the sum of all elements in a variable. + * @param v the input variable. + */ +case class Sum (v: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes the sum and returns a scalar tensor. + * @return a Variabl containing the sum as a scalar. + */ + override def forward (): Variabl = Variabl (ops.scalar (ops.sum(v.data)), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates the gradient by filling a tensor with the scalar gradient. + * @param gradOutput the upstream gradient (scalar). + */ + override def backward (gradOutput: TensorD): Unit = + val grad = ops.fullLike (v.data, gradOutput(0)(0)(0)) + v.backward (grad) +end Sum + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes element-wise addition of two variables. + * @param v1 the first variable. + * @param v2 the second variable. + */ +case class Add (v1: Variabl, v2: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes v1 + v2. + * @return a Variabl containing the sum. + */ + override def forward (): Variabl = Variabl (ops.add (v1.data, v2.data), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates gradient to both inputs (unbroadcast if necessary). + * @param gradOutput the upstream gradient. 
+ */ + override def backward (gradOutput: TensorD): Unit = + val g1 = unbroadcast (gradOutput, v1.shape) + v1.backward (g1) + val g2 = unbroadcast (gradOutput, v2.shape) + v2.backward (g2) +end Add + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Adds a constant value to a variable. + * @param v the input variable. + * @param d the constant to add. + */ +case class AddConstant (v: Variabl, d: Double)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes v + d. + * @return a Variabl with the constant added. + */ + override def forward (): Variabl = Variabl (ops.addScalar (v.data, d), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: simply propagates the upstream gradient. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = v.backward (gradOutput) +end AddConstant + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes element-wise subtraction of two variables. + * @param v1 the minuend. + * @param v2 the subtrahend. + */ +case class Sub (v1: Variabl, v2: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes v1 - v2. + * @return a Variabl containing the difference. + */ + override def forward (): Variabl = Variabl (ops.sub (v1.data, v2.data), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates gradient to v1 normally and to v2 as negative. + * @param gradOutput the upstream gradient. 
+ */ + override def backward (gradOutput: TensorD): Unit = + val g1 = unbroadcast (gradOutput, v1.shape) + v1.backward (g1) + val g2 = unbroadcast (gradOutput, v2.shape) + v2.backward (ops.neg (g2)) +end Sub + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Subtracts a constant value from a variable. + * @param v the input variable. + * @param d the constant to subtract. + */ +case class SubConstant (v: Variabl, d: Double)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes v - d. + * @return a Variabl with the constant subtracted. + */ + override def forward (): Variabl = Variabl (ops.subScalar (v.data, d), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: simply propagates the upstream gradient. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = v.backward (gradOutput) +end SubConstant + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes element-wise multiplication of two variables. + * @param v1 the first variable. + * @param v2 the second variable. + */ +case class Mul (v1: Variabl, v2: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes v1 * v2. + * @return a Variabl containing the product. + */ + override def forward (): Variabl = Variabl (ops.mul (v1.data, v2.data), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: uses the chain rule to propagate gradients. + * @param gradOutput the upstream gradient. 
+ */ + override def backward (gradOutput: TensorD): Unit = + backpropForTwoInputs (v1, v2, gradOutput, + (g: TensorD) => ops.mul (g, v2.data), + (g: TensorD) => ops.mul (v1.data, g)) +end Mul + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Multiplies a variable by a constant. + * @param v the input variable. + * @param d the constant multiplier. + */ +case class MulConstant (v: Variabl, d: Double)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes v * d. + * @return a Variabl with data scaled by d. + */ + override def forward (): Variabl = Variabl (ops.mulScalar (v.data, d), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: multiplies the gradient by d. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + val g = ops.mulScalar (gradOutput, d) + v.backward (g) +end MulConstant + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes element-wise division of two variables. + * @param v1 the dividend. + * @param v2 the divisor. + */ +case class Div (v1: Variabl, v2: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes v1 / v2. + * @return a Variabl with divided data. + */ + override def forward (): Variabl = Variabl (ops.div(v1.data, v2.data), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates gradients with appropriate adjustments. + * @param gradOutput the upstream gradient. 
+ */ + override def backward (gradOutput: TensorD): Unit = + backpropForTwoInputs (v1, v2, gradOutput, + (g: TensorD) => ops.div (g, v2.data), + (g: TensorD) => ops.div (ops.mul (ops.neg (v1.data), g), ops.pow (v2.data, 2))) +end Div + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Divides a variable by a constant. + * @param v the input variable. + * @param d the constant divisor. + */ +case class DivConstant (v: Variabl, d: Double)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes v / d. + * @return a Variabl with data divided by d. + */ + override def forward (): Variabl = Variabl (ops.divScalar (v.data, d), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates gradient scaled by 1/d. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + val g = ops.divScalar (gradOutput, d) + v.backward (g) +end DivConstant + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Raises a variable to an integer power. + * @param v the input variable. + * @param s the exponent. + */ +case class Pow (v: Variabl, s: Int)(using ops: AutogradOps) extends Function: + + private var powCache: Option [TensorD] = None + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes v raised to the power s. + * @return a Variabl with powered data. + */ + override def forward (): Variabl = + powCache = Some (ops.pow (v.data, s)) + Variabl (powCache.get, gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: applies the derivative of the power function. + * @param gradOutput the upstream gradient. 
+ */ + override def backward (gradOutput: TensorD): Unit = + val factor = ops.div (powCache.get, v.data) + v.backward (ops.mul (ops.mulScalar (gradOutput, s), factor)) +end Pow + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the exponential of a variable. + * @param v the input variable. + */ +case class Exp (v: Variabl)(using ops: AutogradOps) extends Function: + + private var expCache: Option [TensorD] = None + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes exp(v). + * @return a Variabl containing the exponential. + */ + override def forward (): Variabl = + expCache = Some (ops.exp(v.data)) + Variabl (expCache.get, gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates gradient scaled by the exponential. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + v.backward (ops.mul (gradOutput, expCache.get)) + expCache = None +end Exp + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the mean of all elements in a variable. + * @param v the input variable. + */ +case class Mean (v: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes the mean and fills a tensor with it. + * @return a Variabl with data filled by the mean value. + */ + override def forward (): Variabl = + val out = ops.mean (v.data) + Variabl (ops.scalar (out), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: scales the gradient and fills a tensor accordingly. + * @param gradOutput the upstream gradient. 
+ */ + override def backward (gradOutput: TensorD): Unit = + val n = ops.shape (v.data).product.toDouble + v.backward (ops.fullLike (v.data, gradOutput(0)(0)(0) / n)) +end Mean + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the variance of all elements in a variable (population variance). + * Uses definition Var(x) = mean((x - mean(x))^2). + * @param v the input variable. + */ +case class Variance (v: Variabl)(using ops: AutogradOps) extends Function: + + private var meanCache: Double = Double.NaN + private var varCache: Double = Double.NaN + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes variance over all elements -> scalar tensor. + * @return a scalar Variabl containing variance. + */ + override def forward (): Variabl = + meanCache = ops.mean (v.data) + val centered = ops.subScalar (v.data, meanCache) + val sq = ops.pow (centered, 2) + varCache = ops.mean (sq) + Variabl (ops.scalar (varCache), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: dVar/dx = 2 (x - mean)/N. + * @param gradOutput upstream scalar gradient. + */ + override def backward (gradOutput: TensorD): Unit = + val n = v.shape.product.toDouble + val diff = ops.subScalar (v.data, meanCache) + val coeff = (2.0 / n) * gradOutput(0)(0)(0) + val grad = ops.mulScalar (diff, coeff) + v.backward (grad) +end Variance + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the standard deviation of all elements in a variable. + * Std(x) = sqrt(Var(x)); derivative ds/dx = (x - mean)/(N * std). + * @param v the input variable. 
+ */ +case class Std (v: Variabl)(using ops: AutogradOps) extends Function: + + private var meanCache: Double = Double.NaN + private var varCache: Double = Double.NaN + private var stdCache: Double = Double.NaN + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes std over all elements -> scalar tensor. + * @return a scalar Variabl containing std. + */ + override def forward (): Variabl = + meanCache = ops.mean (v.data) + val centered = ops.subScalar (v.data, meanCache) + val sq = ops.pow (centered, 2) + varCache = ops.mean (sq) + stdCache = math.sqrt (varCache) + Variabl (ops.scalar (stdCache), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: dStd/dx = (x - mean)/(N * std). If std=0 => gradient 0. + * + * @param gradOutput upstream scalar gradient. + */ + override def backward (gradOutput: TensorD): Unit = + val n = v.shape.product.toDouble + if stdCache == 0.0 || stdCache.isNaN then + v.backward (ops.zerosLike (v.data)) + else + val diff = ops.subScalar (v.data, meanCache) + val coeff = gradOutput(0)(0)(0) / (n * stdCache) + val grad = ops.mulScalar (diff, coeff) + v.backward (grad) +end Std + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the mean of a variable along a specified axis (dimension reduced to size 1). + * @param v the input variable. + * @param axis the axis along which to compute the mean. + */ +case class MeanAlongAxis(v: Variabl, axis: Int)(using ops: AutogradOps) extends Function: + + private var nAxis: Int = 0 + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: mean along axis -> keeps dimensionality (axis size becomes 1). + * @return a Variabl containing the reduced mean tensor. 
+ */ + override def forward (): Variabl = + nAxis = v.shape (axis) + Variabl (ops.meanAlongAxis (v.data, axis), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: distributes gradient equally across the axis (broadcasted). + * @param gradOutput upstream gradient with axis size 1. + */ + override def backward (gradOutput: TensorD): Unit = + val scale = 1.0 / nAxis + // Broadcast gradOutput over axis by multiplying with a tensor of ones matching v.shape. + val expanded = ops.mul (ops.fullLike (v.data, 1.0), gradOutput) + val grad = ops.mulScalar (expanded, scale) + v.backward (grad) +end MeanAlongAxis + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the variance of a variable along a specified axis (population variance). + * @param v the input variable. + * @param axis the axis along which to compute variance. + */ +case class VarianceAlongAxis (v: Variabl, axis: Int)(using ops: AutogradOps) extends Function: + + private var nAxis: Int = 0 + private var meanAxis: TensorD = uninitialized + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: variance along axis -> axis dimension becomes 1. + * @return a Variabl containing variance along axis. + */ + override def forward (): Variabl = + nAxis = v.shape (axis) + meanAxis = ops.meanAlongAxis (v.data, axis) + val diff = ops.sub (v.data, meanAxis) + val sq = ops.pow (diff, 2) + val varAxis = ops.meanAlongAxis (sq, axis) + Variabl (varAxis, gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: dVar/dx = 2*(x - mean)/n along the specified axis (broadcasted). + * @param gradOutput upstream gradient with axis size 1. 
+ */ + override def backward (gradOutput: TensorD): Unit = + val coeff = 2.0 / nAxis + val diff = ops.sub (v.data, meanAxis) + val base = ops.mulScalar (diff, coeff) + val grad = ops.mul (base, gradOutput) // broadcast gradOutput + v.backward (grad) +end VarianceAlongAxis + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the standard deviation of a variable along a specified axis. + * @param v the input variable. + * @param axis the axis along which to compute std. + */ +case class StdAlongAxis (v: Variabl, axis: Int)(using ops: AutogradOps) extends Function: + + private var nAxis: Int = 0 + private var meanAxis: TensorD = uninitialized + private var stdAxis: TensorD = uninitialized + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: std along axis -> axis dimension becomes 1. + * @return a Variabl containing std along axis. + */ + override def forward (): Variabl = + nAxis = v.shape (axis) + meanAxis = ops.meanAlongAxis (v.data, axis) + val diff = ops.sub (v.data, meanAxis) + val sq = ops.pow (diff, 2) + val varAxis = ops.meanAlongAxis (sq, axis) + + // stdAxis = sqrt(varAxis) element-wise (varAxis axis size already 1). + // Using reciprocal + pow not needed; rely on sqrt over broadcasting by reuse Sqrt op? + // Simpler: convert to Variabl then sqrt not desired. + // We'll leverage math.sqrt by mapping via TensorD operations already exposed as sqrt at AutogradOps? + // Not available for tensor element wise with shape? We have ops.sqrt. + + stdAxis = ops.sqrt (varAxis) + Variabl (stdAxis, gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: dStd/dx = (x - mean)/(N * std). Handle std=0 with zero gradient. + * @param gradOutput upstream gradient with axis size 1. + */ + override def backward (gradOutput: TensorD): Unit = + // Avoid divide by zero: clamp stdAxis. 
+ val denom = ops.maxScalar (ops.mulScalar (stdAxis, nAxis.toDouble), 1e-12) + val diff = ops.sub (v.data, meanAxis) + val base = ops.div (diff, denom) + val grad = ops.mul (base, gradOutput) + v.backward (grad) +end StdAlongAxis + +// ----------------------------------------------------------------------- +// ----------------------- ACTIVATION FUNCTIONS -------------------------- +// ----------------------------------------------------------------------- + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Applies the identity activation function. + * @param v the input variable. + */ +case class Identity (v: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: returns v unchanged. + * @return a Variabl with the same data as v. + */ + override def forward (): Variabl = Variabl (ops.id_ (v.data), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates the upstream gradient using the identity derivative. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + v.backward (ops.mul (gradOutput, ops.idD_ (v.data))) +end Identity + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Applies the ReLU activation function. + * @param v the input variable. + */ +case class ReLU (v: Variabl)(using ops: AutogradOps) extends Function: + + private var reluCache: Option [TensorD] = None + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes the ReLU activation of v. + * @return a Variabl with ReLU applied. 
+ */ + override def forward (): Variabl = + reluCache = Some (ops.reLU_(v.data)) + Variabl (reluCache.get, gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates the gradient through ReLU. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + v.backward (ops.mul (gradOutput, ops.reLUD_ (reluCache.get))) +end ReLU + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Applies the LeakyReLU activation function. + * @param v the input variable. + * @param alpha the negative slope coefficient. + */ +case class LeakyReLU (v: Variabl, alpha: Double = 0.2)(using ops: AutogradOps) extends Function: + + private var leakyReLUCache: Option [TensorD] = None + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes the LeakyReLU activation of v. + * @return a Variabl with LeakyReLU applied. + */ + override def forward (): Variabl = + leakyReLUCache = Some (ops.lreLU_(v.data, alpha)) + Variabl (leakyReLUCache.get, gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates the gradient using the LeakyReLU derivative. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + v.backward (ops.mul (gradOutput, ops.lreLUD_ (leakyReLUCache.get))) +end LeakyReLU + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Applies the ELU activation function. + * @param v the input variable. + * @param alpha the ELU scaling parameter. 
+ */ +case class ELU (v: Variabl, alpha: Double = 1.0)(using ops: AutogradOps) extends Function: + + private var eluCache: Option [TensorD] = None + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes the ELU activation of v. + * @return a Variabl with ELU applied. + */ + override def forward (): Variabl = + eluCache = Some (ops.eLU_ (v.data, alpha)) + Variabl (eluCache.get, gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates the gradient using the ELU derivative. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + v.backward (ops.mul (gradOutput, ops.eLUD_(eluCache.get, alpha))) +end ELU + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Applies the tanh activation function. + * @param v the input variable. + */ +case class Tanh (v: Variabl)(using ops: AutogradOps) extends Function: + + private var tanhCache: Option [TensorD] = None + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes tanh(v). + * @return a Variabl with tanh applied. + */ + override def forward (): Variabl = + tanhCache = Some (ops.tanh_ (v.data)) + Variabl (tanhCache.get, gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates the gradient using the tanh derivative. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + v.backward (ops.mul (gradOutput, ops.tanhD_ (tanhCache.get))) +end Tanh + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Applies the sigmoid activation function. + * @param v the input variable. 
+ */ +case class Sigmoid (v: Variabl)(using ops: AutogradOps) extends Function: + + private var sigmoidCache: Option [TensorD] = None + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes sigmoid(v). + * @return a Variabl with sigmoid applied. + */ + override def forward (): Variabl = + sigmoidCache = Some (ops.sigmoid_(v.data)) + Variabl (sigmoidCache.get, gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates the gradient using the sigmoid derivative. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + v.backward (ops.mul (gradOutput, ops.sigmoidD_ (sigmoidCache.get))) +end Sigmoid + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Applies the GeLU activation function. + * @param v the input variable. + */ +case class GeLU (v: Variabl)(using ops: AutogradOps) extends Function: + + private var geluCache: Option [TensorD] = None + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes GeLU(v). + * @return a Variabl with GeLU applied. + */ + override def forward (): Variabl = + geluCache = Some (ops.geLU_ (v.data)) + Variabl (geluCache.get, gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: uses the GeLU derivative to propagate the gradient. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + v.backward (ops.mul (gradOutput, ops.geLUD_ (geluCache.get))) +end GeLU + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Applies the softmax activation function. + * @param v the input variable. 
+ */ +case class Softmax (v: Variabl)(using ops: AutogradOps) extends Function: + + private var softmaxCache: Option [TensorD] = None + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes softmax(v). + * @return a Variabl with softmax applied. + */ + override def forward (): Variabl = + softmaxCache = Some (ops.softmax_ (v.data)) + Variabl (softmaxCache.get, gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates the gradient using the softmax derivative. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + val s = softmaxCache.get + // Compute Jacobian-vector product for softmax derivative. + val dot = ops.sumAlongAxis (gradOutput * s, axis = 2) + val dotFull = TensorD.broadcastTo (dot, s.shape) // Broadcast to match shape (FIX: ops should handle this) + val gradInput = s * (gradOutput - dotFull) + v.backward (gradInput) +end Softmax + +// ----------------------------------------------------------------------- +// ------------------------- LOSS FUNCTIONS ------------------------------ +// ----------------------------------------------------------------------- + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the Sum of Squared Errors (SSE) loss. + * @param pred the prediction variable. + * @param target the target variable. + */ +case class SSELoss (pred: Variabl, target: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes the SSE loss. + * @return a Variabl with loss data. 
+ */ + override def forward (): Variabl = + val loss = ops.sseLoss (pred.data, target.data) + Variabl (ops.scalar (loss), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates the gradient scaled by 2*(pred - target). + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + val grad = ops.mulScalar (ops.sub (pred.data, target.data), 2) + val gFinal = ops.mulScalar (grad, gradOutput(0)(0)(0)) // Since gradOutput is a scalar tensor + pred.backward (gFinal) +end SSELoss + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the Mean Squared Error (MSE) loss. + * @param pred the prediction variable. + * @param target the target variable. + */ +case class MSELoss (pred: Variabl, target: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes the MSE loss. + * @return a Variabl with loss data. + */ + override def forward (): Variabl = + val loss = ops.mseLoss (pred.data, target.data) + Variabl (ops.scalar(loss), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: scales the gradient by 2*(pred-target)/batchSize. + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + val numel = pred.shape.product.toDouble + val grad = ops.mulScalar (ops.sub (pred.data, target.data), 2.0 / numel) + val gFinal = ops.mulScalar (grad, gradOutput(0)(0)(0)) // Since gradOutput is a scalar tensor + pred.backward (gFinal) +end MSELoss + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the Mean Absolute Error (MAE) loss. + * @param pred the prediction variable. + * @param target the target variable. 
+ */ +case class MAELoss (pred: Variabl, target: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes the MAE loss. + * @return a Variabl with loss data. + */ + override def forward (): Variabl = + val loss = ops.maeLoss (pred.data, target.data) + Variabl (ops.scalar (loss), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates the gradient using the sign of (pred-target). + * @param gradOutput the upstream gradient. + */ + override def backward (gradOutput: TensorD): Unit = + val grad = ops.sign (ops.sub (pred.data, target.data)) + val prod = ops.mulScalar (grad, gradOutput(0)(0)(0)) // Since gradOutput is a scalar tensor + val gFinal = ops.divScalar (prod, pred.shape.product) + pred.backward (gFinal) +end MAELoss + +// ----------------------------------------------------------------------- +// ------------------------- TENSOR OPERATIONS --------------------------- +// ----------------------------------------------------------------------- + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the dot product of two variables. + * @param v1 the first variable. + * @param v2 the second variable. + */ +case class Dot (v1: Variabl, v2: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes dot(v1, v2). + * @return a Variabl containing the dot product. + */ + override def forward (): Variabl = + Variabl (ops.dot (v1.data, v2.data), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates gradients for dot product. + * @param gradOutput the upstream gradient. 
+ */ + override def backward (gradOutput: TensorD): Unit = + backpropForTwoInputs (v1, v2, gradOutput, + (g: TensorD) => ops.mul (g, v2.data), + (g: TensorD) => ops.mul (v1.data, g)) +end Dot + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the matrix multiplication of two variables. + * @param v1 the first variable. + * @param v2 the second variable. + */ +case class MatMul (v1: Variabl, v2: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes matrix multiplication of v1 and v2. + * @return a Variabl with the result. + */ + override inline def forward (): Variabl = + Variabl (ops.matmul (v1.data, v2.data), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates gradients using transposed matrices. + * @param gradOutput the upstream gradient. + */ + override inline def backward (gradOutput: TensorD): Unit = + backpropForTwoInputs (v1, v2, gradOutput, + (g: TensorD) => ops.matmul (g, ops.transpose (v2.data, 1, 2)), + (g: TensorD) => ops.matmul (ops.transpose (v1.data, 1 , 2), g)) +end MatMul + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the batched matrix multiplication of two variables. + * @param v1 the first variable. + * @param v2 the second variable. + */ +case class BatchMatMul (v1: Variabl, v2: Variabl)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: computes batched matrix multiplication. + * @return a Variabl with the batched result. 
+ */ + override inline def forward (): Variabl = + Variabl (ops.bmm (v1.data, v2.data), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates the gradients for batched matrix multiplication, unbroadcasting as necessary. + * @param gradOutput the upstream gradient. + */ + override inline def backward (gradOutput: TensorD): Unit = + val v1T = ops.transpose (v1.data, 1, 2) + val v2T = ops.transpose (v2.data, 1, 2) + + val gradA = ops.bmm (gradOutput, v2T) + val gradB = ops.bmm (v1T, gradOutput) + + val gradAFinal = unbroadcast (gradA, v1.shape) + val gradBFinal = unbroadcast (gradB, v2.shape) + + v1.backward (gradAFinal) + v2.backward (gradBFinal) +end BatchMatMul + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Transposes (swaps) two axes of a tensor variable. + * @param v the input variable. + * @param i first axis index. + * @param j second axis index. + */ +case class Transpose (v: Variabl, i: Int, j: Int)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: swaps axes i and j. + * @return a Variabl with transposed data. + */ + override def forward (): Variabl = Variabl (ops.transpose (v.data, i, j), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: transpose again (swap i and j) to propagate gradient. + * @param gradOutput upstream gradient in transposed shape. + */ + override def backward (gradOutput: TensorD): Unit = + v.backward (ops.transpose (gradOutput, i, j)) +end Transpose + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Permutes axes of a tensor variable according to a specified ordering. + * @param v the input variable. + * @param axes the permutation of axes. 
+ */ +case class Permute (v: Variabl, axes: Seq [Int])(using ops: AutogradOps) extends Function: + + private var inverse: Array[Int] = uninitialized + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: reorders axes as specified. + * @return a Variabl with permuted data. + */ + override def forward (): Variabl = + // Pre-compute inverse permutation for backward. + inverse = Array.ofDim [Int] (axes.length) + var idx = 0 + while idx < axes.length do + inverse(axes(idx)) = idx + idx += 1 + Variabl (ops.permute (v.data, axes), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: apply inverse permutation to gradient. + * @param gradOutput upstream gradient in permuted layout. + */ + override def backward (gradOutput: TensorD): Unit = + v.backward (ops.permute (gradOutput, inverse.toSeq)) +end Permute + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Reshape operation for a variable. + * This class represents a differentiable operation that reshapes a tensor + * variable to a new shape during the forward pass and reshapes the gradient + * back to the original shape during the backward pass. + * @param v the input variable to be reshaped + * @param newShape the target shape for the variable + */ +case class Reshape (v: Variabl, newShape: Seq[Int])(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: reshapes the variable to newShape. + * @return a Variabl with reshaped data. + */ + override def forward (): Variabl = + Variabl (ops.reshape (v.data, newShape), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: reshapes the gradient back to the original shape. + * @param gradOutput the upstream gradient. 
+ */ + override def backward (gradOutput: TensorD): Unit = + val origShape = v.shape + val gradReshaped = ops.reshape (gradOutput, origShape) + v.backward (gradReshaped) +end Reshape + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Represents a slicing operation on a tensor variable. + * This class performs a differentiable slicing operation during the forward pass + * and propagates the gradient to the sliced region during the backward pass. + * @param v the input variable to be sliced + * @param r0 the range for the first dimension + * @param r1 the range for the second dimension + * @param r2 the range for the third dimension + */ +case class Slice (v: Variabl, r0: Range, r1: Range, r2: Range)(using ops: AutogradOps) extends Function: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: slices the variable according to the specified ranges. + * @return a Variabl with sliced data. + */ + override def forward (): Variabl = + Variabl (ops.getSlice (v.data, r0, r1, r2), gradFn = Some (this)) + end forward + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: propagates the gradient to the sliced region. + * @param gradOutput upstream gradient corresponding to the sliced output. + */ + override def backward (gradOutput: TensorD): Unit = + val gradInput = ops.setSlice (ops.zerosLike (v.data), gradOutput, r0, r1, r2) + v.backward (gradInput) +end Slice + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Represents a concatenation operation on a sequence of variables along a specified axis. + * This class performs a differentiable concatenation operation during the forward pass + * and splits the gradient during the backward pass to propagate it to the input variables. 
+ * @param vs the sequence of input variables to concatenate + * @param axis the axis along which to concatenate the variables + */ +case class Concat (vs: Seq[Variabl], axis: Int)(using ops: AutogradOps) extends Function: + + private var splitSizes: Seq [Int] = uninitialized + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: concatenates the input variables along the specified axis. + * @return a Variabl with concatenated data. + */ + override def forward (): Variabl = + val dataSeq = vs.map (_.data) + splitSizes = dataSeq.map (t => t.shape (axis)) + Variabl (ops.concat (dataSeq, axis), gradFn = Some (this)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward pass: splits the gradient and propagates to each input variable. + * @param gradOutput upstream gradient corresponding to the concatenated output. + */ + override def backward (gradOutput: TensorD): Unit = + var cursor = 0 + for ((v, sz) <- vs.zip (splitSizes)) do + // Build slice ranges for each axis + val ir = if axis == 0 then cursor until cursor + sz else 0 until gradOutput.shape(0) + val jr = if axis == 1 then cursor until cursor + sz else 0 until gradOutput.shape(1) + val kr = if axis == 2 then cursor until cursor + sz else 0 until gradOutput.shape(2) + + // Extract gradient slice for this variable + val gradSlice = ops.getSlice (gradOutput, ir, jr, kr) + // Propagate gradient to the input variable + v.backward (gradSlice) + cursor += sz + end for +end Concat + +// ----------------------------------------------------------------------- +// -------------------- LAYER LEVEL FUNCTIONS ---------------------------- +// ----------------------------------------------------------------------- + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNNCellFused` Function implements a single RNN cell as one fused autograd op. 
 *  It fuses the input/hidden projections and activation into a single node for
 *  improved performance and reduced autograd graph size.
 *  Equation:
 *      h_t = φ(W_ih * x + b_ih + W_hh * hPrev + b_hh)
 *  where φ ∈ {tanh, relu}
 *  Shapes:
 *      input  : (B, I, 1)
 *      hidden : (B, H, 1)
 *      W_ih   : (1, H, I)
 *      W_hh   : (1, H, H)
 *      b_ih   : (1, H, 1)
 *      b_hh   : (1, H, 1)
 *  The function caches only what is needed for the backward pass:
 *      - input and hidden states
 *      - pre-activation value
 *      - output after activation
 */
case class RNNCellFused (input: Variabl, hidden: Variabl,
                         W_ih: Variabl, W_hh: Variabl,
                         b_ih: Variabl, b_hh: Variabl,
                         activation: String = "tanh") (using ops: AutogradOps) extends Function:

    private var inputCache: TensorD = uninitialized     // x, cached in forward
    private var hiddenCache: TensorD = uninitialized    // hPrev, cached in forward
    private var preActCache: TensorD = uninitialized    // pre-activation, cached in forward
    private var outputCache: TensorD = uninitialized    // h_t = φ(pre-activation), cached in forward

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Forward pass: computes the RNN cell output h_t = φ(W_ih x + b_ih + W_hh hPrev + b_hh).
     *  @return a Variabl with the RNN cell output.
     */
    override def forward (): Variabl =
        val xProj = ops.add (ops.bmm (W_ih.data, input.data), b_ih.data)
        val hProj = ops.add (ops.bmm (W_hh.data, hidden.data), b_hh.data)
        val preAct = ops.add (xProj, hProj)
        val hNext = activation match
            case "tanh" => ops.tanh_ (preAct)
            case "relu" => ops.reLU_ (preAct)
            case other => throw new IllegalArgumentException (s"Unsupported activation: $other")

        // Cache tensors for backward
        inputCache = input.data
        hiddenCache = hidden.data
        preActCache = preAct
        outputCache = hNext
        Variabl (hNext, gradFn = Some (this))
    end forward

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Backward pass: propagates gradients through the RNN cell to the input,
     *  the previous hidden state, and all four parameters.
     *  @param gradOutput the upstream gradient.
     */
    override def backward (gradOutput: TensorD): Unit =            // gradOutput: (B, H, 1)

        // 1. Activation derivative
        //    NOTE(review): tanh evaluates its derivative on the cached OUTPUT while relu
        //    uses the PRE-ACTIVATION; for ReLU both share the same sign pattern so the
        //    result agrees — confirm tanhD_/reLUD_ expect these inputs.
        val actGrad = activation match
            case "tanh" => ops.tanhD_ (outputCache)
            case "relu" => ops.reLUD_ (preActCache)
            case other => throw new IllegalArgumentException (s"Unsupported activation: $other")

        // 2. Elementwise multiply: (B, H, 1)
        val gradPreAct = ops.mul (gradOutput, actGrad)

        // 3. dL/dinput = W_ih.T @ gradPreAct -> (1, I, H) bmm (B, H, 1) = (B, I, 1)
        val gradInput = ops.bmm (ops.transpose (W_ih.data, 1, 2), gradPreAct)

        // 4. dL/dhidden = W_hh.T @ gradPreAct -> (1, H, H).T bmm (B, H, 1) = (B, H, 1)
        val gradHidden = ops.bmm (ops.transpose (W_hh.data, 1, 2), gradPreAct)

        // 5. Weight grads: (B, H, 1) bmm (B, 1, I) = (B, H, I), then sum across batch
        val gradWih_batch = ops.bmm (gradPreAct, ops.transpose (inputCache, 1, 2))   // (B, H, I)
        val gradWhh_batch = ops.bmm (gradPreAct, ops.transpose (hiddenCache, 1, 2))  // (B, H, H)
        val gradW_ih = ops.sumAlongAxis (gradWih_batch, 0)                           // (1, H, I)
        val gradW_hh = ops.sumAlongAxis (gradWhh_batch, 0)                           // (1, H, H)

        // 6. Bias grads: sum across batch axis (0); both biases enter the
        //    pre-activation additively, so they receive the same gradient.
        val gradB = ops.sumAlongAxis (gradPreAct, 0)                                 // (1, H, 1)

        // 7. Backpropagate
        input.backward (gradInput)
        hidden.backward (gradHidden)
        W_ih.backward (gradW_ih)
        W_hh.backward (gradW_hh)
        b_ih.backward (gradB)
        b_hh.backward (gradB)
    end backward
end RNNCellFused

//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `GRUCellFused` Function implements a single GRU cell as one fused autograd op.
 *  It fuses all gate computations for better performance and fewer autograd nodes.
+ * Equations: + * r_t = sigmoid(W_ir * x + b_ir + W_hr * hPrev + b_hr) + * z_t = sigmoid(W_iz * x + b_iz + W_hz * hPrev + b_hz) + * n_t = tanh(W_in * x + b_in + r_t ⊙ (W_hn * hPrev + b_hn)) + * h_t = (1 - z_t) ⊙ n_t + z_t ⊙ hPrev + * Shapes: + * input : (B, I, 1) + * hidden : (B, H, 1) + * W_i* : (1, H, I) + * W_h* : (1, H, H) + * b_i*,b_h* : (1, H, 1) + */ +case class GRUCellFused (input: Variabl, hidden: Variabl, + W_ir: Variabl, W_hr: Variabl, b_ir: Variabl, b_hr: Variabl, + W_iz: Variabl, W_hz: Variabl, b_iz: Variabl, b_hz: Variabl, + W_in: Variabl, W_hn: Variabl, b_in: Variabl, b_hn: Variabl) + (using ops: AutogradOps) extends Function: + + private var r_t, z_t, n_t: TensorD = uninitialized + private var x_t, hPrev: TensorD = uninitialized + private var h_hn_lin: TensorD = uninitialized + + override def forward (): Variabl = + x_t = input.data + hPrev = hidden.data + + // Reset gate + val pre_r = ops.add (ops.add (ops.bmm (W_ir.data, x_t), b_ir.data), + ops.add (ops.bmm (W_hr.data, hPrev), b_hr.data)) + val r = ops.sigmoid_ (pre_r) + + // Update gate + val pre_z = ops.add (ops.add (ops.bmm (W_iz.data, x_t), b_iz.data), + ops.add (ops.bmm (W_hz.data, hPrev), b_hz.data)) + val z = ops.sigmoid_ (pre_z) + + // Candidate gate + val h_hn = ops.add (ops.bmm (W_hn.data, hPrev), b_hn.data) + val pre_n = ops.add (ops.add (ops.bmm (W_in.data, x_t), b_in.data), ops.mul (r, h_hn)) + val n = ops.tanh_ (pre_n) + + // Hidden update + val oneMinusZ = ops.addScalar (ops.neg (z), 1.0) + val hNext = ops.add (ops.mul (oneMinusZ, n), ops.mul(z, hPrev)) + + // Cache + r_t = r; z_t = z; n_t = n; h_hn_lin = h_hn + + Variabl (hNext, gradFn = Some (this)) + end forward + + override def backward (gradOutput: TensorD): Unit = + // dhNext/dn = (1 - z), dhNext/dz = (hPrev - n) + val dh_dn = ops.addScalar (ops.neg (z_t), 1.0) + val dh_dz = ops.sub (hPrev, n_t) + + val grad_n = ops.mul (gradOutput, dh_dn) + val grad_z = ops.mul (gradOutput, dh_dz) + val grad_hPrev_part = ops.mul (gradOutput, 
z_t) + + // Through activations + val grad_pre_n = ops.mul (grad_n, ops.tanhD_ (n_t)) + val grad_pre_z = ops.mul (grad_z, ops.sigmoidD_ (z_t)) + val grad_pre_r = ops.mul (ops.mul (grad_pre_n, h_hn_lin), ops.sigmoidD_ (r_t)) + + // --- Grad wrt hPrev --- + val grad_h_from_n = ops.mul (r_t, ops.bmm (ops.transpose (W_hn.data, 1, 2), grad_pre_n)) + val grad_h_from_r = ops.bmm (ops.transpose (W_hr.data, 1, 2), grad_pre_r) + val grad_h_from_z = ops.bmm (ops.transpose (W_hz.data, 1, 2), grad_pre_z) + val grad_hPrev_total = ops.add (grad_hPrev_part, ops.add (grad_h_from_n, + ops.add (grad_h_from_r, grad_h_from_z))) + + // --- Grad wrt input --- + val grad_x_r = ops.bmm (ops.transpose (W_ir.data, 1, 2), grad_pre_r) + val grad_x_z = ops.bmm (ops.transpose (W_iz.data, 1, 2), grad_pre_z) + val grad_x_n = ops.bmm (ops.transpose (W_in.data, 1, 2), grad_pre_n) + val grad_x_total = ops.add (ops.add (grad_x_r, grad_x_z), grad_x_n) + + // --- Weight and bias grads --- + def sumB (batch: TensorD): TensorD = ops.sumAlongAxis (batch, 0) + + val gradW_ir = sumB (ops.bmm (grad_pre_r, ops.transpose (x_t, 1, 2))) + val gradW_hr = sumB (ops.bmm (grad_pre_r, ops.transpose (hPrev, 1, 2))) + val gradW_iz = sumB (ops.bmm (grad_pre_z, ops.transpose (x_t, 1, 2))) + val gradW_hz = sumB (ops.bmm (grad_pre_z, ops.transpose (hPrev, 1, 2))) + val gradW_in = sumB (ops.bmm (grad_pre_n, ops.transpose (x_t, 1, 2))) + val gradW_hn = sumB (ops.bmm (ops.mul (grad_pre_n, r_t), ops.transpose (hPrev, 1, 2))) + + val gradB_ir = sumB (grad_pre_r) + val gradB_hr = sumB (grad_pre_r) + val gradB_iz = sumB (grad_pre_z) + val gradB_hz = sumB (grad_pre_z) + val gradB_in = sumB (grad_pre_n) + val gradB_hn = sumB (ops.mul (grad_pre_n, r_t)) + + // --- Backprop --- + input.backward (grad_x_total) + hidden.backward (grad_hPrev_total) + + W_ir.backward (gradW_ir); W_hr.backward (gradW_hr) + W_iz.backward (gradW_iz); W_hz.backward (gradW_hz) + W_in.backward (gradW_in); W_hn.backward (gradW_hn) + + b_ir.backward (gradB_ir); 
b_hr.backward (gradB_hr) + b_iz.backward (gradB_iz); b_hz.backward (gradB_hz) + b_in.backward (gradB_in); b_hn.backward (gradB_hn) + end backward +end GRUCellFused + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Fused RNN over a whole input sequence (vanilla RNN). + * - Unrolls the sequence in a single Function. + * - Returns the last hidden state as the output Variabl. + * - On backward (), performs full BPTT and accumulates parameter grads. + * Shapes: + * input(t): (B, I, 1) + * hidden: (B, H, 1) // initial hidden (h0) + * W_ih: (1, H, I) + * W_hh: (1, H, H) + * b_ih: (1, H, 1) + * b_hh: (1, H, 1) + */ +case class RNNFused (input: IndexedSeq [Variabl], // sequence: x_0 ... x_{T-1} + hidden: Variabl, + W_ih: Variabl, W_hh: Variabl, + b_ih: Variabl, b_hh: Variabl, + activation: String = "tanh", tbptt: Int = 0) + (using ops: AutogradOps) extends Function: + + // --------- Caches for BPTT (one entry per time step) -------------------------- + private var xCache: Array [TensorD] = Array.empty // x_t + private var hPrevCache: Array [TensorD] = Array.empty // h_{t-1} + private var preActCache: Array [TensorD] = Array.empty // W_ih x_t + W_hh h_{t-1} + b + private var hCache: Array [TensorD] = Array.empty // h_t + + private var outputsVar: IndexedSeq [Variabl] = uninitialized + private var finalHiddenVar: Variabl = uninitialized + + def outputs: IndexedSeq [Variabl] = outputsVar + def finalHidden: Variabl = finalHiddenVar + + private [autograd] def clearCaches (): Unit = + xCache = Array.empty + hPrevCache = Array.empty + preActCache = Array.empty + hCache = Array.empty + end clearCaches + + private class OutputNode (t: Int) extends Function: + + override def forward (): Variabl = + throw new IllegalStateException ("" + "OutputNode.forward () should never be called.") + + override def backward (gradOutput: TensorD): Unit = backwardFromT (t, gradOutput) + end OutputNode + + private [autograd] def backwardFromT (tStop: Int, 
gradAtT: TensorD): Unit = + val T = hCache.length + require (T > 0, "RNNFused.backwardFromT: no cached forward pass found") + require (tStop >= 0 && tStop < T, + s"RNNFused.backwardFromT: tStop=$tStop out of range 0..${T - 1}") + + // starting gradient: dL/dh_{tStop} + var gradNextHidden = gradAtT + + // Determine tMin for TBPTT + val tMin = if tbptt > 0 then math.max (0, tStop - tbptt + 1) + else 0 + + var t = tStop + while t >= tMin do + val x_t = xCache (t) + val h_prev = hPrevCache (t) + val pre_t = preActCache (t) + val h_t = hCache (t) + + val actGrad = activation match + case "tanh" => ops.tanhD_(h_t) + case "relu" => ops.reLUD_(pre_t) + case other => throw new IllegalArgumentException(s"Unsupported activation: $other") + + val gradPreAct = ops.mul (gradNextHidden, actGrad) + + val gradInput_t = + ops.bmm (ops.transpose (W_ih.data, 1, 2), gradPreAct) + + val gradHiddenPrev = + ops.bmm (ops.transpose (W_hh.data, 1, 2), gradPreAct) + + val gradWih_batch = ops.bmm (gradPreAct, ops.transpose (x_t, 1, 2)) + val gradWhh_batch = ops.bmm (gradPreAct, ops.transpose (h_prev, 1, 2)) + val gradW_ih = ops.sumAlongAxis (gradWih_batch, 0) + val gradW_hh = ops.sumAlongAxis (gradWhh_batch, 0) + val gradB = ops.sumAlongAxis (gradPreAct, 0) + + input(t).backward (gradInput_t) + W_ih.backward (gradW_ih) + W_hh.backward (gradW_hh) + b_ih.backward (gradB) + b_hh.backward (gradB) + + gradNextHidden = gradHiddenPrev + t -= 1 + end while + + // Backprop into h_{tMin-1} (which is the "incoming" hidden state seen in this chunk) + hidden.backward (gradNextHidden) + end backwardFromT + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: unroll the RNN through the entire sequence. + * Returns the last hidden state h_T as a Variabl (with gradFn = this). 
+ */ + def forwardAll (): (IndexedSeq [Variabl], Variabl) = + import scala.collection.mutable.ArrayBuffer + require (input.nonEmpty, "RNNFused.forward: input sequence must be non-empty") + +// val T = input.length +// val B = input.head.shape.head + + val xsBuf = ArrayBuffer.empty [TensorD] + val hPrevBuf = ArrayBuffer.empty [TensorD] + val preBuf = ArrayBuffer.empty [TensorD] + val hBuf = ArrayBuffer.empty [TensorD] + + // For outputs (Variabl) and state tracking + val outputsBuf = scala.collection.mutable.ArrayBuffer.empty [Variabl] + + var hPrev = hidden.data // (B, H, 1) + + input.foreach { xVar => + val x = xVar.data // (B, I, 1) + val xProj = ops.add (ops.bmm (W_ih.data, x), b_ih.data) // (B, H, 1) + val hProj = ops.add (ops.bmm (W_hh.data, hPrev), b_hh.data) // (B, H, 1) + val preAct = ops.add (xProj, hProj) // (B, H, 1) + + val hNext = activation match + case "tanh" => ops.tanh_ (preAct) // (B, H, 1) + case "relu" => ops.reLU_ (preAct) + case other => throw new IllegalArgumentException (s"Unsupported activation: $other") + + // Cache per-timestep tensors + xsBuf += x + hPrevBuf += hPrev + preBuf += preAct + hBuf += hNext + + // Store output Variabl + val t = outputsBuf.length + outputsBuf += Variabl (hNext, gradFn = Some (OutputNode(t)), + name = Some (s"h_${outputsBuf.length}")) + + hPrev = hNext // Advance to next timestep + } // foreach + + // Freeze caches to be used in backward + xCache = xsBuf.toArray + hPrevCache = hPrevBuf.toArray + preActCache = preBuf.toArray + hCache = hBuf.toArray + + // Return final hidden state as the "output" of this Function + (outputsBuf.toIndexedSeq, outputsBuf.last) + end forwardAll + + override def forward (): Variabl = + val (outs, last) = forwardAll () + outputsVar = outs + finalHiddenVar = last + last + end forward + + override def backward (gradOutput: TensorD): Unit = + val T = hCache.length + require (T > 0, "RNNFused.backward: no cached forward pass found") + backwardFromT (T - 1, gradOutput) + end backward +end 
RNNFused + diff --git a/src/main/scala/scalation/modeling/autograd/GradCheck.scala b/src/main/scala/scalation/modeling/autograd/GradCheck.scala new file mode 100644 index 000000000..1042b054d --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/GradCheck.scala @@ -0,0 +1,113 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri April 25 19:44:13 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Core Operations for Automatic Differentiation + */ + +package scalation +package modeling +package autograd + +import scala.collection.immutable.ArraySeq + +import scalation.calculus.Differential.grad +import scalation.mathstat._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `GradCheck` object provides methods the check the agreement between numerically + * computed gradient those computed using Automatic Differentiation (AD). + * @see `calculus.Differential` + */ +object GradCheck: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Check the gradient for all given variable. 
+ */ + def gradCheck (param: Variabl, loss: () => Variabl, + atol: Double = 1e-5, rtol: Double = 1e-3, + maxMismatches: Int = 10, quiet: Boolean = true, + debug: Boolean = false): Boolean = + val fScalar = () => loss ().data.sum + val forwardVal = fScalar () + val v = param.data.flattenToVector + val fn = toFunctionV2S (param, fScalar) + val numerlVec = grad (fn, v) + val numerical = TensorD (param.data.dims, ArraySeq.unsafeWrapArray (numerlVec.toArray)*) + + // ----- Analytical gradient via backprop ----- + param.grad = TensorD.zerosLike (param.data) + val out = loss () + out.backward () + val analytical = param.grad + + // println for debugging + if debug then + println (s"param: ${param.name.getOrElse ("unnamed")}") + println (s"param.data: $v") + println (s"analytical: $analytical") + println (s"numerical: $numerical") + end if + + // ----- Compare ----- +// val diff = (analytical - numerical).abs + val (d1, d2, d3) = analytical.dims + var passed = true + var shown = 0 + + cfor(0, d1) { i => + cfor(0, d2) { j => + cfor(0, d3) { k => + val a = analytical(i, j, k) + val n = numerical(i, j, k) + val d = math.abs (a - n) + val tol = atol + rtol * math.abs (n) + if d > tol then + passed = false + if shown < maxMismatches then + println (f" ❌ Mismatch at ($i,$j,$k): autograd=$a%.6g, numerical=$n%.6g, diff=$d%.3g > tol=$tol%.3g") + shown += 1 + end if + } // cfor + } // cfor + } // cfor + + if shown == 0 && ! quiet then + println (s"\nGradCheck for ${param.name.getOrElse("unnamed")}") + println (s" Forward value: $forwardVal") + println (s"✅ GradCheck PASSED for ${param.name.getOrElse ("unnamed")}\n") + else if ! passed then + println (s"\nGradCheck for ${param.name.getOrElse ("unnamed")}") + println (s" Forward value: $forwardVal") + val extra = math.max (0, shown - maxMismatches) + if extra > 0 then println (s" ... 
and $extra more mismatches not shown") + println (s"❌ GradCheck FAILED for ${param.name.getOrElse ("unnamed")}\n") + + passed + end gradCheck + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert ? to a `FunctionV2S` + * @param param the parameters + * @param f the function + */ + private def toFunctionV2S (param: Variabl, f: () => Double): FunctionV2S = + (x: VectorD) => param.data = TensorD (param.data.dims, ArraySeq.unsafeWrapArray (x.toArray)*) + f() + end toFunctionV2S + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Check the gradients for all the variables. + */ + def gradCheckAll (params: Seq [Variabl], loss: () => Variabl, + atol: Double = 1e-5, rtol: Double = 1e-3, + maxMismatches: Int = 10, quiet: Boolean = true, + debug: Boolean = false): Boolean = + params.forall { p => gradCheck (p, loss, atol, rtol, maxMismatches, quiet, debug) } + end gradCheckAll + +end GradCheck + diff --git a/src/main/scala/scalation/modeling/autograd/GraphExporter.scala b/src/main/scala/scalation/modeling/autograd/GraphExporter.scala new file mode 100644 index 000000000..897b08d2d --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/GraphExporter.scala @@ -0,0 +1,399 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Thu Nov 13 11:42:31 EST 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Computational Graph Export Utilities + * + * Provides utilities for exporting autograd computation graphs into multiple + * visualization formats (DOT/GraphViz, Mermaid, D3.js JSON). Starting from a + * root `Variabl`, the graph builder performs a reachability scan, determines + * variable/function roles, detects shapes, assigns depths for layout, and + * produces a structured `GraphModel` representation. 
+ * + * Exporters: + * - `toDot` : GraphViz DOT format with optional depth clustering, shapes, + * gradients, and legend rendering. + * - `toMermaid` : Mermaid Flowchart syntax. + * - `toJson` : D3.js-friendly node/edge JSON. + * - `writeDot`, `writeAll` : Convenience writers for filesystem output. + */ + +package scalation +package modeling +package autograd + +import java.nio.file.{Files, Paths} +import java.nio.charset.StandardCharsets + +import scala.util.Try + +import scalation.database.graph_pm.TopSort +import scalation.mathstat.TensorD + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** GraphExporter generates a computation graph visualization from a root + * `Variabl`. The graph includes variables, functions, dependency edges, + * tensor shapes, and optional gradient annotations. The resulting graph can + * be serialized to DOT, Mermaid, or JSON formats for visualization. + */ +object GraphExporter: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Rendering options for DOT output. + * @param showAnnotations whether to annotate nodes that have stored gradients + * @param edgeShapes whether to label edges with tensor shapes + * @param nodeShapes whether to display tensor shapes inside nodes + * @param colorScheme color theme for rendering (reserved for future use) + * @param groupBy grouping mode ("depth" or "none") + * @param showLegend whether to include a legend cluster in the DOT output + */ + case class RenderOptions (showAnnotations: Boolean = true, edgeShapes: Boolean = true, + nodeShapes: Boolean = true, colorScheme: String = "default", + groupBy: String = "depth", showLegend: Boolean = true) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** A variable node in the exported computation graph. 
+ * @param id unique identifier for the variable + * @param isParam whether this variable represents a trainable parameter + * @param isOutput whether this variable is the graph’s final output + * @param shape tensor shape of the variable + * @param name optional user-defined name + * @param grad optional stored gradient tensor + */ + case class VarNode (id: String, isParam: Boolean, + isOutput: Boolean, shape: List[Int], + name: Option[String], grad: Option[TensorD]) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** A function node (operation) in the computation graph. + * @param id unique identifier for the function + * @param op operation name (e.g., "add", "matmul") + * @param attrs operator-specific attributes + * @param shape tensor shape of the function output + * @param depth depth level for layered graph layout + */ + case class FuncNode (id: String, op: String, + attrs: Map[String,String], shape: List[Int], depth: Int) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** A directed graph edge. + * @param src source node identifier + * @param dst destination node identifier + * @param kind edge type (currently only "data") + */ + case class Edge (src: String, dst: String, kind: String) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Internal model of the full computation graph. + * @param vars all variable nodes + * @param funcs all function nodes + * @param edges all dependency edges + * @param root id of the root output variable + */ + case class GraphModel (vars: Seq [VarNode], funcs: Seq [FuncNode], + edges: Seq [Edge], root: String) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a computation graph starting from a root `Variabl`. 
+ * Traverses all dependent variables, topologically sorts functions, + * assigns depth levels, and constructs variable/function nodes and edges. + * @param root the root output variable + * @param includeGrad whether to attach stored gradients to variable nodes + * @return a structured `GraphModel` for visualization + */ + // FIX - Doesn't work with RNNFused yet (possibly due to name mismatch) + def build (root: Variabl, includeGrad: Boolean = true): GraphModel = + // Collect all variables reachable from the root + val vars = scala.collection.mutable.LinkedHashSet [Variabl] () + def visit (v: Variabl): Unit = + if ! vars.contains (v) then + vars += v + v.gradFn.foreach (f => f.inputs.foreach (visit)) + visit (root) + + // Topo-sorted function nodes (inputs → outputs) + val fns = TopSort.topSortFunctions (root) + + // Map each Function -> its output Variabl + val funcOut: Map[Function, Variabl] = + vars.flatMap (v => v.gradFn.map (_ -> v)).toMap + + // Depth assignment (for nicer horizontal layering) + val depthCache = scala.collection.mutable.HashMap [Any, Int] () + + def depthOfVar(v: Variabl): Int = v.gradFn match + case None => 0 + case Some(f) => depthCache.getOrElseUpdate (f, depthOfFunc(f)) + + def depthOfFunc (f: Function): Int = + depthCache.getOrElseUpdate (f, + (if f.inputs.isEmpty then 0 else f.inputs.map(depthOfVar).max) + 1) + + val funcNodes = fns.map { f => + val out = funcOut.getOrElse (f, + throw new IllegalStateException(s"No output var for Function id=${f.id} (${f.opName})")) + FuncNode (id = s"F${f.id}", op = f.opName, + attrs = f.attributes, shape = out.data.shape, depth = depthOfFunc(f)) + } + + val varNodes = vars.toSeq.map { v => + val isOut = (v eq root) + val isParam = v.gradFn.isEmpty && v.name.exists(n => + val ln = n.toLowerCase + ln.contains ("weight") || ln.contains ("bias") || + ln.startsWith ("w_") || ln.startsWith ("b_")) + VarNode (id = s"V${System.identityHashCode(v)}", + isParam = isParam, isOutput= isOut, + shape = 
v.data.shape, name = v.name, + grad = if includeGrad then Some(v.grad) else None) + } + + // Stable mapping Var -> id (same order used to build varNodes) + val idMap: Map [Variabl, String] = + vars.toSeq.zip (varNodes.map (_.id)).toMap + + def idOf(v: Variabl): String = idMap (v) + + // Data edges: Var -> Func (inputs) and Func -> Var (output) + val edgesData: Seq [Edge] = fns.flatMap { f => + val fId = s"F${f.id}" + val out = funcOut(f) + val ins = f.inputs.map(in => Edge(idOf(in), fId, "data")) + ins :+ Edge (fId, idOf(out), "data") } + + GraphModel (varNodes, funcNodes, edgesData, idOf (root)) + end build + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Escape special characters in a string for safe usage in DOT/GraphViz syntax. + * Specifically, this method replaces backslashes (`\`) with double backslashes (`\\`) + * and double quotes (`"`) with escaped double quotes (`\"`). + * @param s the input string to be escaped + * @return the escaped string with special characters replaced + */ + private def esc (s: String): String = + s.replace("\\", "\\\\").replace ("\"", "\\\"") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert a graph model into GraphViz DOT format. + * Supports optional depth-based clustering, shape labels, gradient tags, + * and legend rendering. 
+ * @param g the computation graph + * @param opts rendering options + * @return DOT string representation + */ + def toDot (g: GraphModel, opts: RenderOptions = RenderOptions()): String = + val sb = new StringBuilder + sb ++= "digraph ComputationGraph {\n" + + " rankdir=LR;\n" + + " compound=true;\n" + + " node [fontname=\"Helvetica\"];\n" + +/* + // For edge shape labels and leaf detection + val varMap: Map [String, VarNode] = g.vars.map (v => v.id -> v).toMap + val producedVarIds: Set [String] = + g.edges.collect { + case Edge(src, dst, "data") if src.startsWith ("F") && dst.startsWith ("V") => dst + }.toSet +*/ + + // Functions (either grouped by depth or flat) + if opts.groupBy == "depth" then + g.funcs.groupBy (_.depth).toSeq.sortBy (_._1).foreach { case (d, layer) => + sb ++= s" subgraph cluster_depth_$d {\n label=\"Layer $d\"; style=dashed; color=gray70;\n" + layer.foreach { f => + val shapeLine = if opts.nodeShapes then s"shape=${f.shape.mkString("x")}" else "" + val attrLines = + (if shapeLine.nonEmpty then Seq(shapeLine) else Seq.empty) ++ + f.attrs.map { case (k,v) => s"$k=$v" } + val extra = if attrLines.nonEmpty then attrLines.mkString("\\n") else "" + sb ++= + s""" ${f.id} [shape=box, style=filled, fillcolor="#FFF7D6", label="${esc(f.op)}${if extra.nonEmpty then "\n"+extra else ""}"];\n""" + } + sb ++= " }\n" + } + else + g.funcs.sortBy (_.depth).foreach { f => + val shapeLine = if opts.nodeShapes then s"shape=${f.shape.mkString("x")}" else "" + val attrLines = + (if shapeLine.nonEmpty then Seq (shapeLine) else Seq.empty) ++ + f.attrs.map{ case (k,v) => s"$k=$v" } + val extra = if attrLines.nonEmpty then attrLines.mkString("\\n") else "" + sb ++= + s""" ${f.id} [shape=box, style=filled, fillcolor="#FFF7D6", label="${esc(f.op)}${if extra.nonEmpty then "\n"+extra else ""}"];\n""" + } + + // Variables (color by role) + g.vars.foreach { v => + val isLeaf = !v.isOutput && !v.isParam && !g.edges.exists(e => e.src.startsWith("F") && e.dst == v.id) + val 
fill = + if v.isOutput then "#FFCCE0" + else if v.isParam then "#FFE9AA" + else if isLeaf then "#D5F5D5" + else "#E0F1FF" + val style = if v.isParam then "doublecircle" else "ellipse" + val gradTag = if opts.showAnnotations && v.grad.isDefined then "\\n∂L stored" else "" + val nm = v.name.map (esc).getOrElse ("") + val shapeLn = if opts.nodeShapes then s"\n${v.shape.mkString("x")}" else "" + sb ++= s" ${v.id} [shape=$style, style=filled, fillcolor=\"$fill\", label=\"${nm}$shapeLn$gradTag\"];\n" + } + + // Edges (optionally label with tensor shapes) + g.edges.foreach { e => + if e.kind == "data" then + val labelOpt = + if opts.edgeShapes then + if e.src.startsWith ("V") && e.dst.startsWith ("F") then g.vars.find (_.id == e.src).map(_.shape.mkString("x")) + else if e.src.startsWith ("F") && e.dst.startsWith ("V") then g.vars.find (_.id == e.dst).map(_.shape.mkString("x")) + else None + else None + val labelStr = labelOpt.map(l => s" label=\"${esc(l)}\"").getOrElse("") + sb ++= s" ${e.src} -> ${e.dst} [color=black$labelStr];\n" + } + if opts.showLegend then + sb ++= " subgraph cluster_legend {\n" + sb ++= " label=\"\"; style=rounded; color=gray60; fontsize=10;\n" + // Compact HTML label (shape=plain) single block (no leading/trailing blank lines inside <>) + sb ++= " legend_info [shape=plain, label=<" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "
    Legend
    Function: operation (box, pale yellow)
    Param: parameter variable (gold, doublecircle)
    Leaf: leaf input variable (green)
    Intermediate: intermediate variable (light blue)
    Output: final output variable (pink)
    Edge label: tensor shape
    Node suffix: ∂L stored (gradient available)
    >];\n" + sb ++= " }\n" + sb ++= "}\n" + sb.toString() + end toDot + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward-compatible DOT exporter using only a gradient toggle. + * @param g the computation graph + * @param showGrad whether to annotate gradient availability + */ + def toDot (g: GraphModel, showGrad: Boolean): String = + toDot (g, RenderOptions(showAnnotations = showGrad)) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Export the computation graph in Mermaid Flowchart syntax. + * Variables become rounded nodes; functions become double-curly nodes. + * @param g the graph model + * @return Mermaid flowchart string + */ + def toMermaid (g: GraphModel): String = + val sb = new StringBuilder ("flowchart LR\n") + g.vars.foreach { v => + val nm = v.name.getOrElse (v.id) + sb ++= s""" ${v.id}(["$nm\\n${v.shape.mkString("x")}"])\n""" + } + g.funcs.foreach { f => + sb ++= s""" ${f.id}{{"${f.op}\\n${f.shape.mkString("x")}"]}}\n""" + } + g.edges.filter (_.kind == "data").foreach { e => + sb ++= s" ${e.src} --> ${e.dst}\n" + } + sb.toString + end toMermaid + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Export the graph in JSON format for D3.js visualizations. 
+ * @param g the graph model + * @return JSON string containing nodes and edges + */ + def toJson (g: GraphModel): String = + val nodes = + (g.vars.map { v => + s"""{"id":"${v.id}","type":"var","name":${v.name.fold("null")(n => s"\"${esc(n)}\"")},"shape":"${v.shape.mkString("x")}","isParam":${v.isParam},"isOutput":${v.isOutput}}""" + } ++ g.funcs.map { f => + val attrs = f.attrs.map{ case (k,v) => s""""${esc(k)}":"${esc(v)}""" }.mkString(",") + s"""{"id":"${f.id}","type":"func","op":"${esc(f.op)}","shape":"${f.shape.mkString("x")}","depth":${f.depth},"attrs":{${attrs}}}""" + }).mkString (",") + + val edges = + g.edges.filter (_.kind == "data") + .map(e => s"""{"source":"${e.src}","target":"${e.dst}"}""") + .mkString (",") + s"""{"nodes":[${nodes}],"edges":[${edges}]}""" + end toJson + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Write DOT output to a file, with optional GraphViz SVG rendering. + * @param root root variable of the computation graph + * @param path target .dot file path + * @param opts rendering options + * @param renderSvg whether to also generate an SVG via `dot -Tsvg` + * @return path of written file or SVG file + */ + def writeDot (root: Variabl, path: String, + opts: RenderOptions = RenderOptions (), + renderSvg: Boolean = false): Try [String] = Try { + val p = Paths.get (path) + val parent = p.getParent + if parent != null && ! Files.exists (parent) then Files.createDirectories (parent) + val g = build (root) + val dot = toDot (g, opts) + Files.write (p, dot.getBytes (StandardCharsets.UTF_8)) + if renderSvg then + val out = path.stripSuffix (".dot") + ".svg" + val pb = new ProcessBuilder ("dot", "-Tsvg", path, "-o", out) + pb.inheritIO ().start ().waitFor () + out + else path + } + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Write all supported graph formats (DOT, SVG, Mermaid, JSON) into a directory. 
+ * @param root root variable + * @param dir output directory + * @param baseName base filename without extension + * @param opts rendering options + * @param svg whether to export SVG + * @param mermaid whether to export Mermaid + * @param json whether to export JSON + * @return sequence of file paths written + */ + def writeAll (root: Variabl, dir: String, + baseName: String, opts: RenderOptions = RenderOptions (), + svg: Boolean = true, mermaid: Boolean = true, + json: Boolean = true): Try [Seq [String]] = Try { + val outDir = Paths.get (dir) + if !Files.exists (outDir) then Files.createDirectories (outDir) + val results = scala.collection.mutable.ArrayBuffer [String] () + + val dotPath = outDir.resolve (baseName + ".dot").toString + writeDot (root, dotPath, opts, renderSvg = svg).foreach( results += _) + + val g = build (root) + if mermaid then + val mer = toMermaid (g) + val mp = outDir.resolve (baseName + ".mmd") + Files.write(mp, mer.getBytes(StandardCharsets.UTF_8)) + results += mp.toString + + if json then + val js = toJson (g) + val jp = outDir.resolve (baseName + ".json") + Files.write(jp, js.getBytes (StandardCharsets.UTF_8)) + results += jp.toString + + results.toSeq + } + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward-compatible alias for `writeDot`. 
+ * @param root root variable + * @param path target DOT file + * @param render whether to also produce SVG + */ + def makeDot(root: Variabl, path: String, render: Boolean = false): Try[String] = + writeDot(root, path, RenderOptions(), renderSvg = render) + +end GraphExporter + diff --git a/src/main/scala/scalation/modeling/autograd/LRScheduler.scala b/src/main/scala/scalation/modeling/autograd/LRScheduler.scala new file mode 100644 index 000000000..df981a7a5 --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/LRScheduler.scala @@ -0,0 +1,55 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri Nov 7 09:15:25 EST 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Learning Rate Scheduler Interface + * + * Defines the base trait for learning rate schedulers used in gradient-based + * optimization. Concrete schedulers should override one or both versions of + * `step()` and implement their own logic for updating the learning rate. + */ + +package scalation +package modeling +package autograd + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Learning Rate Scheduler (LR Scheduler) trait. + * Defines a generic interface for schedulers that adjust the learning rate + * during optimization. Concrete implementations may update the learning rate + * based on iteration count, loss values, or other criteria. + * Notes: + * - The parameterless `step()` is intended for schedulers that adjust + * learning rate solely based on iteration count. + * - The `step(currentLoss)` method is intended for schedulers that adapt + * learning rate based on the current loss value. + * - By default, both methods throw `UnsupportedOperationException`; + * subclasses must override the method(s) they support. 
+ */ +trait LRScheduler: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Advance the scheduler one step (iteration-based). + * Default implementation throws an exception; override if supported. + */ + def step (): Unit = + throw new UnsupportedOperationException ("This scheduler does not support step without loss input.") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Advance the scheduler using the current loss (loss-based scheduling). + * Default implementation throws an exception; override if supported. + * @param currentLoss the current loss value used for scheduling + */ + def step (currentLoss: Double): Unit = + throw new UnsupportedOperationException ("This scheduler does not support step with loss input.") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the most recently computed learning rate. + */ + def getLastLR: Double + +end LRScheduler + diff --git a/src/main/scala/scalation/modeling/autograd/LayerNorm.scala b/src/main/scala/scalation/modeling/autograd/LayerNorm.scala new file mode 100644 index 000000000..a7b05ff86 --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/LayerNorm.scala @@ -0,0 +1,41 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri April 25 19:48:13 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Fully Connected (Linear) Layer for Neural Networks + */ + +package scalation +package modeling +package autograd + +import scalation.mathstat.TensorD + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `LayerNorm` class implements Layer Normalization as described in: + * "Layer Normalization" by Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. 
Hinton + * @see https://arxiv.org/abs/1607.06450 + * @param dModel the number of features in the input + * @param eps a small value to avoid division by zero + * @param ops the autograd operations + */ +class LayerNorm (dModel: Int, eps: Double = 1e-6)(using ops: AutogradOps) + extends Module: + + val gamma = Variabl (ops.onesLike (new TensorD (1, 1, dModel)), name = Some ("gamma")) + val beta = Variabl (ops.zerosLike (new TensorD (1, 1, dModel)), name = Some ("beta")) + + override def parameters: IndexedSeq [Variabl] = IndexedSeq (gamma, beta) + + override def forward (input: Variabl): Variabl = + val mean = input.meanAxis (axis = 2) + val variance = input.varAxis (axis = 2) + val normalized = (input - mean) / (variance + eps).sqrt + gamma * normalized + beta + end forward + +end LayerNorm + diff --git a/src/main/scala/scalation/modeling/autograd/Linear.scala b/src/main/scala/scalation/modeling/autograd/Linear.scala new file mode 100644 index 000000000..e81501e8b --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/Linear.scala @@ -0,0 +1,70 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri April 25 19:48:13 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Fully Connected (Linear) Layer for Neural Networks + */ + +package scalation +package modeling +package autograd + +import scalation.mathstat.TensorD + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** A fully connected linear (affine) layer: output =weight.bmm(input) + bias + * Computes a linear transformation of the input tensor: + * - Weight shape: (1, outFeatures, inFeatures) + * - Bias shape: (1, outFeatures, 1) + * - Input shape: (batch, inFeatures, 1) + * - Output shape: (batch, outFeatures, 1) + * The weight and bias are learnable parameters wrapped in `Variabl`. 
+ * Internally uses batched matrix multiplication and broadcasting for bias addition. + * @param inFeatures the number of input features + * @param outFeatures the number of output features + */ +class Linear (inFeatures: Int, outFeatures: Int)(using ops: AutogradOps) + extends Module: + + // Possible Improvements: Try to use different initialization methods based on the activation function used... + + private val weightData: TensorD = TensorInitializers.xavierInit (1, outFeatures, inFeatures) + private val biasData: TensorD = TensorInitializers.zeros (1, outFeatures, 1) + +// private val weightData: TensorD = TensorD.fromMatrix (Initializer.weightMat (outFeatures, inFeatures)) +// private val biasData: TensorD = TensorD.fromVector (Initializer.weightVec (outFeatures), axis = 1) + + val weight: Variabl = Variabl (weightData, name = Some ("weight")) + val bias: Variabl = Variabl (biasData, name = Some ("bias")) + + require (weightData.dims == (1, outFeatures, inFeatures), + s"Linear: expected weight dims (1, $outFeatures, $inFeatures), but got ${weightData.dims}") + + require (biasData.dims == (1, outFeatures, 1), + s"Linear: expected bias dims (1, $outFeatures, 1), but got ${biasData.dims}") + + override def parameters: IndexedSeq [Variabl] = IndexedSeq (weight, bias) + + override def forward (input: Variabl): Variabl = + val (_, inF, last) = input.data.dims + require (inF == inFeatures && last == 1, + s"Linear.forward: expected input dims (B, $inFeatures, 1), but got ${input.data.dims}") + weight.bmm (input) + bias + end forward + +end Linear + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Linear` companion object for `Linear` to provide an easier construction API. 
+ */ +object Linear: + + def apply (inFeatures: Int, outFeatures: Int)(using ops: AutogradOps): Linear = + new Linear (inFeatures, outFeatures) + +end Linear + diff --git a/src/main/scala/scalation/modeling/autograd/Module.scala b/src/main/scala/scalation/modeling/autograd/Module.scala new file mode 100644 index 000000000..9ae9d8129 --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/Module.scala @@ -0,0 +1,124 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri April 25 19:50:46 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Base Classes for Neural Network Modules and Layers + */ + +package scalation +package modeling +package autograd + +import scalation.mathstat.TensorD + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `BaseModule` is a base class for all neural network modules (layers, blocks, models). + * Provides support for: + * - Parameter registration + * - Automatic submodule detection + * - Gradient management (zeroing) + * - Training/evaluation mode switching + * Modules are structured hierarchically: a module can contain submodules. + * @param localParameters the parameters (Variables) directly belonging to this module + */ +abstract class BaseModule (localParameters: IndexedSeq [Variabl] = IndexedSeq.empty): + + /** Automatically detect submodules (other BaseModules) within this module. */ + lazy val subModules: IndexedSeq [BaseModule] = + this.getClass.getDeclaredFields.flatMap { f => + f.setAccessible (true) + f.get (this) match + case module: BaseModule => Some(module) + case _ => None + }.toIndexedSeq + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return all trainable parameters, including those from submodules. 
+ */ + def parameters: IndexedSeq [Variabl] = localParameters ++ subModules.flatMap (_.parameters) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the gradients of all parameters. + */ + def gradients: IndexedSeq [TensorD] = parameters.map (_.grad) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Zero out all gradients (in-place). + */ + def zeroGrad ()(using ops: AutogradOps): Unit = parameters.foreach (p => p.grad = ops.zerosLike (p.grad)) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Flag to control training or evaluation behavior. + */ + var inTrainingMode: Boolean = false + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Set the module to training mode (and all submodules recursively). + */ + def train (mode: Boolean = true): Unit = + this.inTrainingMode = mode + subModules.foreach (_.train(mode)) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Set the module to evaluation mode (and all submodules recursively). + */ + def eval (): Unit = train (false) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Replace the current parameters with new ones. + * Useful for weight updates, loading saved models, etc. + * @param newParams The new parameter list to assign + */ + def setParameters(newParams: IndexedSeq [Variabl]): Unit = + val currentParams = this.parameters + require (currentParams.size == newParams.size, "Parameter size mismatch in setParameters!") + for i <- currentParams.indices do currentParams(i).data = newParams(i).data + end setParameters + +end BaseModule + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Standard module for layers that take a single input (e.g., Linear, Conv1D). + * Defines the abstract forward function for single input. 
+ * @param localParameters the parameters (Variables) directly belonging to this module + */ +abstract class Module (localParameters: IndexedSeq [Variabl] = IndexedSeq.empty) + extends BaseModule (localParameters): + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass for a single input Variable. Must be implemented by subclasses. + */ + def forward (input: Variabl): Variabl + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Alias for forward, allows calling the module as a function: `module(x)`. + */ + def apply (input: Variabl): Variabl = forward (input) + +end Module + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Module for layers that take multiple inputs (e.g., RNN cells, attention blocks). + * Defines the abstract forward function for sequence or multiple inputs. + * @param localParameters the parameters (Variables) directly belonging to this module + */ +abstract class SeqModule (localParameters: IndexedSeq [Variabl] = IndexedSeq.empty) + extends BaseModule (localParameters): + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass for multiple input Variables. Must be implemented by subclasses. + */ + def forward (inputs: IndexedSeq [Variabl]): IndexedSeq [Variabl] + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Alias for forward, allows calling the module as a function: `module(xs)`. 
+ */ + def apply (inputs: IndexedSeq [Variabl]): IndexedSeq [Variabl] = forward (inputs) + +end SeqModule + diff --git a/src/main/scala/scalation/modeling/autograd/MultiHeadAttention.scala b/src/main/scala/scalation/modeling/autograd/MultiHeadAttention.scala new file mode 100644 index 000000000..4131bb231 --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/MultiHeadAttention.scala @@ -0,0 +1,199 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri November 21 19:48:13 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Multi-Head Attention Module for Transformer Models + */ + +package scalation +package modeling +package autograd + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Implements the Multi-Head Attention mechanism, a key component of transformer models. + * This class performs linear projections of the input tensors, splits them into multiple + * attention heads, applies scaled dot-product attention to each head, and combines the + * results into a single output tensor. + * @see https://arxiv.org/abs/1706.03762 + * "Attention Is All You Need" by Vaswani et al., 2017. 
+ * @see https://dev-discuss.pytorch.org/t/understanding-multi-head-attention-for-ml-framework-developers/1792 + * "Understanding Multi-Head Attention for ML Framework Developers" + * @see https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html + * PyTorch MultiheadAttention Documentation + * @param numHeads the number of attention heads + * @param dModel the dimensionality of the model (input and output feature size) + */ +class MultiHeadAttention (numHeads: Int, dModel: Int) + extends SeqModule (IndexedSeq.empty): + + private val headDim: Int = dModel / numHeads + + // Linear projections + private val W_q = Linear (dModel, dModel) + private val W_k = Linear (dModel, dModel) + private val W_v = Linear (dModel, dModel) + private val W_o = Linear (dModel, dModel) + + private val sdpa = ScaledDotProductAttention() + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass for the Multi-Head Attention module. + * This method takes three input tensors (query, key, value), performs linear projections, + * splits them into multiple heads, applies scaled dot-product attention to each head, + * and combines the results into a single output tensor. 
+ * @param inputs an `IndexedSeq` containing the query (q), key (k), and value (v) tensors + * @return an `IndexedSeq` containing the resulting output tensor + * @throws IllegalArgumentException if the number of inputs is not 3 + */ + override def forward (inputs: IndexedSeq [Variabl]): IndexedSeq [Variabl] = + + require (inputs.length == 3, s"Expected 3 inputs (q, k, v), got ${inputs.length}") + + val (q, k, v) = (inputs(0), inputs(1), inputs(2)) + + // Linear projections + val B = q.shape.head // batch size + val T = q.shape(1) // sequence length + + // Reshape for linear layers + // TODO: Move the reshape logic inside Linear layer + val qProj = W_q(q.reshape (Seq (B * T, dModel, 1))).reshape (Seq (B, T, dModel)) + val kProj = W_k(k.reshape (Seq (B * T, dModel, 1))).reshape (Seq (B, T, dModel)) + val vProj = W_v(v.reshape (Seq (B * T, dModel, 1))).reshape (Seq (B, T, dModel)) + + // Split into multiple heads + val qHeads = splitHeads (qProj) + val kHeads = splitHeads (kProj) + val vHeads = splitHeads (vProj) + + // Apply Scaled Dot-Product Attention for each head + val attHeads = (0 until numHeads).map { h => + val att = sdpa.forward (IndexedSeq (qHeads(h), kHeads(h), vHeads(h))) + att.head + } + + // Combine heads + val combined = combineHeads (attHeads) + + // Final linear projection (reshape for linear layer) + val output = W_o(combined.reshape (Seq (B * T, dModel, 1))).reshape (Seq (B, T, dModel)) + + IndexedSeq (output) + end forward + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Splits the input tensor into multiple attention heads. + * This method reshapes the input tensor to produce `numHeads` tensors, each + * corresponding to a single attention head. 
+ * @param x the input tensor to split + * @return an `IndexedSeq` containing the split tensors for each head + * @throws IllegalArgumentException if the input tensor's last dimension is not + * divisible by the number of heads + */ + private def splitHeads (x: Variabl): IndexedSeq [Variabl] = + val shape = x.shape +// val batchSize = shape.head // B +// val seqLen = shape(1) // T + val dim = shape(2) // D + + require (dim == dModel, + s"splitHeads: expected last dim $dModel, got $dim") + + require (dModel % numHeads == 0, + s"splitHeads: dModel = $dModel must be divisible by numHeads = $numHeads") + + // Each head dimension + val hDim = headDim + + // We produce H heads, each (B, T, headDim) + (0 until numHeads).map { h => + val start = h * hDim + val end = start + hDim + + // slice last dimension (axis = 2) + x(?, ?, start until end) + } + end splitHeads + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Combines the tensors from multiple attention heads into a single tensor. + * This method concatenates the tensors along the last dimension to produce + * the final output tensor. + * @param heads an `IndexedSeq` containing the tensors for each head + * @return a `Variabl` containing the combined tensor + * @throws IllegalArgumentException if the number of heads does not match `numHeads` + */ + private def combineHeads (heads: IndexedSeq [Variabl]): Variabl = + + require (heads.length == numHeads, + s"combineHeads: expected $numHeads heads, got ${heads.length}") + + // Concatenate along the last dimension (axis = 2) + val combined = concat (heads, axis = 2) + combined + end combineHeads + +end MultiHeadAttention + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Implements the Scaled Dot-Product Attention mechanism. + * This class is a sequence module that computes the attention scores and + * applies them to the value tensor (v) based on the query (q) and key (k) tensors. 
+ * It is a fundamental building block for transformer models. + * @see https://arxiv.org/abs/1706.03762 + * "Attention Is All You Need" by Vaswani et al., 2017. + * @see https://docs.pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html + */ +class ScaledDotProductAttention + extends SeqModule (IndexedSeq.empty): + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass for the Scaled Dot-Product Attention module. + * This method takes three input tensors (q, k, v), computes the attention + * scores, and applies them to the value tensor. + * @param inputs an `IndexedSeq` containing the query (q), key (k), and value (v) tensors + * @return an `IndexedSeq` containing the resulting attention tensor + * @throws IllegalArgumentException if the number of inputs is not 3 + */ + override def forward (inputs: IndexedSeq [Variabl]): IndexedSeq [Variabl] = + require(inputs.length == 3, s"Expected 3 inputs (q, k, v), got ${inputs.length}") + val q = inputs(0) + val k = inputs(1) + val v = inputs(2) + + val att = attention (q, k, v) + IndexedSeq (att) + end forward + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the attention tensor based on the query, key, and value tensors. + * This method calculates the scaled dot-product attention by performing + * the following steps: + * 1. Compute the dot product of the query and the transposed key tensors. + * 2. Scale the result by the square root of the key dimension. + * 3. Apply the softmax function to obtain the attention scores. + * 4. Multiply the attention scores with the value tensor to get the final attention. 
+ * @param q the query tensor + * @param k the key tensor + * @param v the value tensor + * @return the resulting attention tensor + */ + private def attention (q: Variabl, k: Variabl, v: Variabl): Variabl = + val d_k = q.shape.last + val scaleFactor = 1.0 / math.sqrt(d_k) + + val kT = k.transpose(1, 2) + val qkt = q.bmm(kT) // repeated dot product + + val sdp = qkt * scaleFactor // scaled dot product (sdp) + val scr = sdp.softmax // attention scores + val att = scr.bmm(v) // attention (Q, K, V) + att + end attention + +end ScaledDotProductAttention + diff --git a/src/main/scala/scalation/modeling/autograd/Optimizer.scala b/src/main/scala/scalation/modeling/autograd/Optimizer.scala new file mode 100644 index 000000000..b15cff47d --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/Optimizer.scala @@ -0,0 +1,86 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri Apr 25 20:00:17 EDT 2025 + * @see LICENSE (MIT style license file) + * + * @note Autograd: Base Class for Gradient-Based Optimization + * + * Defines an abstract optimizer for parameter updates in the autograd engine. + * Concrete optimizers (e.g., SGD, Adam) should extend this class and implement + * the `step()` method to apply parameter updates based on stored gradients. + * + * Each parameter is represented by a `Variabl` node in the computation graph. + * The optimizer operates directly on `Variabl.grad`, expecting gradients to be + * accumulated via backpropagation before each call to `step()`. + * + * Typical usage: + * 1. Call `zeroGrad()` before a forward/backward pass. + * 2. Perform forward + backward computation. + * 3. Call `step()` to update parameters. 
+ */ + +package scalation +package modeling +package autograd + +import autograd.Variabl + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Optimizer` abstract class optimizes model parameters. + * Notes: + * - Subclasses implement the specific update rule in `step()`. + * - The optimizer assumes that gradients (`p.grad`) have been computed and + * accumulated by the autograd engine before each call to `step()`. + * - Parameters with `null` gradients are safely ignored. + * @param parameters the trainable parameters, each wrapped in a `Variabl` + * @param learningRate the step size (η) used for gradient-based updates + */ +abstract class Optimizer (parameters: IndexedSeq [Variabl], var learningRate: Double): + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Executes a single optimization step by updating each parameter based on its gradient. + */ + def step (): Unit + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Reset gradients for all parameters. + * Typically called before the next forward/backward pass. + * Only parameters with non-null gradient buffers are updated. + */ + def zeroGrad (): Unit = + parameters.foreach { p => if p.grad != null then p.grad.set (0.0) } + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the global L2 norm of all parameter gradients. + * Math: + * g = √(∑_p‖grad_p‖² ) + */ + def gradNorm: Double = + math.sqrt (parameters.map (p => if p.grad != null then p.grad.normFSq else 0.0).sum) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Clip the gradients of all parameters by global norm. + * Scales gradients so that the total norm ≤ maxNorm. + * Math: + * Let g = √(∑_p ‖grad_p‖² ). + * If g > maxNorm, scale all gradients by (maxNorm / g). 
+ */ + def clipGradNorm (maxNorm: Double): Unit = + val totalNorm = math.sqrt (parameters.map (p => if p.grad != null then p.grad.normFSq else 0.0).sum) + if totalNorm > maxNorm then + val scale = maxNorm / (totalNorm + 1e-6) + parameters.foreach { p => if p.grad != null then p.grad *= scale } + end clipGradNorm + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Clip the gradients of all parameters by value (element-wise). + * Each gradient entry smaller than `minVal` is set to `minVal`, + * and each entry larger than `maxVal` is set to `maxVal`. + */ + def clipGradValue (minVal: Double, maxVal: Double): Unit = + parameters.foreach { p => if p.grad != null then p.grad = p.grad.clipByValue (minVal, maxVal) } + +end Optimizer + diff --git a/src/main/scala/scalation/modeling/autograd/Optimizer.scala.bak b/src/main/scala/scalation/modeling/autograd/Optimizer.scala.bak new file mode 100644 index 000000000..94b7a0a93 --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/Optimizer.scala.bak @@ -0,0 +1,34 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri April 25 20:00:17 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Base Class for Gradient-Based Optimization + */ + +package scalation +package modeling +package autograd + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Abstract optimizer for updating model parameters. + * @param parameters an indexed sequence of Variables representing the parameters to be optimized. + */ +abstract class Optimizer (parameters: IndexedSeq [Variabl]): + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Executes a single optimization step by updating each parameter based on its gradient. 
+ */ + def step (): Unit + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Resets the gradient of each parameter to zero. + * This is typically called before computing the next set of gradients. + */ + def zeroGrad (): Unit = + parameters.foreach { p => if p.grad != null then p.grad.set (0.0) } + +end Optimizer + diff --git a/src/main/scala/scalation/modeling/autograd/RNN.scala b/src/main/scala/scalation/modeling/autograd/RNN.scala new file mode 100644 index 000000000..2d1aece68 --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/RNN.scala @@ -0,0 +1,583 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri April 25 19:48:13 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Recurrent Neural Networks + */ + +package scalation +package modeling +package autograd + +import scalation.mathstat.TensorD + +import TensorInitializers._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Base class for recurrent cells. + * Defines the input/hidden sizes and provides utilities + * for initializing the tracking states (e.g., hidden, cell). + * Subclasses must specify the number of states, parameters, + * and the forward computation. + * @see https://github.com/pytorch/pytorch/blob/v2.9.1/torch/nn/modules/rnn.py#L1492 + */ +private [autograd] abstract class RNNCellBase (val inputSize: Int, val hiddenSize: Int)(using ops: AutogradOps) + extends SeqModule (IndexedSeq.empty): + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Number of state tensors tracked by the cell (e.g., 1 for RNN/GRU, 2 for LSTM). + */ + def numTrackingStates: Int + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a batch of zero-initialized tracking states. 
+ * You pass in the batch size to get properly shaped tensors: (batchSize, hiddenSize, 1) + */ + def initialTrackingStates (batchSize: Int): IndexedSeq [Variabl] = + IndexedSeq.fill (numTrackingStates) { Variabl(TensorD.fill (batchSize, hiddenSize, 1, 0.0)) } + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the parameters of the cell. + * + * @return sequence of parameters + */ + override def parameters: IndexedSeq [Variabl] + +end RNNCellBase + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNNCell` class supports a simple RNN cell that updates the hidden state: + * h' = activation(W_ih * x + b_ih + W_hh * h + b_hh) using two biases instead of one. + * @param inputSize number of input features + * @param hiddenSize number of hidden units + * @param activation activation function to use: "tanh" (default) or "relu" + * + * @see https://pytorch.org/docs/stable/generated/torch.nn.RNNCell.html + */ +class RNNCell (inputSize: Int, hiddenSize: Int, activation: String = "tanh") + (using ops: AutogradOps) + extends RNNCellBase (inputSize, hiddenSize): + + // W_ih shape = (1, hiddenSize, inputSize) + val W_ih: Variabl = Variabl ( + rnnUniform (1, hiddenSize, hiddenSize, inputSize), name = Some ("W_ih") + ) + // W_hh shape = (1, hiddenSize, hiddenSize) + val W_hh: Variabl = Variabl ( + rnnUniform (1, hiddenSize, hiddenSize, hiddenSize), name = Some ("W_hh") + ) + // Biases shape = (1, hiddenSize, 1) + val b_ih: Variabl = Variabl ( + rnnUniform (1, hiddenSize, hiddenSize, 1), name = Some ("b_ih") + ) + val b_hh: Variabl = Variabl ( + rnnUniform (1, hiddenSize, hiddenSize, 1), name = Some ("b_hh") + ) + val activationFun: String = activation + + override def numTrackingStates: Int = 1 + + override def parameters: IndexedSeq [Variabl] = IndexedSeq (W_ih, W_hh, b_ih, b_hh) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform the 
forward pass for the RNN cell without using fused operations. + * Computes the next hidden state based on the input and the previous hidden state. + * @param inputs an indexed sequence containing: + * - `input`: the input tensor at the current time step + * - `hPrev`: the hidden state tensor from the previous time step + * @return an indexed sequence containing the next hidden state tensor + * @throws IllegalArgumentException if the number of inputs is not exactly 2 + */ + def forwardUnfused (inputs: IndexedSeq [Variabl]): IndexedSeq [Variabl] = + inputs match + case IndexedSeq (input, hPrev) => + val xProj = W_ih.bmm (input) + b_ih + val hProj = W_hh.bmm (hPrev) + b_hh + val preAct = xProj + hProj + + val hNext = activation match + case "tanh" => tanh (preAct) + case "relu" => relu (preAct) + case other => throw new IllegalArgumentException (s"Unsupported activation: $other") + + IndexedSeq (hNext) + + case _ => + throw new IllegalArgumentException (s"RNNCell expects exactly 2 inputs (input, hPrev), got ${inputs.length}") + end forwardUnfused + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform the forward pass for the RNN cell using fused operations. + * Computes the next hidden state based on the input and the previous hidden state. 
+ * @param inputs an indexed sequence containing: + * - `input`: the input tensor at the current time step + * - `hPrev`: the hidden state tensor from the previous time step + * @return an indexed sequence containing the next hidden state tensor + * @throws IllegalArgumentException if the number of inputs is not exactly 2 + */ + override def forward (inputs: IndexedSeq [Variabl]): IndexedSeq [Variabl] = + inputs match + case IndexedSeq (input, hPrev) => + val fusedCell = RNNCellFused (input, hPrev, W_ih, W_hh, + b_ih, b_hh, activationFun) + val hNext = fusedCell.forward () + IndexedSeq (hNext) + + case _ => + throw new IllegalArgumentException ( + s"RNNCell expects exactly 2 inputs (input, hPrev), got ${inputs.length}") + end forward + +end RNNCell + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNNCell` object provides a factory method for creating instances of the `RNNCell` class. + */ +object RNNCell: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a new `RNNCell` instance. + * @param inputSize number of input features + * @param hiddenSize number of hidden units + * @param activation activation function to use: "tanh" (default) or "relu" + * @param ops implicit autograd operations + * @return a new instance of `RNNCell` + */ + def apply (inputSize: Int, hiddenSize: Int, activation: String = "tanh") + (using ops: AutogradOps): RNNCell = + new RNNCell (inputSize, hiddenSize, activation) + +end RNNCell + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `GRUCell` class supports a gated recurrent unit cell: + * r_t = sigmoid(W_ir * x + b_ir + W_hr * h_{t-1} + b_hr) + * z_t = sigmoid(W_iz * x + b_iz + W_hz * h_{t-1} + b_hz) + * n_t = tanh(W_in * x + b_in + r_t ⊙ (W_hn * h_{t-1} + b_hn)) + * h_t = (1 - z_t) ⊙ n_t + z_t ⊙ h_{t-1} + * This class defines the parameters and forward computation for a GRU cell. 
+ * @see https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html + * @param inputSize number of input features + * @param hiddenSize number of hidden units + */ +class GRUCell (inputSize: Int, hiddenSize: Int)(using ops: AutogradOps) + extends RNNCellBase (inputSize, hiddenSize): + + // Reset gate parameters + /** Weight matrix for the input-to-hidden connection in the reset gate. */ + val W_ir: Variabl = Variabl ( + rnnUniform (1, hiddenSize, hiddenSize, inputSize), name = Some ("W_ir") + ) + /** Weight matrix for the hidden-to-hidden connection in the reset gate. */ + val W_hr: Variabl = Variabl ( + rnnUniform (1, hiddenSize, hiddenSize, hiddenSize), name = Some ("W_hr") + ) + /** Bias for the input-to-hidden connection in the reset gate. */ + val b_ir: Variabl = Variabl ( + rnnUniform (1, hiddenSize, hiddenSize, 1), name = Some ("b_ir") + ) + /** Bias for the hidden-to-hidden connection in the reset gate. */ + val b_hr: Variabl = Variabl ( + rnnUniform (1, hiddenSize, hiddenSize, 1), name = Some ("b_hr") + ) + + // Update gate parameters + /** Weight matrix for the input-to-hidden connection in the update gate. */ + val W_iz: Variabl = Variabl ( + rnnUniform (1, hiddenSize, hiddenSize, inputSize), name = Some ("W_iz") + ) + /** Weight matrix for the hidden-to-hidden connection in the update gate. */ + val W_hz: Variabl = Variabl ( + rnnUniform (1, hiddenSize, hiddenSize, hiddenSize), name = Some ("W_hz") + ) + /** Bias for the input-to-hidden connection in the update gate. */ + val b_iz: Variabl = Variabl ( + rnnUniform (1, hiddenSize, hiddenSize, 1), name = Some ("b_iz") + ) + /** Bias for the hidden-to-hidden connection in the update gate. */ + val b_hz: Variabl = Variabl ( + rnnUniform (1, hiddenSize, hiddenSize, 1), name = Some ("b_hz") + ) + + // New gate parameters + /** Weight matrix for the input-to-hidden connection in the new gate. 
*/ + val W_in: Variabl = Variabl ( + rnnUniform (1, hiddenSize, hiddenSize, inputSize), name = Some ("W_in") + ) + /** Weight matrix for the hidden-to-hidden connection in the new gate. */ + val W_hn: Variabl = Variabl ( + rnnUniform (1, hiddenSize, hiddenSize, hiddenSize), name = Some ("W_hn") + ) + /** Bias for the input-to-hidden connection in the new gate. */ + val b_in: Variabl = Variabl ( + rnnUniform (1, hiddenSize, hiddenSize, 1), name = Some ("b_in") + ) + /** Bias for the hidden-to-hidden connection in the new gate. */ + val b_hn: Variabl = Variabl ( + rnnUniform (1, hiddenSize, hiddenSize, 1), name = Some ("b_hn") + ) + + // Initialize biases for the update gate to 0.5 + // to encourage initial retention of previous hidden state (as per common practice) + // b_iz.data = b_iz.data.map_(_ => 0.5) + // b_hz.data = b_hz.data.map_(_ => 0.5) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the number of tracking states for the GRU cell. + * For GRU, this is always 1. + */ + override def numTrackingStates: Int = 1 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the parameters of the GRU cell. + * @return an indexed sequence of `Variabl` objects representing the parameters + */ + override def parameters: IndexedSeq [Variabl] = + IndexedSeq ( + // Reset gate + W_ir, W_hr, b_ir, b_hr, + // Update gate + W_iz, W_hz, b_iz, b_hz, + // New gate + W_in, W_hn, b_in, b_hn + ) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform the forward pass for the GRU cell. + * Computes the next hidden state based on the input and the previous hidden state. 
+ * @param inputs an indexed sequence containing: + * - `input`: the input tensor at the current time step + * - `hPrev`: the hidden state tensor from the previous time step + * @return an indexed sequence containing the next hidden state tensor + * @throws IllegalArgumentException if the number of inputs is not exactly 2 + */ + override def forward (inputs: IndexedSeq [Variabl]): IndexedSeq [Variabl] = + inputs match + case IndexedSeq (input, hPrev) => + val fusedCell = GRUCellFused (input, hPrev, + W_ir, W_hr, b_ir, b_hr, + W_iz, W_hz, b_iz, b_hz, + W_in, W_hn, b_in, b_hn) + val hNext = fusedCell.forward() + IndexedSeq (hNext) + case _ => + throw new IllegalArgumentException (s"GRUCell expects exactly 2 inputs (input, hPrev), got ${inputs.length}") + end forward +end GRUCell + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `GRUCell` object provides a factory method for creating instances of the `GRUCell` class. + */ + object GRUCell: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a new `GRUCell` instance. + * @param inputSize number of input features + * @param hiddenSize number of hidden units + * @param ops implicit autograd operations + * @return a new instance of `GRUCell` + */ + def apply (inputSize: Int, hiddenSize: Int)(using ops: AutogradOps): GRUCell = + new GRUCell (inputSize, hiddenSize) + + end GRUCell + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNNBase` class serves as a wrapper for recurrent neural network cells. + * It manages the forward pass through the cell, including handling of input sequences, + * initial hidden states, and truncated backpropagation through time (TBPTT). 
+ * @see https://github.com/pytorch/pytorch/blob/v2.9.1/torch/nn/modules/rnn.py#L48 + * @see https://docs.pytorch.org/docs/stable/generated/torch.nn.RNNBase.html + * @param cell the recurrent cell (e.g., RNNCell, GRUCell) to be wrapped + */ +private [autograd] class RNNBase (val cell: RNNCellBase) + extends BaseModule (IndexedSeq.empty): + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Retrieve the parameters of the wrapped recurrent cell. + * @return an indexed sequence of `Variabl` objects representing the parameters + */ + override def parameters: IndexedSeq [Variabl] = cell.parameters + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform the forward pass through the cell without using fused operations. + * This method processes the input sequence step-by-step, updating the hidden state + * at each time step. Optionally supports TBPTT by detaching hidden states periodically. + * @param xs the input sequence, where each element is a tensor of shape (batchSize, inputDim, 1) + * @param h0 optional initial hidden states for the cell (default: zero-initialized) + * @param tbptt the truncation interval for TBPTT (default: 0, meaning no truncation) + * @return a tuple containing: + * - the output sequence (one tensor per time step) + * - the final hidden state(s) + * @throws IllegalArgumentException if the input sequence is empty + */ + def forwardUnfused (xs: IndexedSeq [Variabl], h0: Option [IndexedSeq [Variabl]] = None, + tbptt: Int = 0): (IndexedSeq [Variabl], IndexedSeq [Variabl]) = + + require (xs.nonEmpty, "Input sequence must be non-empty") + val batchSize = xs.head.shape.head + + // initialize all tracking states (1 for RNN/GRU, 2 for LSTM) + var h: IndexedSeq [Variabl] = h0.getOrElse (cell.initialTrackingStates (batchSize)) + + inline def maybeDetach (step: Int): Unit = + if tbptt > 0 && ((step + 1) % tbptt == 0) then + h = h.map(_.detach()) + + val outputs = 
xs.zipWithIndex.map { case (x, t) => + val nextStates = cell (IndexedSeq(x) ++ h) // pass input + all states + h = nextStates // update full state vector + maybeDetach (t) + nextStates.head // convention: first is "h" + } + (outputs, h) + end forwardUnfused + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform the forward pass through the cell, using fused operations if available. + * This method processes the input sequence and selects the appropriate computation + * path based on the type of the wrapped cell. + * @param xs the input sequence, where each element is a tensor of shape (batchSize, inputDim, 1) + * @param h0 optional initial hidden states for the cell (default: zero-initialized) + * @param tbptt the truncation interval for TBPTT (default: 0, meaning no truncation) + * @return a tuple containing: + * - the output sequence (one tensor per time step) + * - the final hidden state(s) + * @throws IllegalArgumentException if the input sequence is empty + */ + def forward (xs: IndexedSeq [Variabl], h0: Option [IndexedSeq [Variabl]] = None, + tbptt: Int = 0): (IndexedSeq [Variabl], IndexedSeq [Variabl]) = + + require (xs.nonEmpty, "Input sequence must be non-empty") + val batchSize = xs.head.shape.head + + // initialize all tracking states (1 for RNN/GRU, 2 for LSTM) + var h: IndexedSeq [Variabl] = h0.getOrElse (cell.initialTrackingStates (batchSize)) + + // dispatch computation based on the type of the cell + cell match + // -------------------------------------------------------------------------- + case rnn: RNNCell => + val fused = RNNFused (input = xs, hidden = h.head, + W_ih = rnn.W_ih, + W_hh = rnn.W_hh, + b_ih = rnn.b_ih, + b_hh = rnn.b_hh, + activation = rnn.activationFun, + tbptt = tbptt) + val (outs, last) = fused.forwardAll () + (outs, IndexedSeq (last)) + + // -------------------------------------------------------------------------- + // fallback path for other cells (GRU, custom, etc.) 
+ case _ => + inline def maybeDetach (step: Int): Unit = + if tbptt > 0 && ((step + 1) % tbptt == 0) then + h = h.map(_.detach ()) + end maybeDetach + + val outputs = xs.zipWithIndex.map { case (x, t) => + val nextStates = cell (IndexedSeq(x) ++ h) + h = nextStates + maybeDetach (t) + nextStates.head // convention: first is "h" + } + (outputs, h) + end match + end forward + +end RNNBase + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNN` class implements a multi-layer recurrent neural network (RNN). + * It supports stacked RNN layers, where each layer processes the input sequence + * and passes its output to the next layer. The class also provides methods for + * parameter retrieval and forward computation. + * @see https://pytorch.org/docs/stable/generated/torch.nn.RNN.html + * @param inputSize number of features in the input at each time step + * @param hiddenSize number of features in the hidden state + * @param numLayers number of stacked RNN layers (default: 1) + * @param activation activation function to use: "tanh" (default) or "relu" + * @param ops implicit autograd operations + */ +class RNN (inputSize: Int, hiddenSize: Int, val numLayers: Int = 1, + activation: String = "tanh")(using ops: AutogradOps) + extends BaseModule: + + /** Layers of the RNN, each represented by an `RNNBase` instance. */ + private val layers: IndexedSeq [RNNBase] = + (0 until numLayers).map { layerIdx => + val inDim = if layerIdx == 0 then inputSize else hiddenSize + RNNBase (RNNCell (inDim, hiddenSize, activation)) + } + + /** Retrieve a specific layer of the RNN. + * @param i index of the layer to retrieve + * @return the `RNNBase` instance representing the layer + */ + def layer (i: Int): RNNBase = layers(i) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Retrieve the parameters of all layers in the RNN. 
+ * @return an indexed sequence of `Variabl` objects representing the parameters + */ + override def parameters: IndexedSeq [Variabl] = layers.flatMap (_.parameters) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform the forward pass through all layers of the RNN. + * Processes the input sequence through each layer, updating the hidden states + * at each time step. Optionally supports truncated backpropagation through time (TBPTT). + * @param inputSeq the input sequence, where each element is a tensor of shape (batchSize, inputDim, 1) + * @param h0 optional initial hidden states for each layer (default: zero-initialized) + * @param tbptt the truncation interval for TBPTT (default: 0, meaning no truncation) + * @return a tuple containing: + * - the output sequence from the top layer (one tensor per time step) + * - the final hidden states for all layers + * @throws IllegalArgumentException if the input sequence is empty + */ + def forward (inputSeq: IndexedSeq [Variabl], + h0: Option [IndexedSeq [Variabl]] = None, + tbptt: Int = 0): (IndexedSeq [Variabl], IndexedSeq [Variabl]) = + + require (inputSeq.nonEmpty, "Input sequence cannot be empty") + + val batchSize = inputSeq.head.shape.head + + // Initialize hidden states for all layers + val hidden: IndexedSeq [Variabl] = + h0.getOrElse { layers.map (_.cell.initialTrackingStates (batchSize).head) } + + var layerInput: IndexedSeq [Variabl] = inputSeq // Sequence flowing upward + val finalHidden = collection.mutable.ArrayBuffer.empty [Variabl] + + // ----- pass sequence through each RNN layer ----- + for (layer, hInit) <- layers.zip (hidden) do // forward this layer through all time steps + val (layerOutput, hLast) = layer.forwardUnfused (layerInput, Some (IndexedSeq(hInit)), tbptt) + layerInput = layerOutput // becomes input to next layer + finalHidden.append (hLast.head) // store final hidden of this layer + + val outputsTop = layerInput // after last layer + 
(outputsTop, finalHidden.toIndexedSeq) + end forward + +end RNN + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNN` object provides a factory method for creating instances of the `RNN` class. + */ +object RNN: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Factory method for creating a standard RNN instance. + * @param inputSize number of features in the input at each time step + * @param hiddenSize number of features in the hidden state + * @param numLayers number of stacked RNN layers (default = 1) + * @param activation nonlinearity to apply ("tanh" or "relu", default = "tanh") + * @param ops implicit autograd operations + * @return an instance of `RNN` + */ + def apply( inputSize: Int, hiddenSize: Int, numLayers: Int = 1, activation: String = "tanh") + (using ops: AutogradOps): RNN = + new RNN (inputSize, hiddenSize, numLayers, activation) + +end RNN + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `GRU` class implements a multi-layer gated recurrent unit (GRU) network. + * It supports stacked GRU layers, where each layer processes the input sequence + * and passes its output to the next layer. The class also provides methods for + * parameter retrieval and forward computation. + * @see https://pytorch.org/docs/stable/generated/torch.nn.GRU.html + * @param inputSize number of features in the input at each time step + * @param hiddenSize number of features in the hidden state + * @param numLayers number of stacked GRU layers (default: 1) + */ +class GRU (inputSize: Int, hiddenSize: Int, val numLayers: Int = 1) + extends BaseModule: + + /** Layers of the GRU, each represented by an `RNNBase` instance. 
*/ + private val layers: IndexedSeq [RNNBase] = + (0 until numLayers).map { layerIdx => + val inDim = if layerIdx == 0 then inputSize else hiddenSize + RNNBase (GRUCell (inDim, hiddenSize)) + } + + /** Retrieve a specific layer of the GRU. + * @param i index of the layer to retrieve + * @return the `RNNBase` instance representing the layer + */ + def layer (i: Int): RNNBase = layers(i) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Retrieve the parameters of all layers in the GRU. + * + * @return an indexed sequence of `Variabl` objects representing the parameters + */ + override def parameters: IndexedSeq [Variabl] = layers.flatMap (_.parameters) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform the forward pass through all layers of the GRU. + * Processes the input sequence through each layer, updating the hidden states + * at each time step. Optionally supports truncated backpropagation through time (TBPTT). 
+ * @param inputSeq the input sequence, where each element is a tensor of shape (batchSize, inputDim, 1) + * @param h0 optional initial hidden states for each layer (default: zero-initialized) + * @param tbptt the truncation interval for TBPTT (default: 0, meaning no truncation) + * @return a tuple containing: + * - the output sequence from the top layer (one tensor per time step) + * - the final hidden states for all layers + * @throws IllegalArgumentException if the input sequence is empty + */ + def forward (inputSeq: IndexedSeq [Variabl], h0: Option [IndexedSeq [Variabl]] = None, + tbptt: Int = 0): (IndexedSeq [Variabl], IndexedSeq [Variabl]) = + + require (inputSeq.nonEmpty, "Input sequence cannot be empty") + + val batchSize = inputSeq.head.shape.head + + // Initialize hidden states for all layers + val hidden: IndexedSeq [Variabl] = + h0.getOrElse { layers.map (_.cell.initialTrackingStates (batchSize).head) } + + var layerInput: IndexedSeq [Variabl] = inputSeq + val finalHidden = collection.mutable.ArrayBuffer.empty [Variabl] + + // ----- pass sequence through each GRU layer ----- + for (layer, hInit) <- layers.zip(hidden) do + val (layerOutput, hLast) = layer.forward (layerInput, Some (IndexedSeq (hInit)), tbptt) + layerInput = layerOutput + finalHidden.append (hLast.head) + + val outputsTop = layerInput + (outputsTop, finalHidden.toIndexedSeq) + end forward + +end GRU + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `GRU` object provides a factory method for creating instances of the `GRU` class. + */ +object GRU: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Factory method for creating a standard GRU instance. 
+ * @param inputSize number of features in the input at each time step + * @param hiddenSize number of features in the hidden state + * @param numLayers number of stacked GRU layers (default = 1) + * @return an instance of `GRU` + */ + def apply (inputSize: Int, hiddenSize: Int, numLayers: Int = 1): GRU = +// (using ops: AutogradOps): GRU = + new GRU (inputSize, hiddenSize, numLayers) + +end GRU + diff --git a/src/main/scala/scalation/modeling/autograd/RNN.scala.bak b/src/main/scala/scalation/modeling/autograd/RNN.scala.bak new file mode 100644 index 000000000..ea3093ddc --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/RNN.scala.bak @@ -0,0 +1,230 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri April 25 19:48:13 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Recurrent Neural Networks + */ + +package scalation +package modeling +package autograd + +import scalation.mathstat.TensorD + +import TensorInitializers._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNN` class ... + */ +class RNN (inputSize: Int, hiddenSize: Int, numLayers: Int = 1, activation: String = "tanh") + (using ops: AutogradOps) + extends BaseModule: + + // One RNNBase ( = one UNROLLED layer ) per level + private val layers: IndexedSeq [RNNBase] = + (0 until numLayers).map { layerIdx => + val inDim = if layerIdx == 0 then inputSize else hiddenSize + RNNBase ("rnn", inDim, hiddenSize, activation = activation) + } + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the parameters. + */ + override def parameters: IndexedSeq [Variabl] = layers.flatMap (_.parameters) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward through all layers. 
+ * `inputSeq` – Seq [Variabl] (length = seqLen) + * shape of each element: (inputDim, 1, batch) + * `h0` – optional initial hidden states for every + * layer (Seq with length = numLayers). + * RETURNS: + * outputs – Seq [Variabl] (seqLen) – from top layer + * h_n – Seq [Variabl] (numLayers) final hidden states + */ + def forward (inputSeq: IndexedSeq [Variabl], h0: Option [IndexedSeq [Variabl]] = None, + tbptt: Int = 0): // 0 ⇒ no truncation + (IndexedSeq [Variabl], IndexedSeq [Variabl]) = + + require (inputSeq.nonEmpty, "Input sequence cannot be empty") + + val batchSize = inputSeq.head.shape.last + + // One hidden Variabl per layer + val hidden = h0.getOrElse { layers.map (_.cell.initialTrackingStates (batchSize).head) } + + var layerInput: IndexedSeq [Variabl] = inputSeq // sequence flowing upward + val finalHidden = collection.mutable.ArrayBuffer.empty [Variabl] + + // ----- pass sequence through each RNN layer ----- + for (layer, hInit) <- layers.zip (hidden) do // forward this layer through all time steps + val (layerOutput, hLast) = layer.forward (layerInput, Some (hInit), tbptt) + layerInput = layerOutput // becomes input to next layer + finalHidden.append (hLast) // store final hidden of this layer + + val outputsTop = layerInput // after last layer + (outputsTop, finalHidden.toIndexedSeq) + end forward + +end RNN + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNN` object ... + */ +object RNN: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Factory method for creating a standard RNN instance. 
+ * @param inputSize number of features in the input at each time step + * @param hiddenSize number of features in the hidden state + * @param numLayers number of stacked RNN layers (default = 1) + * @param activation nonlinearity to apply ("tanh" or "relu", default = "tanh") + * @return an instance of RNN + */ + def apply ( inputSize: Int, hiddenSize: Int, numLayers: Int = 1, activation: String = "tanh") + (using ops: AutogradOps): RNN = + new RNN (inputSize, hiddenSize, numLayers, activation) + +end RNN + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNNBase` class ... + */ +class RNNBase (val cell: RNNCell) // (using ops: AutogradOps) + extends BaseModule (IndexedSeq.empty): + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + override def parameters: IndexedSeq[Variabl] = cell.parameters + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + def forward (xs: IndexedSeq [Variabl], h0: Option [Variabl] = None, tbptt: Int = 0): + (IndexedSeq [Variabl], Variabl) = + + require (xs.nonEmpty, "Input sequence must be non‑empty") + val batchSize = xs.head.shape.head + var h = h0.getOrElse (cell.initialTrackingStates (batchSize).head) + + // Helper: detach `h` if TBPTT is active and step is a multiple of k + inline def maybeDetach(step: Int): Unit = + if tbptt > 0 && step % tbptt == 0 then h = h.detach() + + val outputs = xs.zipWithIndex.map { case (x, t) => + maybeDetach (t) // TBPTT gate + val next = cell (IndexedSeq(x, h)).head // cell returns Seq(h_t) + h = next // update hidden + next + } + + (outputs, h) + end forward + +end RNNBase + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNNBase` object ... + */ +object RNNBase: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create an RNNBase using the specified cell type and dimensions. 
+ * @param cellType "rnn", "gru", or "lstm" + * @param inputSize number of input features (nx) + * @param hiddenSize number of hidden units (na) + * @return an RNNBase instance using the specified cell + */ + def apply (cellType: String, inputSize: Int, hiddenSize: Int, activation: String = "tanh") + (using ops: AutogradOps): RNNBase = + val cell = cellType match + case "rnn" => RNNCell (inputSize, hiddenSize, activation) +// case "gru" => GRUCell (inputSize, hiddenSize) +// case "lstm" => LSTMCell (inputSize, hiddenSize) + case other => throw new IllegalArgumentException (s"Unsupported cell type: $other") + + new RNNBase (cell) + end apply + +end RNNBase + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNNCell` class supports a simple RNN cell that updates the hidden state: + * h' = activation(W_ih * x + b_ih + W_hh * h + b_hh) using two biases instead of one. + * @param inputSize number of input features + * @param hiddenSize number of hidden units + * @param activation activation function to use: "tanh" (default) or "relu" + */ +class RNNCell (inputSize: Int, hiddenSize: Int, activation: String = "tanh") + (using ops: AutogradOps) + extends RNNCellBase (inputSize, hiddenSize): + + override protected def weightInit: (Int, Int, Int) => TensorD = + if activation == "relu" then heInit else xavierInit + + override def numTrackingStates: Int = 1 + + override def forward (inputs: IndexedSeq [Variabl]): IndexedSeq [Variabl] = + inputs match + case IndexedSeq (input, hPrev) => + val xProj = W_ih.bmm(input) + b_ih + val hProj = W_hh.bmm(hPrev) + b_hh + val preAct = xProj + hProj + + val hNext = activation match + case "tanh" => tanh (preAct) + case "relu" => relu (preAct) + case other => throw new IllegalArgumentException (s"Unsupported activation: $other") + + IndexedSeq (hNext) + + case _ => + throw new IllegalArgumentException (s"RNNCell expects exactly 2 inputs (input, hPrev), got ${inputs.length}") + + end forward + 
+end RNNCell + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNNCell` object ... + */ +object RNNCell: + + def apply (inputSize: Int, hiddenSize: Int, activation: String = "tanh") + (using ops: AutogradOps): RNNCell = + new RNNCell (inputSize, hiddenSize, activation) + +end RNNCell + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNNCellBase` abstract class ... + */ +abstract class RNNCellBase (val inputSize: Int, val hiddenSize: Int)(using ops: AutogradOps) + extends SeqModule (IndexedSeq.empty): + + protected def weightInit: (Int, Int, Int) => TensorD = xavierInit + + val W_ih: Variabl = Variabl (weightInit (1, hiddenSize, inputSize), name = Some ("W_ih")) + val W_hh: Variabl = Variabl (weightInit (1, hiddenSize, hiddenSize), name = Some ("W_hh")) + val b_ih: Variabl = Variabl (zeros(1, hiddenSize, 1), name = Some ("b_ih")) + val b_hh: Variabl = Variabl (zeros(1, hiddenSize, 1), name = Some ("b_hh")) + + def numTrackingStates: Int + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a batch of zero-initialized tracking states. 
+ * You pass in the batch size to get properly shaped tensors: (hiddenSize, 1, batchSize) + */ + def initialTrackingStates (batchSize: Int): IndexedSeq [Variabl] = + IndexedSeq.fill (numTrackingStates) { Variabl(TensorD.fill (batchSize, hiddenSize, 1, 0.0)) } + + override def parameters: IndexedSeq [Variabl] = IndexedSeq (W_ih, W_hh, b_ih, b_hh) + +end RNNCellBase + diff --git a/src/main/scala/scalation/modeling/autograd/RNNTest.scala.bak b/src/main/scala/scalation/modeling/autograd/RNNTest.scala.bak new file mode 100644 index 000000000..87bc6a02d --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/RNNTest.scala.bak @@ -0,0 +1,318 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri April 25 19:40:13 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Unit Tests for Autograd Functionality + */ + +package scalation +package modeling +package autograd + +import scalation.mathstat.{MatrixD, TensorD, VectorD} + +import forecasting.MakeMatrix4TS.makeMatrix4L + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNNTest` object contains various @main tests for autograd RNN functionality. + */ +object RNNTest: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Creates sequences for the RNN model from the input and output matrices. + * This function takes the input and output matrices and creates sequences of a specified length. + * Each sequence is a slice of the original matrices, and the function returns tensors containing + * these sequences. 
+ * @param x the input matrix of shape [n_samples, n_features] + * @param yy the output matrix of shape [n_samples, n_output_features] + * @param sequence_length the length of each sequence + * @return A tuple containing two tensors: + * - x_sequences: The input sequences tensor of shape [sequence_length, n_features, n_sequences] + * - y_sequences: The output sequences tensor of shape [sequence_length, n_output_features, n_sequences] + */ + def create_sequences (x: MatrixD, yy: MatrixD, sequence_length: Int): (TensorD, TensorD) = + val n_samples = x.dim + val n_sequences = n_samples - sequence_length + 1 + val x_sequences: TensorD = new TensorD (sequence_length, x.dim2, n_sequences) + val y_sequences: TensorD = new TensorD (sequence_length, yy.dim2, n_sequences) + + for seq <- 0 until n_sequences do + val sequence = seq until (seq + sequence_length) + x_sequences(?, ?, seq) = x(sequence) + y_sequences(?, ?, seq) = yy(sequence) + end for + + (x_sequences, y_sequences) + end create_sequences + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Creates an output matrix for the RNN model from the target vector. + * This function takes a target vector and creates a matrix where each row represents + * a shifted version of the target vector. The number of columns in the matrix is equal + * to the forecasting horizon (hh). If the shifted index exceeds the length of the target + * vector, the value is set to -0.0. 
+ * @param y the target vector of shape [n_samples] + * @param hh the forecasting horizon (number of future steps to predict) + * @return A matrix of shape [n_samples - 1, hh] where each row is a shifted version of the target vector + */ + private def makeOutputMatrix (y: VectorD, hh: Int): MatrixD = + val yy = new MatrixD(y.dim - 1, hh) + for t <- 0 until yy.dim; j <- 0 until hh do + yy(t, j) = if t + 1 + j >= y.dim then -0.0 else y(t + 1 + j) + yy + end makeOutputMatrix + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Builds the input and output matrices for time series forecasting. + * This function takes a target vector and creates input and output matrices for time series forecasting. + * The input matrix is created using the specified number of lags, and the output matrix is created using + * the specified forecasting horizon. The function prints the dimensions of the input and output matrices + * and the value of the last element in the target vector. + * @param y the target vector of shape [n_samples] + * @param lags the number of lags to use for creating the input matrix + * @param hh the forecasting horizon (number of future steps to predict) + * @param backcast a boolean flag indicating whether to include backcasting (default is true) + * @return A tuple containing two matrices: + * - The input matrix of shape [n_samples - lags, lags] + * - The output matrix of shape [n_samples - 1, hh] + */ + def buildMatrix4TS (y: VectorD, lags: Int, hh: Int, backcast: Boolean = true): (MatrixD, MatrixD) = + val x = makeMatrix4L (y, lags, backcast) + val yy = makeOutputMatrix (y, hh) + println (s"dims of x = ${x.dims}") + println (s"dims of yy = ${yy.dims}") + println (s"last element in y = ${y(y.dim - 1)}") + (x, yy) + end buildMatrix4TS + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Approximate the gradient of the loss function using a finite difference. 
+ * @param param the model parameters + * @param computeLoss the function to compute the loss + * @param epsilon the size of the finite difference + */ + def finiteDiffGrad (param: Variabl, computeLoss: () => Double, epsilon: Double = 1e-5): TensorD = + val (d1, d2, d3) = param.data.dims + val gradApprox = TensorD.fill (d1, d2, d3, 0.0) + + for i <- 0 until d1; j <- 0 until d2; k <- 0 until d3 do + val orig = param.data(i, j, k) + param.data(i, j, k) = orig + epsilon // Perturb +epsilon + val lossPlus = computeLoss () + param.data(i, j, k) = orig - epsilon // Perturb -epsilon + val lossMinus = computeLoss () + param.data(i, j, k) = orig // Restore + + // Central difference + gradApprox(i, j, k) = (lossPlus - lossMinus) / (2 * epsilon) + end for + + gradApprox + end finiteDiffGrad + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Check that the analytical and numerical gradients match within tolerance. + * @param analytical the analytical gradient + * @param numerical the numerical gradient + * @param atol the absolute tolerance + * @param rtol the relative tolerance + */ + def assertGradientsClose (analytical: TensorD, numerical: TensorD, + atol: Double = 1e-4, rtol: Double = 1e-3): Unit = + val (d1, d2, d3) = analytical.dims + + for i <- 0 until d1; j <- 0 until d2; k <- 0 until d3 do + val a = analytical(i, j, k) + val n = numerical(i, j, k) + val diff = math.abs(a - n) + val tol = atol + rtol * math.abs (n) + assert (diff <= tol, s"Gradient mismatch at ($i,$j,$k): autograd=$a, numerical=$n, diff=$diff > tol=$tol") + end for + + println ("✅ Gradients match within tolerance.") + end assertGradientsClose + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `rnnTest1` main function tests the `RRNCell` class. 
+ * > runMain scalation.modeling.autodiff.rnnTest1 + */ + @main def rnnTest1 (): Unit = + + banner ("RNNCell - Forward + Backward Test 1 (1 timesteps × 2 sequences × 4 feature)") + + given ops : AutogradOps = AutogradOps.default + + val inputSize = 4 + val hiddenSize = 3 + val batchSize = 2 // 2 sequences (batch size) + val seqLen = 1 // just one time step for now + + println (s"Input size: $inputSize, Hidden size: $hiddenSize, Batch size: $batchSize, Sequence length: $seqLen") + + // Step 1: Create 2 sequences with 4 time steps each and 1 feature + // Tensor shape: (inputSize, seqLen, batchSize) = (1, 4, 2) + val inputData = TensorD ((batchSize, inputSize, 1), + 1.0, 2.0, 3.0, 4.0, // batch 0 -> four features + 5.0, 6.0, 7.0, 8.0) // batch 1 -> four features + val input = Variabl (inputData, name = Some("input")) + val hPrevData = TensorD ((batchSize, hiddenSize, 1), + 0.1, 0.2, 0.3, // batch 0 + 0.4, 0.5, 0.6) // batch 1 + + val hPrev = Variabl (hPrevData, name = Some ("hPrev")) + + println (s"Input: $input") + println (s"Previous hidden state (hPrev): $hPrev") + + // Step 4: Construct RNNCell + val cell = RNNCell(inputSize, hiddenSize, activation = "tanh") + + println (s"RNNCell: $cell") + println (s"input.shape = ${input.shape}") + println (s"hPrev.shape = ${hPrev.shape}") + println (s"W_ih.shape = ${cell.W_ih.shape}") + println (s"W_hh.shape = ${cell.W_hh.shape}") + println (s"b_ih.shape = ${cell.b_ih.shape}") + println (s"b_hh.shape = ${cell.b_hh.shape}") + println (s"W_ih.data = ${cell.W_ih.data}") + println (s"W_hh.data = ${cell.W_hh.data}") + println (s"b_ih.data = ${cell.b_ih.data}") + println (s"b_hh.data = ${cell.b_hh.data}") + + // Step 5: Forward pass through one step + val hNext = cell (IndexedSeq (input, hPrev)).head + println (s"Forward output (hNext): $hNext") + + // Step 6: Loss and backward + val loss = hNext.mean + println (s"Loss (mean of hNext): $loss") + + loss.backward () + + println (s"Gradient of input: ${input.grad}") + println 
(s"Gradient of hPrev: ${hPrev.grad}") + println (s"Gradient of W_ih: ${cell.W_ih.grad}") + println (s"Gradient of W_hh: ${cell.W_hh.grad}") + println (s"Gradient of b_ih: ${cell.b_ih.grad}") + println (s"Gradient of b_hh: ${cell.b_hh.grad}") + + // Step 7: Gradient check + val gradApprox = finiteDiffGrad (cell.W_ih, () => + val xDet = input.detach () + val hDet = hPrev.detach () + val hNext = cell (IndexedSeq(xDet, hDet)).head + hNext.data.mean) + + println (s"Approximate grad (W_ih): $gradApprox") + println (s"Autograd grad (W_ih): ${cell.W_ih.grad}") + assertGradientsClose(cell.W_ih.grad, gradApprox) + + println ("\n✅ RNNCell test complete with 1 time step, 2 sequences and 4 features.") + + end rnnTest1 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `rnnTest2` main function tests the `RRNBase` class. + * > runMain scalation.modeling.autodiff.rnnTest2 + */ + @main def rnnTest2 (): Unit = + + banner ("RNNBase − Forward + Backward Test 2 (2 time steps * 2 sequences * 4 features)") + + given ops: AutogradOps = AutogradOps.default + + // --------------------------------------------------------------------------- + // Config + // --------------------------------------------------------------------------- + val inputSize = 4 // F + val hiddenSize = 3 // H + val batchSize = 2 // B + val seqLen = 2 // T + + println (s"inputSize=$inputSize hiddenSize=$hiddenSize batchSize=$batchSize seqLen=$seqLen") + + // --------------------------------------------------------------------------- + // 1) Build full mini‑batch sequence : shape (B, T, F) = (2, 2, 4) + // batch‑0 : [1,2,3,4] then [9,10,11,12] + // batch‑1 : [5,6,7,8] then [13,14,15,16] + // --------------------------------------------------------------------------- + val seqData = TensorD ((batchSize, seqLen, inputSize), + 1.0, 2.0, 3.0, 4.0, + 9.0, 10.0, 11.0, 12.0, + 5.0, 6.0, 7.0, 8.0, + 13.0, 14.0, 15.0, 16.0) + + // 
--------------------------------------------------------------------------- + // 2) Initial hidden state (B, H, 1) = (2, 3, 1) + // --------------------------------------------------------------------------- + val h0Data = TensorD ((batchSize, hiddenSize, 1), + 0.1, 0.2, 0.3, // batch‑0 + 0.4, 0.5, 0.6) // batch‑1 + val hPrev = Variabl (h0Data, name = Some ("hPrev")) + + // Helper: wrap a (B × F) MatrixD slice into (B, F, 1) + def stepTensor (m: MatrixD): TensorD = + val base = TensorD.fromMatrix (m) + base.permute (Seq (1, 2, 0)) + + // --------------------------------------------------------------------------- + // 3) Convert each time‑step slice into a Variabl (B, F, 1) + // --------------------------------------------------------------------------- + val inputSeq: IndexedSeq [Variabl] = + (0 until seqLen).map { t => + val mat = seqData(?, t) // (2 × 4) + val ten = stepTensor(mat) + Variabl (ten, name = Some(s"input_t$t")) + } + + // --------------------------------------------------------------------------- + // 4) Build RNNBase (simple tanh RNN cell) + // --------------------------------------------------------------------------- + val rnn = RNNBase ("rnn", inputSize, hiddenSize, "tanh") + val cell = rnn.cell + println (s"RNN cell weights W_ih.shape=${cell.W_ih.shape} W_hh.shape=${cell.W_hh.shape}") + + // --------------------------------------------------------------------------- + // 5) Forward through both time‑steps + // --------------------------------------------------------------------------- + val (outputs, hLast) = rnn.forward (inputSeq, Some(hPrev)) + + outputs.zipWithIndex.foreach { case (h_t, t) => + println (s"h_t[$t] = ${h_t.data}") + } + println (s"Final hidden h_last = ${hLast.data}") + + // --------------------------------------------------------------------------- + // 6) Dummy loss = mean of final hidden, backward + // --------------------------------------------------------------------------- + val loss = hLast.mean + println (s"Loss = 
$loss") + loss.backward () + + inputSeq.foreach (x => println (s"${x.name.get}.grad = ${x.grad}")) + println (s"hPrev.grad = ${hPrev.grad}") + println (s"W_ih.grad = ${cell.W_ih.grad}") + println (s"W_hh.grad = ${cell.W_hh.grad}") + + // --------------------------------------------------------------------------- + // 7) Finite‑difference check on W_ih + // --------------------------------------------------------------------------- + val gradFD = finiteDiffGrad (cell.W_ih, () => + val freshIn = inputSeq.map (_.detach()) + val freshH0 = hPrev.detach () + val (_, hL) = rnn.forward (freshIn, Some (freshH0)) + hL.data.mean) + + println (s"Finite‑diff W_ih.grad = $gradFD") + println (s"Autograd W_ih.grad = ${cell.W_ih.grad}") + assertGradientsClose(cell.W_ih.grad, gradFD) + + println ("\n✅ rnnTest2 finished: batch‑first, 2‑step sequence.") + + end rnnTest2 + +end RNNTest + diff --git a/src/main/scala/scalation/modeling/autograd/RNNTestCore.scala b/src/main/scala/scalation/modeling/autograd/RNNTestCore.scala new file mode 100644 index 000000000..6a6734348 --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/RNNTestCore.scala @@ -0,0 +1,723 @@ +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Tue Nov 11 10:44:32 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Unit Tests for Core RNN/GRU Autograd Functionality + */ + +package scalation +package modeling +package autograd + +import scalation.mathstat.{MatrixD, TensorD} + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNNTestCore` object defines a suite of @main entrypoints that exercise the + * autograd system using recurrent neural network components. 
These tests verify: + * - forward computation consistency for RNNCell and GRUCell + * - correct propagation of hidden states through `RNNBase` + * - correctness of gradient backpropagation through time + * - multilayer RNN/GRU behavior and parameter interaction + * - construction and export of autograd computation graphs for debugging + * All tests use synthetic inputs and manually assigned weights/biases to ensure + * deterministic behavior to validate against PyTorch, enabling reliable + * gradient-checking via finite differences using `GradCheck.gradCheck`. + * @note This file focuses exclusively on core autograd correctness and does not + * contain any real-data forecasting experiments. + */ +object RNNTestCore: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `rnnCellTest` main function tests the `RNNCell` class. + * > runMain scalation.modeling.autograd.RnnTest.rnnCellTest + */ + @main def rnnCellTest (): Unit = + banner ("Simple RNN Cell - Forward + Backward Test") + + val inputSize = 3 + val hiddenSize = 4 +// val batchSize = 2 + + // Create a toy RNN cell (tanh activation) + val cell = RNNCell(inputSize, hiddenSize, activation = "tanh") + + // ---- Manually set weights/biases for reproducibility ---- + + // W_ih: shape (1, hiddenSize, inputSize) = (1,4,3) + val WihM = new MatrixD (4, 3, Array ( + Array (0.1, 0.2, 0.3), + Array (0.4, 0.5, 0.6), + Array (0.7, 0.8, 0.9), + Array (1.0, 1.1, 1.2) + )) + cell.W_ih.data = TensorD (WihM) + + // W_hh: shape (1, hiddenSize, hiddenSize) = (1,4,4) + val WhhM = new MatrixD (4, 4, Array ( + Array (0.1, 0.0, 0.0, 0.0), + Array (0.0, 0.1, 0.0, 0.0), + Array (0.0, 0.0, 0.1, 0.0), + Array (0.0, 0.0, 0.0, 0.1) + )) + cell.W_hh.data = TensorD (WhhM) + + // Biases: (1, hiddenSize, 1) + val bIhM = new MatrixD (4, 1, Array ( + Array (0.01), + Array (0.02), + Array (0.03), + Array (0.04) + )) + cell.b_ih.data = TensorD (bIhM) + + val bHhM = new MatrixD (4, 1, Array ( + Array (0.0), + 
Array (0.0), + Array (0.0), + Array (0.0) + )) + cell.b_hh.data = TensorD (bHhM) + + // ---- Dummy batch input ---- + // x: shape (batch, inputSize, 1) = (2,3,1) + val x0 = new MatrixD (3, 1, Array (Array (0.1), Array (0.2), Array (0.3))) + val x1 = new MatrixD (3, 1, Array (Array (0.4), Array (0.5), Array (0.6))) + val x = Variabl (TensorD (x0, x1), name = Some ("x")) + + // ---- Dummy previous hidden state ---- + // hPrev: shape (batch, hiddenSize, 1) = (2,4,1) + val h0 = new MatrixD (4, 1, Array (Array (0.01), Array (0.02), Array (0.03), Array (0.04))) + val h1 = new MatrixD (4, 1, Array (Array (0.05), Array (0.06), Array (0.07), Array (0.08))) + val hPrev = Variabl (TensorD (h0, h1), name = Some ("hPrev")) + + // Forward through the RNNCell + val hNext = cell (IndexedSeq (x, hPrev)).head // Since RNNCell returns IndexedSeq of length 1 + + println (s"Input x shape: ${x.shape}, values: ${x.data}") + println (s"Prev hidden shape: ${hPrev.shape}, values: ${hPrev.data}") + println (s"Next hidden shape: ${hNext.shape}, values: ${hNext.data}") + + val R = TestReport() + + // Gradient check w.r.t input + R.record ("RNNCell - Input GradCheck") { + GradCheck.gradCheck (x, () => cell (IndexedSeq (x, hPrev)).head.sum, quiet = true) + } + + // Gradient check w.r.t previous hidden state + R.record ("RNNCell - Hidden GradCheck") { + GradCheck.gradCheck (hPrev, () => cell (IndexedSeq (x, hPrev)).head.sum, quiet = true) + } + + // Gradient check w.r.t parameters + R.record ("RNNCell - W_ih GradCheck") { + GradCheck.gradCheck (cell.W_ih, () => cell (IndexedSeq (x, hPrev)).head.sum, quiet = true) + } + R.record ("RNNCell - W_hh GradCheck") { + GradCheck.gradCheck (cell.W_hh, () => cell (IndexedSeq (x, hPrev)).head.sum, quiet = true) + } + R.record ("RNNCell - b_ih GradCheck") { + GradCheck.gradCheck (cell.b_ih, () => cell (IndexedSeq (x, hPrev)).head.sum, quiet = true) + } + R.record ("RNNCell - b_hh GradCheck") { + GradCheck.gradCheck (cell.b_hh, () => cell (IndexedSeq (x, 
hPrev)).head.sum, quiet = true) + } + + R.summary ("RNNCell Forward + Backward Test") + + val outPath = "target/autograd/visualization/computation_graph_rnn_cell.dot" + GraphExporter.writeDot(cell (IndexedSeq (x, hPrev)).head, outPath, renderSvg = true) + println (s"Computation graph DOT written to $outPath") + end rnnCellTest + + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `rnnBaseTest` main function tests the `RNNBase` class. + * > runMain scalation.modeling.autograd.rnnBaseTest + */ + @main def rnnBaseTest (): Unit = + banner ("RNNBase - Forward + Backward Test") + + val inputSize = 3 + val hiddenSize = 4 +// val seqLen = 2 +// val batchSize = 2 + + val cell = RNNCell(inputSize, hiddenSize, activation = "tanh") + val rnnBase = new RNNBase(cell) + + // ---- Manually set weights/biases (same as RNNCell test for reproducibility) ---- + val WihM = new MatrixD (4, 3, Array ( + Array (0.1, 0.2, 0.3), + Array (0.4, 0.5, 0.6), + Array (0.7, 0.8, 0.9), + Array (1.0, 1.1, 1.2) + )) + cell.W_ih.data = TensorD (WihM) + + val WhhM = new MatrixD (4, 4, Array ( + Array (0.1, 0.0, 0.0, 0.0), + Array (0.0, 0.1, 0.0, 0.0), + Array (0.0, 0.0, 0.1, 0.0), + Array (0.0, 0.0, 0.0, 0.1) + )) + cell.W_hh.data = TensorD (WhhM) + + val bIhM = new MatrixD (4, 1, Array (Array (0.01), Array (0.02), Array (0.03), Array (0.04))) + cell.b_ih.data = TensorD (bIhM) + + val bHhM = new MatrixD (4, 1, Array (Array (0.0), Array (0.0), Array (0.0), Array (0.0))) + cell.b_hh.data = TensorD (bHhM) // all weights are shared across timesteps + + // ---- Sequence input ---- + // Two timesteps, each (batch, inputSize, 1) + val x0 = new MatrixD (3, 1, Array (Array (0.1), Array (0.2), Array (0.3))) + val x1 = new MatrixD (3, 1, Array (Array (0.4), Array (0.5), Array (0.6))) + val xs = IndexedSeq ( + Variabl (TensorD (x0, x1), name = Some ("x_t0")), + Variabl (TensorD (x0, x1), name = Some ("x_t1")) // reuse same input for step 1 for simplicity + ) + + // ---- 
Initial hidden ---- + val h0_batch0 = new MatrixD (4, 1, Array (Array (0.01), Array (0.02), Array (0.03), Array (0.04))) + val h0_batch1 = new MatrixD (4, 1, Array (Array (0.05), Array (0.06), Array (0.07), Array (0.08))) + val h0 = IndexedSeq (Variabl (TensorD (h0_batch0, h0_batch1), name = Some ("h0"))) + + // Forward through RNNBase + val (outputs, hLast) = rnnBase.forward (xs, Some (h0)) + + println (s"Outputs per timestep: ${outputs.map(_.data)}") + println (s"Final hidden: ${hLast.head.data}") + + // ---- Gradient checks ---- + val R = TestReport() + R.record ("RNNBase - x_t0 GradCheck") { + GradCheck.gradCheck (xs(0), () => rnnBase.forward (xs, Some (h0))._1.last.sum, quiet = true) + } + R.record ("RNNBase - h0 GradCheck") { + GradCheck.gradCheck (h0.head, () => rnnBase.forward (xs, Some (h0))._1.last.sum, quiet = true) + } + R.record ("RNNBase - W_ih GradCheck") { + GradCheck.gradCheck (cell.W_ih, () => rnnBase.forward (xs, Some (h0))._1.last.sum, quiet = true) + } + R.record ("RNNBase - W_hh GradCheck") { + GradCheck.gradCheck (cell.W_hh, () => rnnBase.forward (xs, Some (h0))._1.last.sum, quiet = true) + } + R.record ("RNNBase - b_ih GradCheck") { + GradCheck.gradCheck (cell.b_ih, () => rnnBase.forward (xs, Some (h0))._1.last.sum, quiet = true) + } + R.record ("RNNBase - b_hh GradCheck") { + GradCheck.gradCheck (cell.b_hh, () => rnnBase.forward (xs, Some (h0))._1.last.sum, quiet = true) + } + + R.summary ("RNNBase Forward + Backward Test") + + val outPath = "target/autograd/visualization/computation_graph_rnn_base.dot" + GraphExporter.writeDot(hLast.head, outPath, renderSvg = true) + println (s"Computation graph DOT written to $outPath") + end rnnBaseTest + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `rnnMultiLayerTest` main function tests the `RNN` class. 
+ * > runMain scalation.modeling.autograd.rnnMultiLayerTest + */ + @main def rnnMultiLayerTest (): Unit = + banner ("RNN Multi Layer (2 layers) - Forward + Backward Test") + + val inputSize = 3 + val hiddenSize = 4 +// val seqLen = 2 +// val batchSize = 2 + val numLayers = 2 + + // ---- Construct 2-layer RNN ---- + val rnn = RNN(inputSize, hiddenSize, numLayers, activation = "tanh") + + // Layer 0 + val cell0 = rnn.layer(0).cell.asInstanceOf [RNNCell] + val Wih0 = new MatrixD (4, 3, Array ( + Array (0.1, 0.2, 0.3), + Array (0.4, 0.5, 0.6), + Array (0.7, 0.8, 0.9), + Array (1.0, 1.1, 1.2) + )) + cell0.W_ih.data = TensorD (Wih0) + + val Whh0 = new MatrixD (4, 4, Array ( + Array (0.1, 0.0, 0.0, 0.0), + Array (0.0, 0.1, 0.0, 0.0), + Array (0.0, 0.0, 0.1, 0.0), + Array (0.0, 0.0, 0.0, 0.1) + )) + cell0.W_hh.data = TensorD (Whh0) + + // biases + val bIh0 = new MatrixD (4, 1, Array (Array (0.01), Array (0.02), Array (0.03), Array (0.04))) + cell0.b_ih.data = TensorD (bIh0) + + val bHh0 = new MatrixD (4, 1, Array (Array (0.0), Array (0.0), Array (0.0), Array (0.0))) + cell0.b_hh.data = TensorD (bHh0) + + // Layer 1 (note input size = hidden size = 4 here) + val cell1 = rnn.layer(1).cell.asInstanceOf[RNNCell] + val Wih1 = new MatrixD (4, 4, Array ( + Array (-0.1, -0.2, -0.3, -0.4), + Array (-0.5, -0.6, -0.7, -0.8), + Array (-0.9, -1.0, -1.1, -1.2), + Array (-1.3, -1.4, -1.5, -1.6) + )) + cell1.W_ih.data = TensorD (Wih1) + + val Whh1 = new MatrixD (4, 4, Array ( + Array (0.11, 0.12, 0.13, 0.14), + Array (0.15, 0.16, 0.17, 0.18), + Array (0.19, 0.20, 0.21, 0.22), + Array (0.23, 0.24, 0.25, 0.26) + )) + cell1.W_hh.data = TensorD (Whh1) + + // biases + val bIh1 = new MatrixD (4, 1, Array (Array (0.1), Array (0.2), Array (0.3), Array (0.4))) + cell1.b_ih.data = TensorD (bIh1) + + val bHh1 = new MatrixD (4, 1, Array (Array (0.5), Array (0.6), Array (0.7), Array (0.8))) + cell1.b_hh.data = TensorD (bHh1) + + // ------------------------------------------------------------------ + 
// Inputs: sequence of length 2 (x_t0, x_t1), each [batch=2, input=3, 1] + // ------------------------------------------------------------------ + val x0_batch0 = new MatrixD (3, 1, Array (Array (0.1), Array (0.2), Array (0.3))) + val x0_batch1 = new MatrixD (3, 1, Array (Array (0.4), Array (0.5), Array (0.6))) + val x_t0 = Variabl (TensorD (x0_batch0, x0_batch1), name = Some ("x_t0")) + + val x1_batch0 = new MatrixD (3, 1, Array (Array (0.7), Array (0.8), Array (0.9))) + val x1_batch1 = new MatrixD (3, 1, Array (Array (1.0), Array (1.1), Array (1.2))) + val x_t1 = Variabl (TensorD (x1_batch0, x1_batch1), name = Some ("x_t1")) + + val xs = IndexedSeq (x_t0, x_t1) + + // ------------------------------------------------------------------ + // Initial hidden states for both layers + // ------------------------------------------------------------------ + val h0_layer0_batch0 = new MatrixD (4, 1, Array (Array (0.01), Array (0.02), Array (0.03), Array (0.04))) + val h0_layer0_batch1 = new MatrixD (4, 1, Array (Array (0.05), Array (0.06), Array (0.07), Array (0.08))) + val h0_layer0 = Variabl (TensorD (h0_layer0_batch0, h0_layer0_batch1), name = Some ("h0_layer0")) + + val h0_layer1_batch0 = new MatrixD (4, 1, Array (Array (0.1), Array (0.2), Array (0.3), Array (0.4))) + val h0_layer1_batch1 = new MatrixD (4, 1, Array (Array (0.5), Array (0.6), Array (0.7), Array (0.8))) + val h0_layer1 = Variabl (TensorD (h0_layer1_batch0, h0_layer1_batch1), name = Some ("h0_layer1")) + + val h0s = IndexedSeq (h0_layer0, h0_layer1) + + // ---- Forward ---- + val (outputs, hLasts) = rnn.forward (xs, Some (h0s)) + + println ("Outputs per timestep:") + outputs.zipWithIndex.foreach { case (out, t) => + println (s"t=$t: ${out.data}") + } + println (s"Final hidden states: ${hLasts.map(_.data)}") + + // ---- Gradient checks ---- + val R = TestReport() + // Input at time 0 + R.record ("RNN Multi Layer - x_t0 GradCheck") { + GradCheck.gradCheck (xs(0), () => rnn.forward (xs, Some 
(h0s))._1.last.sum, quiet = true) + } + + // Initial hidden states + R.record ("RNN Multi Layer - h0_layer0 GradCheck") { + GradCheck.gradCheck (h0_layer0, () => rnn.forward (xs, Some (h0s))._1.last.sum, quiet = true) + } + R.record ("RNN Multi Layer - h0_layer1 GradCheck") { + GradCheck.gradCheck (h0_layer1, () => rnn.forward (xs, Some (h0s))._1.last.sum, quiet = true) + } + + // Layer 0 params + R.record ("RNN Multi Layer - W_ih0 GradCheck") { + GradCheck.gradCheck (cell0.W_ih, () => rnn.forward (xs, Some (h0s))._1.last.sum, quiet = true) + } + R.record ("RNN Multi Layer - W_hh0 GradCheck") { + GradCheck.gradCheck (cell0.W_hh, () => rnn.forward (xs, Some (h0s))._1.last.sum, quiet = true) + } + R.record ("RNN Multi Layer - b_ih0 GradCheck") { + GradCheck.gradCheck (cell0.b_ih, () => rnn.forward (xs, Some (h0s))._1.last.sum, quiet = true) + } + R.record ("RNN Multi Layer - b_hh0 GradCheck") { + GradCheck.gradCheck (cell0.b_hh, () => rnn.forward (xs, Some (h0s))._1.last.sum, quiet = true) + } + + // Layer 1 params + R.record ("RNN Multi Layer - W_ih1 GradCheck") { + GradCheck.gradCheck (cell1.W_ih, () => rnn.forward (xs, Some (h0s))._1.last.sum, quiet = true) + } + R.record ("RNN Multi Layer - W_hh1 GradCheck") { + GradCheck.gradCheck (cell1.W_hh, () => rnn.forward (xs, Some (h0s))._1.last.sum, quiet = true) + } + R.record ("RNN Multi Layer - b_ih1 GradCheck") { + GradCheck.gradCheck (cell1.b_ih, () => rnn.forward (xs, Some (h0s))._1.last.sum, quiet = true) + } + R.record ("RNN Multi Layer - b_hh1 GradCheck") { + GradCheck.gradCheck (cell1.b_hh, () => rnn.forward (xs, Some (h0s))._1.last.sum, quiet = true) + } + + R.summary ("RNN Multi Layer (2 layers) Backward Test") + + // ---- Export graph ---- + val outPath = "target/autograd/visualization/computation_graph_rnn_multi_layer.dot" + GraphExporter.writeDot(hLasts.last, outPath, renderSvg = true) + println (s"Computation graph DOT written to $outPath") + + end rnnMultiLayerTest + + 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + // GRU Tests + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `gruCellTest` main function tests the `GRUCell` class. + * > runMain scalation.modeling.autograd.gruCellTest + */ + @main def gruCellTest (): Unit = + banner ("GRUCell - Forward + Backward Test") + + val inputSize = 3 + val hiddenSize = 4 +// val batchSize = 2 + + // Create a toy GRU cell + val cell = GRUCell(inputSize, hiddenSize) + + // ---- Manually set weights/biases for reproducibility ---- + // Reset gate + val WirM = new MatrixD (4, 3, Array ( + Array (0.1, 0.2, 0.3), + Array (0.4, 0.5, 0.6), + Array (0.7, 0.8, 0.9), + Array (1.0, 1.1, 1.2) + )) + val WhrM = new MatrixD (4, 4, Array ( + Array (0.1, 0.0, 0.0, 0.0), + Array (0.0, 0.1, 0.0, 0.0), + Array (0.0, 0.0, 0.1, 0.0), + Array (0.0, 0.0, 0.0, 0.1) + )) + cell.W_ir.data = TensorD (WirM) + cell.W_hr.data = TensorD (WhrM) + cell.b_ir.data = TensorD (new MatrixD (4, 1, Array (Array (0.01), Array (0.02), Array (0.03), Array (0.04)))) + cell.b_hr.data = TensorD (new MatrixD (4, 1, Array (Array (0.0), Array (0.0), Array (0.0), Array (0.0)))) + + // Update gate + val WizM = new MatrixD (4, 3, Array ( + Array (-0.1, -0.2, -0.3), + Array (-0.4, -0.5, -0.6), + Array (-0.7, -0.8, -0.9), + Array (-1.0, -1.1, -1.2) + )) + val WhzM = new MatrixD (4, 4, Array ( + Array (0.2, 0.0, 0.0, 0.0), + Array (0.0, 0.2, 0.0, 0.0), + Array (0.0, 0.0, 0.2, 0.0), + Array (0.0, 0.0, 0.0, 0.2) + )) + cell.W_iz.data = TensorD (WizM) + cell.W_hz.data = TensorD (WhzM) + cell.b_iz.data = TensorD (new MatrixD (4, 1, Array (Array (0.05), Array (0.06), Array (0.07), Array (0.08)))) + cell.b_hz.data = TensorD (new MatrixD (4, 1, Array (Array (0.0), Array (0.0), Array (0.0), Array (0.0)))) + + // New gate + val WinM = new MatrixD (4, 3, Array ( + Array (0.2, 0.1, 0.0), + 
Array (0.0, -0.1, -0.2), + Array (0.3, 0.2, 0.1), + Array (-0.1, -0.2, -0.3) + )) + val WhnM = new MatrixD (4, 4, Array ( + Array (0.3, 0.0, 0.0, 0.0), + Array (0.0, 0.3, 0.0, 0.0), + Array (0.0, 0.0, 0.3, 0.0), + Array (0.0, 0.0, 0.0, 0.3) + )) + cell.W_in.data = TensorD (WinM) + cell.W_hn.data = TensorD (WhnM) + cell.b_in.data = TensorD (new MatrixD (4, 1, Array (Array (0.01), Array (0.02), Array (0.03), Array (0.04)))) + cell.b_hn.data = TensorD (new MatrixD (4, 1, Array (Array (0.0), Array (0.0), Array (0.0), Array (0.0)))) + + + // ---- Dummy batch input ---- + val x0 = new MatrixD (3, 1, Array (Array (0.1), Array (0.2), Array (0.3))) + val x1 = new MatrixD (3, 1, Array (Array (0.4), Array (0.5), Array (0.6))) + val x = Variabl (TensorD (x0, x1), name = Some ("x")) + + // ---- Dummy previous hidden state ---- + val h0 = new MatrixD (4, 1, Array (Array (0.01), Array (0.02), Array (0.03), Array (0.04))) + val h1 = new MatrixD (4, 1, Array (Array (0.05), Array (0.06), Array (0.07), Array (0.08))) + val hPrev = Variabl (TensorD (h0, h1), name = Some ("hPrev")) + + // Forward through GRUCell + val hNext = cell (IndexedSeq (x, hPrev)).head + + println (s"Input x: ${x.data}") + println (s"Prev hidden: ${hPrev.data}") + println (s"Next hidden: ${hNext.data}") + + val R = TestReport() + + R.record ("GRUCell - Input GradCheck") { + GradCheck.gradCheck (x, () => cell (IndexedSeq (x, hPrev)).head.sum, quiet = true) + } + R.record ("GRUCell - Hidden GradCheck") { + GradCheck.gradCheck (hPrev, () => cell (IndexedSeq (x, hPrev)).head.sum, quiet = true) + } + + for p <- cell.parameters do + R.record (s"GRUCell - ${p.name.getOrElse ("param")} GradCheck") { + GradCheck.gradCheck (p, () => cell (IndexedSeq (x, hPrev)).head.sum, quiet = true) + } + + R.summary ("GRUCell Forward + Backward Test") + + val outPath = "target/autograd/visualization/computation_graph_gru_cell.dot" + GraphExporter.writeDot(hNext, outPath, renderSvg = true) + println (s"Computation graph DOT written to 
$outPath") + + end gruCellTest + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `gruMultiLayerTest` main function tests the `GRU` class. + * > runMain scalation.modeling.autograd.gruMultiLayerTest + */ + @main def gruMultiLayerTest (): Unit = + banner ("GRU Multi Layer (2 layers) - Forward + Backward Test") + + val inputSize = 3 + val hiddenSize = 4 +// val seqLen = 2 +// val batchSize = 2 + val numLayers = 2 + + // ---- Construct 2-layer GRU ---- + val gru = GRU(inputSize, hiddenSize, numLayers) + + // Layer 0 + val cell0 = gru.layer(0).cell.asInstanceOf[GRUCell] + // Reset gate + cell0.W_ir.data = TensorD (new MatrixD (4, 3, Array ( + Array (0.1, 0.2, 0.3), + Array (0.4, 0.5, 0.6), + Array (0.7, 0.8, 0.9), + Array (1.0, 1.1, 1.2) + ))) + cell0.W_hr.data = TensorD (new MatrixD (4, 4, Array ( + Array (1.0, 0.0, 0.0, 0.0), + Array (0.0, 1.0, 0.0, 0.0), + Array (0.0, 0.0, 1.0, 0.0), + Array (0.0, 0.0, 0.0, 1.0) + ))) + cell0.b_ir.data = TensorD (new MatrixD (4, 1, Array ( + Array (0.01), + Array (0.02), + Array (0.03), + Array (0.04) + ))) + cell0.b_hr.data = TensorD (new MatrixD (4, 1, Array ( + Array (0.0), + Array (0.0), + Array (0.0), + Array (0.0) + ))) + + // Update gate + cell0.W_iz.data = TensorD (new MatrixD (4, 3, Array ( + Array (-0.1, -0.2, -0.3), + Array (-0.4, -0.5, -0.6), + Array (-0.7, -0.8, -0.9), + Array (-1.0, -1.1, -1.2) + ))) + cell0.W_hz.data = TensorD (new MatrixD (4, 4, Array ( + Array (0.2, 0.0, 0.0, 0.0), + Array (0.0, 0.2, 0.0, 0.0), + Array (0.0, 0.0, 0.2, 0.0), + Array (0.0, 0.0, 0.0, 0.2) + ))) + cell0.b_iz.data = TensorD (new MatrixD (4, 1, Array ( + Array (0.05), + Array (0.06), + Array (0.07), + Array (0.08) + ))) + cell0.b_hz.data = TensorD (new MatrixD (4, 1, Array ( + Array (0.0), + Array (0.0), + Array (0.0), + Array (0.0) + ))) + + // New gate + cell0.W_in.data = TensorD (new MatrixD (4, 3, Array ( + Array (0.2, 0.1, 0.0), + Array (0.0, -0.1, -0.2), + Array (0.3, 0.2, 0.1), + 
Array (-0.1, -0.2, -0.3) + ))) + cell0.W_hn.data = TensorD (new MatrixD (4, 4, Array ( + Array (0.3, 0.0, 0.0, 0.0), + Array (0.0, 0.3, 0.0, 0.0), + Array (0.0, 0.0, 0.3, 0.0), + Array (0.0, 0.0, 0.0, 0.3) + ))) + cell0.b_in.data = TensorD (new MatrixD (4, 1, Array ( + Array (0.01), + Array (0.02), + Array (0.03), + Array (0.04) + ))) + cell0.b_hn.data = TensorD (new MatrixD (4, 1, Array ( + Array (0.0), + Array (0.0), + Array (0.0), + Array (0.0) + ))) + + // Layer 1 (input size = hidden size = 4) + val cell1 = gru.layer(1).cell.asInstanceOf[GRUCell] + // Reset gate + cell1.W_ir.data = TensorD (new MatrixD (4, 4, Array ( + Array (-0.1, -0.2, -0.3, -0.4), + Array (-0.5, -0.6, -0.7, -0.8), + Array (-0.9, -1.0, -1.1, -1.2), + Array (-1.3, -1.4, -1.5, -1.6) + ))) + cell1.W_hr.data = TensorD (new MatrixD (4, 4, Array ( + Array (0.11, 0.0, 0.0, 0.0), + Array (0.0, 0.11, 0.0, 0.0), + Array (0.0, 0.0, 0.11, 0.0), + Array (0.0, 0.0, 0.0, 0.11) + ))) + cell1.b_ir.data = TensorD (new MatrixD (4, 1, Array ( + Array (0.1), + Array (0.2), + Array (0.3), + Array (0.4) + ))) + cell1.b_hr.data = TensorD (new MatrixD (4, 1, Array ( + Array (0.5), + Array (0.6), + Array (0.7), + Array (0.8) + ))) + + // Update gate + cell1.W_iz.data = TensorD (new MatrixD (4, 4, Array ( + Array (1.0, 0.0, 0.0, 0.0), + Array (0.0, 1.0, 0.0, 0.0), + Array (0.0, 0.0, 1.0, 0.0), + Array (0.0, 0.0, 0.0, 1.0) + ))) + cell1.W_hz.data = TensorD (new MatrixD (4, 4, Array ( + Array (0.12, 0.0, 0.0, 0.0), + Array (0.0, 0.12, 0.0, 0.0), + Array (0.0, 0.0, 0.12, 0.0), + Array (0.0, 0.0, 0.0, 0.12) + ))) + cell1.b_iz.data = TensorD (new MatrixD (4, 1, Array ( + Array (0.05), + Array (0.06), + Array (0.07), + Array (0.08) + ))) + cell1.b_hz.data = TensorD (new MatrixD (4, 1, Array ( + Array (0.0), + Array (0.0), + Array (0.0), + Array (0.0) + ))) + + // New gate + cell1.W_in.data = TensorD (new MatrixD (4, 4, Array ( + Array (1.0, 0.0, 0.0, 0.0), + Array (0.0, 1.0, 0.0, 0.0), + Array (0.0, 0.0, 1.0, 0.0), + Array 
(0.0, 0.0, 0.0, 1.0) + ))) + cell1.W_hn.data = TensorD (new MatrixD (4, 4, Array ( + Array (0.13, 0.0, 0.0, 0.0), + Array (0.0, 0.13, 0.0, 0.0), + Array (0.0, 0.0, 0.13, 0.0), + Array (0.0, 0.0, 0.0, 0.13) + ))) + cell1.b_in.data = TensorD (new MatrixD (4, 1, Array ( + Array (0.01), + Array (0.02), + Array (0.03), + Array (0.04) + ))) + cell1.b_hn.data = TensorD (new MatrixD (4, 1, Array ( + Array (0.0), + Array (0.0), + Array (0.0), + Array (0.0) + ))) + + // ------------------------------------------------------------------ + // Inputs: sequence of length 2, each [batch=2, input=3, 1] + // ------------------------------------------------------------------ + val x0_batch0 = new MatrixD (3, 1, Array (Array (0.1), Array (0.2), Array (0.3))) + val x0_batch1 = new MatrixD (3, 1, Array (Array (0.4), Array (0.5), Array (0.6))) + val x_t0 = Variabl (TensorD (x0_batch0, x0_batch1), name = Some ("x_t0")) + + val x1_batch0 = new MatrixD (3, 1, Array (Array (0.7), Array (0.8), Array (0.9))) + val x1_batch1 = new MatrixD (3, 1, Array (Array (1.0), Array (1.1), Array (1.2))) + val x_t1 = Variabl (TensorD (x1_batch0, x1_batch1), name = Some ("x_t1")) + + val xs = IndexedSeq (x_t0, x_t1) + + // ------------------------------------------------------------------ + // Initial hidden states for both layers + // ------------------------------------------------------------------ + val h0_layer0 = Variabl (TensorD ( + new MatrixD (4, 1, Array (Array (0.01), Array (0.02), Array (0.03), Array (0.04))), + new MatrixD (4, 1, Array (Array (0.05), Array (0.06), Array (0.07), Array (0.08))) + ), name = Some ("h0_layer0")) + + val h0_layer1 = Variabl (TensorD ( + new MatrixD (4, 1, Array (Array (0.1), Array (0.2), Array (0.3), Array (0.4))), + new MatrixD (4, 1, Array (Array (0.5), Array (0.6), Array (0.7), Array (0.8))) + ), name = Some ("h0_layer1")) + + val h0s = IndexedSeq (h0_layer0, h0_layer1) + + // ---- Forward ---- + val (outputs, hLasts) = gru.forward (xs, Some (h0s)) + + println 
("Outputs per timestep:") + outputs.zipWithIndex.foreach { case (out, t) => + println (s"t=$t: ${out.data}") + } + println (s"Final hidden states: ${hLasts.map(_.data)}") + + // ---- Gradient checks ---- + val R = TestReport() + + R.record ("GRU Multi Layer - x_t0 GradCheck") { + GradCheck.gradCheck (xs(0), () => gru.forward (xs, Some (h0s))._1.last.sum, quiet = true) + } + R.record ("GRU Multi Layer - h0_layer0 GradCheck") { + GradCheck.gradCheck (h0_layer0, () => gru.forward (xs, Some (h0s))._1.last.sum, quiet = true) + } + R.record ("GRU Multi Layer - h0_layer1 GradCheck") { + GradCheck.gradCheck (h0_layer1, () => gru.forward (xs, Some (h0s))._1.last.sum, quiet = true) + } + + for p <- cell0.parameters ++ cell1.parameters do + R.record (s"GRU Multi Layer - ${p.name.getOrElse ("param")} GradCheck") { + GradCheck.gradCheck (p, () => gru.forward (xs, Some (h0s))._1.last.sum, quiet = true) + } + + R.summary ("GRU Multi Layer (2 layers) Backward Test") + + end gruMultiLayerTest + +end RNNTestCore diff --git a/src/main/scala/scalation/modeling/autograd/RNNTestForecasting.scala b/src/main/scala/scalation/modeling/autograd/RNNTestForecasting.scala new file mode 100644 index 000000000..7c875154a --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/RNNTestForecasting.scala @@ -0,0 +1,1910 @@ +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Tue November 11 10:44:32 EDT 2025 + * @see LICENSE (MIT style license file). 
+ * + * @note Autograd: RNN Forecasting Tests & Utilities + */ + +package scalation +package modeling +package autograd + +import java.io.PrintWriter + +import scalation.mathstat.{MatrixD, Plot, TensorD, VectorD} +import scalation.modeling.forecasting.MakeMatrix4TS.{makeMatrix4EXO, makeMatrix4L, makeMatrix4Y} +import scalation.modeling.forecasting.{Example_Covid, Example_ILI} +import scalation.modeling.neuralnet.StoppingRule + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNNTestForecasting` object provides a suite of time–series utilities + * and forecasting experiments using Autograd–based recurrent neural networks. + * It includes: + * - lagged–window matrix builders (`buildMatrix4TS`, `buildMatrix4TSX`) + * - batch construction utilities for sequence models (`makeBatches`) + * - demonstration tests for RNN and GRU models on: + * • synthetic sequences + * • COVID–19 new-deaths data + * • ILI (Influenza-Like Illness) data + * - chronological train/test splits + * - rolling / walk–forward validation + * These tests verify correctness of data pipelines, shape handling, + * training loops, scaling transformations, and forecasting performance. + */ +object RNNTestForecasting: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build lagged feature matrix and horizon-ahead target matrix for univariate + * time–series forecasting. 
+ * @param y the raw input series + * @param lags number of past time steps used as model input + * @param hh forecast horizon (steps ahead) + * @param backcast if `true`, constructs backward windows for reconstruction tests + * @return a tuple `(x, yy)` where: + * - `x` is the lagged matrix (windows × features) + * - `yy` is the horizon-matrix aligned with `x` + */ + def buildMatrix4TS (y: VectorD, lags: Int, hh: Int, backcast: Boolean = false): (MatrixD, MatrixD) = + val x = makeMatrix4L (y, lags, backcast) + val yy = makeMatrix4Y (y, hh, backcast) + + println (s"dims of x = ${x.dims}") + println (s"dims of yy = ${yy.dims}") + println (s"last element in y = ${y(y.dim - 1)}") + + (x, yy) + end buildMatrix4TS + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build lagged feature matrix for time-series with exogenous inputs. + * @param y endogenous (target) series + * @param xe exogenous variables (matrix) + * @param lags number of lagged steps to include + * @param hh forecast horizon + * @param backcast whether to generate backward windows + * @param isTest if `true`, skip alignment trimming + * @return `(x_trim, yy_trim)` aligned lagged feature + horizon matrices + */ + def buildMatrix4TSX (y: VectorD, xe: MatrixD, lags: Int, hh: Int, backcast: Boolean = false, + isTest: Boolean = false): (MatrixD, MatrixD) = + // Lagged endogenous (target) and exogenous features + val x_y = makeMatrix4L (y, lags, backcast) // (n, lags) + val x_ex = makeMatrix4EXO (xe, 7, 1.0, backcast) // (n, lags * n_exo) + + // Combine all lag features side by side + val x = x_y ++^ x_ex // (n, lags * (1 + n_exo)) + + // Output horizons for the target + val yy = makeMatrix4Y (y, hh, backcast) + + // Align dimensions (drop first lags rows, to match available horizons) + val (x_trim, yy_trim) = + if !isTest then + println (isTest) + println ("Trimming training data to align input and output matrices") + (x(lags until x.dim), yy(lags until yy.dim)) + 
else + (x, yy) + end if + + println (s"dims of x = ${x_trim.dims}") + println (s"dims of yy = ${yy_trim.dims}") + println (s"last element in y = ${y(y.dim - 1)}") + + (x_trim, yy_trim) + end buildMatrix4TSX + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert lagged matrices into mini-batches suitable for RNN/GRU models. + * Splits the dataset into batches, converts each window into an ordered + * input sequence `(X_t0, X_t1, ..., X_tn)` and attaches the corresponding + * target tensor for supervised sequence forecasting. + * @param xSeq full lagged input matrix + * @param ySeq horizon target matrix + * @param batchSize batch size for training + * @param nFeatures number of features at each time step (default = 1) + * @return an indexed sequence of `(inputSeq, target)` batch pairs + */ + def makeBatches (xSeq: MatrixD, ySeq: MatrixD, batchSize: Int, nFeatures: Int = 1): + IndexedSeq [(IndexedSeq[Variabl], Variabl)] = + val nSeq = xSeq.dim + val totalLag = xSeq.dim2 + val lags = totalLag / nFeatures +// val horizon = ySeq.dim2 + val nBatches = (nSeq + batchSize - 1) / batchSize + + for b <- 0 until nBatches yield + val start = b * batchSize + val end = math.min (start + batchSize, nSeq) + val batchWindow = start until end + + val xBatchMat = xSeq (batchWindow) + val yBatchMat = ySeq (batchWindow) + + val inputSeq = (0 until lags).map { t => + val startCol = t * nFeatures + val endCol = startCol + nFeatures + val cols = xBatchMat (?, startCol until endCol) + Variabl ( + TensorD.fromMatrix (cols).permute (Seq (1, 2, 0)), // (batch, nFeatures, 1) + name = Some (s"x_b${b}_t${t}") + ) + } + // keep target as (batch, horizon, 1) + val target = Variabl ( + TensorD.fromMatrix (yBatchMat).permute (Seq (1, 2, 0)), + name = Some (s"y_batch$b") + ) + (inputSeq, target) + end for + end makeBatches + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Test utility functions used for RNN 
forecasting: + * - lag construction + * - horizon matrix construction + * - batch formatting into proper RNN tensors + * Demonstrates window creation for a simple synthetic series. + * Run using: + * > runMain scalation.modeling.autograd.rnnUtilityTest + */ + @main def rnnUtilityTest (): Unit = + banner ("RNN Utility Functions Test - Sequence Creation and Batching") + + val y = VectorD (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0) + val lags = 3 + val hh = 2 + val (x, yy) = buildMatrix4TS (y, lags, hh) + + println (s"Step 0: Original series y = $y") + println ("Step 1: Lagged X and Horizon Y") + for i <- 0 until x.dim do + println (f"t=$i%2d | x=${x(i)} => yy=${yy(i)}") + println (s"Step 2: Creat batches of length = 4") + val batches = makeBatches (x, yy, batchSize = 4) + for (batch, i) <- batches.zipWithIndex do + val (inputSeq, target) = batch + println (s"Batch $i:") + for t <- inputSeq.indices do + println (s" Time $t: ${inputSeq (t).data}") + println (s" Target: ${target.data}") + end for + end rnnUtilityTest + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forecast COVID-19 weekly new deaths using a single-layer RNN. + * Pipeline: + * 1. load + scale the dataset + * 2. convert into lagged windows + * 3. batch for RNN input + * 4. train with Adam + StepLR + * 5. compute QoF metrics (R², SMAPE, MAE, RMSE) + * 6. 
plot predictions & training loss + * Run: + * > runMain scalation.modeling.autograd.rnnCovidTest + */ + @main def rnnCovidTest (): Unit = + banner ("RNN Covid Test - Single Layer RNN on Covid Data") + var y = Example_Covid.loadData_y ("new_deaths") + y = y(0 until 116) + val original_extremes = extreme (y) + println ("original_extremes.type = " + original_extremes.getClass) + + val y_s = scaleV (original_extremes, (-2.0, 2.0))(y) + + val t = VectorD.range(0, y.dim) + new Plot (t, y, null, "Covid New Deaths y(t)", lines=true) + + val lags = 10 // how many past steps the RNN sees + val hh = 1 // predict 1 step ahead + val (x, yy) = buildMatrix4TS (y_s, lags, hh) + + val batchSize = 32 + val batches = makeBatches (x, yy, batchSize) + + // Print shapes + println (s"Number of batches: ${batches.length}") + println (s"Each batch input shape: (${batches(0)._1.length}, ${batches(0)._1(0).shape})") + println (s"Each batch target shape: ${batches(0)._2.shape}") + +// val inputSize = 1 +// val hiddenSize = 10 + + case class RnnForecast (seqLen: Int, hiddenSize: Int, horizon: Int) + extends SeqModule with Fit (seqLen, horizon): + + import scala.language.implicitConversions + + // RNN + output projection + private val rnn = RNN (inputSize = 1, hiddenSize, numLayers = 1, activation = "tanh") + private val outputLayer = Linear (hiddenSize, horizon) + + override def parameters: IndexedSeq [Variabl] = + rnn.parameters ++ outputLayer.parameters + + override def forward (x: IndexedSeq [Variabl]): IndexedSeq[Variabl] = + val (outputs, _) = rnn.forward (x) + val lastOut = outputs.last // (batch, hiddenSize, 1) + val pred = lastOut ~> outputLayer // (batch, horizon, 1) + IndexedSeq (pred) + end forward + end RnnForecast + + // Instantiate the model and optimizer + val net = RnnForecast(seqLen = lags, hiddenSize = 10, horizon = hh) + val optimizer = Adam (parameters = net.parameters, lr = 0.02, beta1 = 0.9, beta2 = 0.999) + val scheduler = StepLR (optimizer, stepSize = 50, gamma = 0.8) + 
+ // Sanity: one forward pass on the first batch + val (xs, target) = batches.head + val pred = net.forward (xs).head + println (s"Pred shape: ${pred.shape} vs target: ${target.shape}") + + // Training loop + // 1. Results: after 350 epochs, loss ~ 0.003, QoF: R2 ~ 0.97, SMAPE ~ 9.56 + // patience = 40 + // lr = 0.3, momentum = 0.90 (SGD) (no scheduler) + // maxNorm = 5.0 (grad clipping) + // hiddenSize = 10 + // batchSize = 32 + // lags = 10 + + // 2. Results: after 310 epochs, loss ~ 0.0027, QoF: R2 ~ 0.97, SMAPE ~ 9.2 + // patience = 40 + // lr = 0.005, beta1 = 0.9, beta2 = 0.99 (Adam) (no scheduler) + // maxNorm = 5.0 (grad clipping) + // hiddenSize = 10 + // batchSize = 32 + // lags = 10 + + // 3. Results: after 900 epochs, loss ~ 0.0006, QoF: R2 ~ 0.995, SMAPE ~ 4.7 + // patience = none + // lr = 0.01, beta1 = 0.9, beta2 = 0.999 (Adam) + // StepLR scheduler: stepSize = 50, gamma = 0.8 + // maxNorm = 5.0 (grad clipping) + // hiddenSize = 10 + // batchSize = 32 + // lags = 10 + + // 4. Results: after 341 epochs, loss ~ 0.0147, QoF: R2 ~ 0.987, SMAPE ~ 6.7 + // patience = 60 + // lr = 0.01, beta1 = 0.9, beta2 = 0.999 (Adam) + // StepLR scheduler: stepSize = 50, gamma = 0.8 + // maxNorm = 5.0 (grad clipping) + // hiddenSize = 10 + // batchSize = 32 + // lags = 10 + + object monitor extends MonitorLoss + object EarlyStopper extends StoppingRule + val patience = 60 + var stopTraining = false + val nEpochs = 500 + val maxNorm = 5.0 + + val lossWriter = new PrintWriter ("loss_covid_insample.txt") + lossWriter.println ("epoch,loss") + // Training loop + for epoch <- 0 until nEpochs if ! 
stopTraining do + var epochLoss = 0.0 + for (xs, target) <- batches do + // Zero gradients + optimizer.zeroGrad () + + // Forward pass + val pred = net.forward (xs).head + + // Compute loss (MSE) + val loss = mseLoss (pred, target) + epochLoss += loss.data(0)(0)(0) + + // Backward pass + loss.backward () + + // Clip gradients + optimizer.clipGradNorm (maxNorm) + + // Update parameters + optimizer.step () + end for + + // Step the scheduler (once per epoch) + scheduler.step () + + val avgLoss = epochLoss / batches.length + lossWriter.println (s"$epoch,$avgLoss") + monitor.collectLoss (avgLoss) + + if epoch % 10 == 0 then + println (f"Epoch $epoch%3d | Loss: ${epochLoss / batches.length}%.6f") + + // early stopping check + val (stopParams, bestLoss) = EarlyStopper.stopWhenPatience (net.parameters, avgLoss, patience) + if stopParams != null then + println (s"Early stopping at epoch $epoch with best loss $bestLoss") + net.setParameters (stopParams) + stopTraining = true + end if + end for + lossWriter.close () + + // =================================================== + // Diagnostics: QoF & FitMap + // =================================================== + // Predict on whole dataset + val yPredSeq: IndexedSeq [Double] = + batches.flatMap { case (xs, _) => + val v = net.forward (xs).head.data.flattenToVector // VectorD + (0 until v.dim).map (v(_)) + } + // drop the first "lags" predictions to align with yy + val yPredSeqAligned = yPredSeq.drop (lags) + val yPredVec: VectorD = VectorD (yPredSeqAligned) + // drop the first "lags" rows from yy to align + val yyAligned = yy(lags until yy.dim) + val yTrueVec = yyAligned.flatten + + val yPredVecRescaled = unscaleV (original_extremes, (-2.0, 2.0))(yPredVec) + val yTrueVecRescaled = unscaleV (original_extremes, (-2.0, 2.0))(yTrueVec) + + val predWriter = new PrintWriter("predictions_covid_insample.csv") + predWriter.println ("t,actual,predicted") + for i <- 0 until yTrueVecRescaled.dim do + predWriter.println 
(s"$i,${yTrueVecRescaled (i)},${yPredVecRescaled (i)}") + predWriter.close () + + println (s"shapes of yTrueVecRescaled = ${yTrueVecRescaled.dim}, yPredVecRescaled = ${yPredVecRescaled.dim}") + println (s"first 10 yTrueVecRescaled = ${yTrueVecRescaled (0 until 10)}") + println (s"first 10 yPredVecRescaled = ${yPredVecRescaled (0 until 10)}") + + monitor.plotLoss ("RNN-Covid-New-Deaths") + new Plot (t(lags until t.dim), yTrueVecRescaled, yPredVecRescaled, "RNN New Deaths Forecast", lines=true) + banner ("Final Train Statistics") + val qof = net.diagnose (yTrueVecRescaled, yPredVecRescaled) + println (FitM.fitMap (qof, qoF_names)) + end rnnCovidTest + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forecast ILI (Influenza-Like Illness) incidence using a single-layer RNN. + * Similar to the COVID test but uses a longer lag window (21 weeks). + * Includes scaling, batching, training with Adam + StepLR, diagnostics and plots. + * Run: + * > runMain scalation.modeling.autograd.rnnILITest + */ + @main def rnnILITest (): Unit = + banner ("RNN ILI Test - Single Layer RNN on ILI Data") + + val y = Example_ILI.loadData_y ("ILITOTAL") + + val original_extremes = extreme(y) + println ("original_extremes.type = " + original_extremes.getClass) + + val y_s = scaleV (original_extremes, (-1.0, 1.0))(y) + + val t = VectorD.range(0, y.dim) + new Plot (t, y, null, "Covid New Deaths y(t)", lines = true) + + val lags = 21 // how many past steps the RNN sees + val hh = 1 // predict 1 step ahead + val (x, yy) = buildMatrix4TS (y_s, lags, hh) + + val batchSize = 32 + val batches = makeBatches (x, yy, batchSize) + + // Print shapes + println (s"Number of batches: ${batches.length}") + println (s"Each batch input shape: (${batches(0)._1.length}, ${batches(0)._1(0).shape})") + println (s"Each batch target shape: ${batches(0)._2.shape}") + +// val inputSize = 1 +// val hiddenSize = 10 + + case class RnnForecast (seqLen: Int, hiddenSize: Int, horizon: 
Int) + extends SeqModule with Fit (seqLen, horizon): + + import scala.language.implicitConversions + + // RNN + output projection + private val rnn = RNN (inputSize = 1, hiddenSize, numLayers = 1, activation = "tanh") + private val outputLayer = Linear (hiddenSize, horizon) + + override def parameters: IndexedSeq [Variabl] = + rnn.parameters ++ outputLayer.parameters + + override def forward (x: IndexedSeq [Variabl]): IndexedSeq[Variabl] = + val (outputs, _) = rnn.forward (x) + val lastOut = outputs.last // (batch, hiddenSize, 1) + val pred = lastOut ~> outputLayer // (batch, horizon, 1) + IndexedSeq (pred) + end forward + end RnnForecast + + // Instantiate the model and optimizer + val net = RnnForecast(seqLen = lags, hiddenSize = 10, horizon = hh) + val optimizer = Adam(parameters = net.parameters, lr = 0.005, beta1 = 0.9, beta2 = 0.999) + val scheduler = StepLR(optimizer, stepSize = 80, gamma = 0.8) + + // Sanity: one forward pass on the first batch + val (xs, target) = batches.head + val pred = net.forward (xs).head + println (s"Pred shape: ${pred.shape} vs target: ${target.shape}") + + object monitor extends MonitorLoss + object EarlyStopper extends StoppingRule + val patience = 140 + var stopTraining = false + val nEpochs = 500 + val maxNorm = 5.0 + + val lossWriter = new PrintWriter ("loss_ili_insample.csv") + // Training loop + for epoch <- 0 until nEpochs if ! 
stopTraining do + var epochLoss = 0.0 + for (xs, target) <- batches do + // Zero gradients + optimizer.zeroGrad () + + // Forward pass + val pred = net.forward (xs).head + + // Compute loss (MSE) + val loss = mseLoss (pred, target) + epochLoss += loss.data(0)(0)(0) + + // Backward pass + loss.backward () + + // Clip gradients + optimizer.clipGradNorm (maxNorm) + + // Update parameters + optimizer.step () + + end for + + // Step the scheduler (once per epoch) + scheduler.step () + + val avgLoss = epochLoss / batches.length + lossWriter.println (s"$epoch,$avgLoss") + monitor.collectLoss (avgLoss) + + if epoch % 10 == 0 then + println (f"Epoch $epoch%3d | Loss: ${epochLoss / batches.length}%.6f") + + // early stopping check + val (stopParams, bestLoss) = EarlyStopper.stopWhenPatience (net.parameters, avgLoss, patience) + if stopParams != null then + println (s"Early stopping at epoch $epoch with best loss $bestLoss") + net.setParameters (stopParams) + stopTraining = true + end if + end for + lossWriter.close + + // =================================================== + // Diagnostics: QoF & FitMap + // =================================================== + // Predict on whole dataset + val yPredSeq: IndexedSeq [Double] = + batches.flatMap { case (xs, _) => + val v = net.forward (xs).head.data.flattenToVector // VectorD + (0 until v.dim).map (v(_)) + } + // drop the first "lags" predictions to align with yy + val yPredSeqAligned = yPredSeq.drop (lags) + val yPredVec: VectorD = VectorD (yPredSeqAligned) + // drop the first "lags" rows from yy to align + val yyAligned = yy(lags until yy.dim) + val yTrueVec = yyAligned.flatten + + val yPredVecRescaled = unscaleV (original_extremes, (-1.0, 1.0))(yPredVec) + val yTrueVecRescaled = unscaleV (original_extremes, (-1.0, 1.0))(yTrueVec) + + val predWriter = new PrintWriter("predictions_ili_insample.csv") + predWriter.println ("t,actual,predicted") + for i <- 0 until yTrueVecRescaled.dim do + predWriter.println 
(s"$i,${yTrueVecRescaled (i)},${yPredVecRescaled (i)}") + predWriter.close () + println (s"shapes of yTrueVecRescaled = ${yTrueVecRescaled.dim}, yPredVecRescaled = ${yPredVecRescaled.dim}") + println (s"first 10 yTrueVecRescaled = ${yTrueVecRescaled (0 until 10)}") + println (s"first 10 yPredVecRescaled = ${yPredVecRescaled (0 until 10)}") + + monitor.plotLoss ("RNN-Covid-New-Deaths") + new Plot (t(lags until t.dim), yTrueVecRescaled, yPredVecRescaled, "RNN New Deaths Forecast", lines = true) + banner ("Final Train Statistics") + val qof = net.diagnose (yTrueVecRescaled, yPredVecRescaled) + println (FitM.fitMap (qof, qoF_names)) + end rnnILITest + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Chronological (or Temporal) train/test split forecasting using RNN. + * Features: + * - Temporal holdout (e.g., first 80% train, remaining test) + * - adaptive learning rate via `ReduceLROnPlateau` + * - early stopping + * - rescaling predictions back to original space + * - complete QoF metrics for train & test + * Run: + * > runMain scalation.modeling.autograd.rnnCovidTest1 + */ + @main def rnnCovidTest1 (): Unit = + + banner ("RNN Covid Test - Single Layer RNN on Covid Data with chronological split") + var y = Example_Covid.loadData_y ("new_deaths") + y = y(0 until 116) + val split = 92 + val y_train = y(0 until split) + val y_test = y(split until y.dim) + val original_extremes = extreme (y_train) + println ("original_extremes.type = " + original_extremes.getClass) + + val y_train_s = scaleV (original_extremes, (-2.0, 2.0))(y_train) + val y_test_s = scaleV (original_extremes, (-2.0, 2.0))(y_test) + + val t = VectorD.range(0, y.dim) + new Plot (t, y, null, "Covid New Deaths y(t)", lines = true) + + val lags = 14 // how many past steps the RNN sees + val hh = 1 // predict 1 step ahead + val (x_train, yy_train) = buildMatrix4TS (y_train_s, lags, hh) + val (x_test, yy_test) = buildMatrix4TS (y_test_s, lags, hh) + + val batchSize = 
16 + val batchesTrain = makeBatches (x_train, yy_train, batchSize) + + // Print shapes + println (s"Number of training batches: ${batchesTrain.length}") + println (s"Each batch input shape: (${batchesTrain(0)._1.length}, ${batchesTrain(0)._1(0).shape})") + println (s"Each batch target shape: ${batchesTrain(0)._2.shape}") + +// val inputSize = 1 +// val hiddenSize = 10 + + case class RnnForecast (seqLen: Int, hiddenSize: Int, horizon: Int) + extends SeqModule with Fit (seqLen, horizon): + + import scala.language.implicitConversions + + // RNN + output projection + private val rnn = RNN (inputSize = 1, hiddenSize, numLayers = 1, activation = "tanh") + private val outputLayer = Linear (hiddenSize, horizon) + + override def parameters: IndexedSeq [Variabl] = + rnn.parameters ++ outputLayer.parameters + + override def forward (x: IndexedSeq [Variabl]): IndexedSeq[Variabl] = + val (outputs, _) = rnn.forward (x) + val lastOut = outputs.last // (batch, hiddenSize, 1) + val pred = lastOut ~> outputLayer // (batch, horizon, 1) + IndexedSeq (pred) + end forward + end RnnForecast + + // Instantiate the model and optimizer + val net = RnnForecast(seqLen = lags, hiddenSize = 5, horizon = hh) + val optimizer = Adam (parameters = net.parameters, lr = 0.001, beta1 = 0.9, beta2 = 0.999) + val scheduler = ReduceLROnPlateau ( + optim = optimizer, + mode = "min", // monitoring validation loss + factor = 0.6, // decay factor + patience = 30, // epochs to wait before reducing LR + threshold = 0.01, // 1% relative improvement required + thresholdMode = "rel", // use relative thresholding (PyTorch style) + cooldown = 0, // epochs to wait after LR has been reduced + minLR = 1e-4, // minimum learning rate + eps = 1e-8, // minimal decay applied to lr + verbose = true // print message on each update + ) + + // Sanity: one forward pass on the first batch + val (xs, target) = batchesTrain.head + val pred = net.forward (xs).head + println (s"Pred shape: ${pred.shape} vs target: ${target.shape}") 
+ + // Training loop + object monitor extends MonitorLoss + object EarlyStopper extends StoppingRule + val patience = 80 + var stopTraining = false + val nEpochs = 500 + val maxNorm = 5.0 + + // Training loop + for epoch <- 0 until nEpochs if ! stopTraining do + var epochLoss = 0.0 + for (xs, target) <- batchesTrain do + // Zero gradients + optimizer.zeroGrad () + + // Forward pass + val pred = net.forward (xs).head + + // Compute loss (MSE) + val loss = mseLoss (pred, target) + epochLoss += loss.data(0)(0)(0) + + // Backward pass + loss.backward () + + // Clip gradients + optimizer.clipGradNorm (maxNorm) + + // Update parameters + optimizer.step () + end for + + + val avgLoss = epochLoss / batchesTrain.length + monitor.collectLoss (avgLoss) + + scheduler.step (avgLoss) + + if epoch % 10 == 0 then + println (f"Epoch $epoch%3d | Loss: ${epochLoss / batchesTrain.length}%.6f") + + // early stopping check + val (stopParams, bestLoss) = EarlyStopper.stopWhenPatience (net.parameters, avgLoss, patience) + if stopParams != null then + println (s"Early stopping at epoch $epoch with best loss $bestLoss") + net.setParameters (stopParams) + stopTraining = true + end if + end for + + // =================================================== + // Diagnostics: QoF & FitMap + // =================================================== + // Predict on test set + val yPredTestSeq: IndexedSeq [Double] = + makeBatches (x_test, yy_test, batchSize) + .flatMap { case (xs, _) => + val v = net.forward (xs).head.data.flattenToVector + (0 until v.dim).map (v(_)) + } + + val yPredTest: VectorD = VectorD (yPredTestSeq) + val yTrueTest = yy_test.flatten + + // --- Train predictions --- + val yPredTrainSeq = + makeBatches (x_train, yy_train, batchSize) + .flatMap { case (xs, _) => + val v = net.forward (xs).head.data.flattenToVector + (0 until v.dim).map (v(_)) + } + + val yPredTrain = VectorD (yPredTrainSeq) + val yTrueTrain = yy_train.flatten + + // --- Unscale both --- + val yPredTrainRescaled = 
unscaleV (original_extremes, (-2.0, 2.0))(yPredTrain) + val yTrueTrainRescaled = unscaleV (original_extremes, (-2.0, 2.0))(yTrueTrain) + val yPredTestRescaled = unscaleV (original_extremes, (-2.0, 2.0))(yPredTest) + val yTrueTestRescaled = unscaleV (original_extremes, (-2.0, 2.0))(yTrueTest) + + val yPredTrainAligned = yPredTrainRescaled.drop (lags) + val yTrueTrainAligned = yTrueTrainRescaled.drop (lags) + + println (s"shapes of yTrueTrainAligned = ${yTrueTrainAligned.dim}, yPredTrainAligned = ${yPredTrainAligned.dim}") + println (s"shapes of yTrueTestRescaled = ${yTrueTestRescaled.dim}, yPredTestRescaled = ${yPredTestRescaled.dim}") + println (s"first 10 yTrueTrainAligned = ${yTrueTrainAligned (0 until 10)}") + println (s"first 10 yPredTrainAligned = ${yPredTrainAligned (0 until 10)}") + println (s"first 10 yTrueTestRescaled = ${yTrueTestRescaled (0 until 10)}") + println (s"first 10 yPredTestRescaled = ${yPredTestRescaled (0 until 10)}") + + // --- QoF metrics --- + val qofTrain = net.diagnose (yTrueTrainAligned, yPredTrainAligned) + val qofTest = net.diagnose (yTrueTestRescaled, yPredTestRescaled) + + banner ("QoF: Train (in-sample)") + println (FitM.fitMap (qofTrain, qoF_names)) + banner ("QoF: Test (out-of-sample)") + println (FitM.fitMap (qofTest, qoF_names)) + + // --- Plots --- + val t_train = VectorD.range(0, yPredTrainAligned.dim) + val t_test = VectorD.range(0, yPredTestRescaled.dim) + new Plot (t_train, yTrueTrainAligned, yPredTrainAligned, + "RNN New Deaths Forecast (Train)", lines = true) + new Plot (t_test, yTrueTestRescaled, yPredTestRescaled, + "RNN New Deaths Forecast (Test)", lines = true) + end rnnCovidTest1 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Chronological split forecasting with: + * - log → scale → train → unscale → exp transform pipeline + * - validation set carved out of training windows + * - ReduceLROnPlateau for LR scheduling + * - early stopping based on validation loss + * 
Supports both COVID and ILI depending on loaded dataset. + * Run: + * > runMain scalation.modeling.autograd.rnnCovidTest2 + */ + @main def rnnCovidTest2 (): Unit = + banner ("RNN Covid Test - Single Layer RNN on Covid Data with chronological split") + var y = Example_Covid.loadData_y ("new_deaths") + y = y(0 until 116) + +// var y = Example_ILI.loadData_y ("ILITOTAL") + + val split = (0.8 * y.dim).toInt + val y_train = y(0 until split) + val y_test = y(split until y.dim) +// val original_extremes = extreme(y_train) + +// val y_train_s = scaleV (original_extremes, (-2.0, 2.0))(y_train) +// val y_test_s = scaleV (original_extremes, (-2.0, 2.0))(y_test) + + val offset = 1.0 + val scale_range = (-1.0, 1.0) + val y_train_log = logTransformV (offset)(y_train) + val y_test_log = logTransformV (offset)(y_test) + val original_extremes = extreme(y_train_log) + + val y_train_s = scaleV (original_extremes, scale_range) (y_train_log) + val y_test_s = scaleV (original_extremes, scale_range) (y_test_log) + + val t = VectorD.range (0, y.dim) + new Plot (t, y, null, "Covid New Deaths y(t)", lines = true) + + val lags = 21 // how many past steps the RNN sees + val hh = 1 // predict 1 step ahead + val (x_train, yy_train) = buildMatrix4TS (y_train_s, lags, hh) + + // ----------- Validation set -------------- + // Here we create a validation set from the end of the training set + // by taking the last 10% of the training data + val valFraction = 0.1 // 10% for validation + val nTotalTrain = x_train.dim + val nVal = (nTotalTrain * valFraction).toInt max 8 // at least 8 windows + val nTrain = nTotalTrain - nVal + + val x_train_final = x_train (0 until nTrain) + val yy_train_final = yy_train (0 until nTrain) + val x_val = x_train (nTrain until nTotalTrain) + val yy_val = yy_train (nTrain until nTotalTrain) + println (s"Training windows: $nTrain, Validation windows: $nVal") + // ----------------------------------------- + val (x_prev_dummy, yy_prev_dummy) = buildMatrix4TS(y_test_s, lags, 
hh) + println (s"Previous x_test is : $x_prev_dummy") + println (s"Previous y_test is : $yy_prev_dummy") + + val y_full = VectorD (y_train_s.takeRight(lags) ++ y_test_s) + val (x_test_all, yy_test_all) = buildMatrix4TS (y_full, lags, hh) + val x_test = x_test_all.drop (lags) + val yy_test = yy_test_all.drop (lags) + + println (s"Current x_test is : $x_test") + println (s"Current y_test is : $yy_test") + + val batchSize = 8 // was 8 for covid, 16 for ILI + val batchesTrain = makeBatches (x_train_final, yy_train_final, batchSize) + val batchesVal = makeBatches (x_val, yy_val, batchSize) + + // Print shapes + println (s"Number of training batches: ${batchesTrain.length}") + println (s"Each batch input shape: (${batchesTrain(0)._1.length}, ${batchesTrain(0)._1(0).shape})") + println (s"Each batch target shape: ${batchesTrain(0)._2.shape}") + +// val inputSize = 1 + val hiddenSize = 20 // was 20 for covid, 10 for ILI + + case class RnnForecast (seqLen: Int, hiddenSize: Int, horizon: Int) + extends SeqModule with Fit (seqLen, horizon): + + import scala.language.implicitConversions + + // RNN + output projection + private val rnn = RNN (inputSize = 1, hiddenSize, numLayers = 1, activation = "tanh") + private val outputLayer = Linear (hiddenSize, horizon) + + override def parameters: IndexedSeq [Variabl] = + rnn.parameters ++ outputLayer.parameters + + override def forward (x: IndexedSeq [Variabl]): IndexedSeq[Variabl] = + val (outputs, _) = rnn.forward (x) + val lastOut = outputs.last // (batch, hiddenSize, 1) + val pred = lastOut ~> outputLayer // (batch, horizon, 1) + IndexedSeq (pred) + end forward + end RnnForecast + + // Instantiate the model and optimizer + val net = RnnForecast(seqLen = lags, hiddenSize = hiddenSize, horizon = hh) + val optimizer = Adam (parameters = net.parameters, + lr = 0.02, // was 0.02 for covid, 0.005 for ILI + weightDecay = 1e-4) + val scheduler = ReduceLROnPlateau ( + optim = optimizer, + mode = "min", // monitoring validation loss + factor 
= 0.6, // decay factor + patience = 30, // epochs to wait before reducing LR (was 30 for covid, 50 for ILI) + threshold = 0.02, // % relative improvement required + thresholdMode = "rel", // use relative thresholding (PyTorch style) + cooldown = 0, + minLR = 1e-5, + eps = 1e-8, + verbose = true + ) + + def validationLoss (): Double = + var tot = 0.0 + var n = 0 + for (xs, target) <- batchesVal do + val pred = net.forward (xs).head + val loss = mseLoss (pred, target) + val batchSize = target.shape.head + tot += loss.data(0)(0)(0) * batchSize + n += batchSize + end for + tot / math.max (n, 1) + end validationLoss + + // Training loop + object monitor extends MonitorLoss +// object EarlyStopper extends StoppingRule + val patience = 9000+80 // effectively disabled, it's over 9000! + var stopTraining = false + var bestValLoss = Double.PositiveInfinity + var bestParams: IndexedSeq [Variabl] = null + var badEpochs = 0 + val nEpochs = 500 + val maxNorm = 4.0 + + val startTime = System.nanoTime () + +// val lossWriter = new PrintWriter ("loss_covid_chronological.csv") + val lossWriter = new PrintWriter ("loss_ili_chronological.csv") + // Training loop + for epoch <- 0 until nEpochs if ! 
stopTraining do + var epochLoss = 0.0 + for (xs, target) <- batchesTrain do + // Zero gradients + optimizer.zeroGrad () + + // Forward pass + val pred = net.forward (xs).head + + // Compute loss (MSE) + val loss = mseLoss (pred, target) + epochLoss += loss.data(0)(0)(0) + + // Backward pass + loss.backward () + + // Clip gradients + optimizer.clipGradNorm (maxNorm) + + // Update parameters + optimizer.step () + + end for + + val avgTrain = epochLoss / batchesTrain.length + val valLoss = validationLoss () + + lossWriter.println (s"$epoch,$avgTrain,$valLoss") + monitor.collectLoss (valLoss) + scheduler.step (valLoss) + + if epoch % 10 == 0 then + println (f"Epoch $epoch%3d | train=$avgTrain%.6f | val=$valLoss%.6f") + end if + + // early stopping check + if valLoss < bestValLoss - 1e-6 then + bestValLoss = valLoss + bestParams = net.parameters.map (p => p.copy()) // deep copy + badEpochs = 0 + else + badEpochs += 1 + if badEpochs >= patience then + println (s"No improvement for $patience epochs. 
Early stopping at epoch $epoch with best val loss $bestValLoss") + net.setParameters (bestParams) + stopTraining = true + end if + end if + end for + // At the end of training, ensure we have the best parameters +// if (bestParams != null) then net.setParameters(bestParams) + lossWriter.close () + + val endTime = System.nanoTime () + val durationSeconds = (endTime - startTime) / 1e9 + println (f"Training completed in $durationSeconds%.2f seconds.") + + // =================================================== + // Diagnostics: QoF & FitMap + // =================================================== + // Predict on test set + val yPredTestSeq: IndexedSeq [Double] = + makeBatches (x_test, yy_test, batchSize) + .flatMap { case (xs, _) => + val v = net.forward (xs).head.data.flattenToVector + (0 until v.dim).map (v(_)) + } + + val yPredTest: VectorD = VectorD (yPredTestSeq) + val yTrueTest = yy_test.flatten + + // --- Train predictions --- + val yPredTrainSeq = + makeBatches (x_train_final, yy_train_final, batchSize) + .flatMap { case (xs, _) => + val v = net.forward (xs).head.data.flattenToVector + (0 until v.dim).map (v(_)) + } + + val yPredTrain = VectorD (yPredTrainSeq) + val yTrueTrain = yy_train_final.flatten + + // --- Unscale both --- + val yPredTrainRescaled = + expTransformV (offset) (unscaleV (original_extremes, scale_range)(yPredTrain)) + val yTrueTrainRescaled = + expTransformV (offset) (unscaleV (original_extremes, scale_range)(yTrueTrain)) + val yPredTestRescaled = + expTransformV (offset) (unscaleV (original_extremes, scale_range)(yPredTest)) + val yTrueTestRescaled = + expTransformV (offset) (unscaleV (original_extremes, scale_range)(yTrueTest)) + + val yPredTrainAligned = yPredTrainRescaled.drop (lags) + val yTrueTrainAligned = yTrueTrainRescaled.drop (lags) + + println (s"shapes of yTrueTrainAligned = ${yTrueTrainAligned.dim}, yPredTrainAligned = ${yPredTrainAligned.dim}") + println (s"shapes of yTrueTestRescaled = ${yTrueTestRescaled.dim}, 
yPredTestRescaled = ${yPredTestRescaled.dim}") + println (s"first 10 yTrueTrainAligned = ${yTrueTrainAligned (0 until 10)}") + println (s"first 10 yPredTrainAligned = ${yPredTrainAligned (0 until 10)}") + println (s"first 10 yTrueTestRescaled = ${yTrueTestRescaled (0 until 10)}") + println (s"first 10 yPredTestRescaled = ${yPredTestRescaled (0 until 10)}") + +// val trainWriter = new PrintWriter ("predictions_covid_train_chronological.csv") + val trainWriter = new PrintWriter ("predictions_ili_train_chronological.csv") + trainWriter.println ("t,actual,predicted") + for i <- yTrueTrainAligned.indices do + trainWriter.println (s"$i,${yTrueTrainAligned (i)},${yPredTrainAligned (i)}") + trainWriter.close () + +// val testWriter = new PrintWriter ("predictions_covid_test_chronological.csv") + val testWriter = new PrintWriter ("predictions_ili_test_chronological.csv") + testWriter.println ("t,actual,predicted") + for i <- yTrueTestRescaled.indices do + testWriter.println (s"$i,${yTrueTestRescaled (i)},${yPredTestRescaled (i)}") + testWriter.close () + + + // --- QoF metrics --- + val qofTrain = net.diagnose (yTrueTrainAligned, yPredTrainAligned) + val qofTest = net.diagnose (yTrueTestRescaled, yPredTestRescaled) + + banner ("QoF: Train (in-sample)") + println (FitM.fitMap (qofTrain, qoF_names)) + banner ("QoF: Test (out-of-sample)") + println (FitM.fitMap (qofTest, qoF_names)) + + // --- Plots --- + val t_train = VectorD.range (0, yPredTrainAligned.dim) + val t_test = VectorD.range (0, yPredTestRescaled.dim) + new Plot (t_train, yTrueTrainAligned, yPredTrainAligned, + "RNN New Deaths Forecast (Train)", lines = true) + new Plot (t_test, yTrueTestRescaled, yPredTestRescaled, + "RNN New Deaths Forecast (Test)", lines = true) + end rnnCovidTest2 + + // Current Best it seems to be: + // 1. 
R2 (train) = 0.962, SMAPE (train) = 7.65% + // R2 (test) = 0.896, SMAPE (test) = 13.89% + // ------------------ Hyperparameters (adjustable) ------------------ + // lags = 21 // number of past steps (window size) + // hiddenSize = 20 // RNN hidden units + // scale_range = (-1.0, 1.0) // scaling range for normalized inputs + // lr = 0.02 // initial learning rate + // weightDecay = 1e-4 // L2 regularization strength + // batchSize = 8 // mini-batch size + // nEpochs = 500 // max training epochs + // valFraction = 0.1 // fraction of training data for validation + // patienceLR = 30 // epochs before LR reduction (ReduceLROnPlateau) + // thresholdLR = 0.02 // % relative improvement required for LR scheduler + // maxNorm = 4.0 // gradient clipping norm + // offset = 1.0 // log transform offset + //------------------------------------------------------------------- + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + // RNN Covid Test - Single Layer RNN on Covid Data with Rolling Validation + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Walk-forward (rolling) validation: 1-step-ahead forecasting. + * Each fold: + * - trains (or reuses) the RNN on all available data up to time t + * - predicts the next point t+1 + * - logs QoF metrics + * Supports growing or rolling windows and periodic retraining. 
+ * Run: + * > runMain scalation.modeling.autograd.rnnCovidTestRollVal + */ + @main def rnnCovidTestRollVal (): Unit = + banner ("RNN Covid Test — Walk-Forward (1-step ahead)") + + // ----------------------- Data ----------------------- + var y = Example_Covid.loadData_y ("new_deaths") + y = y(0 until 116) + + val tAll = VectorD.range (0, y.dim) + new Plot (tAll, y, null, "Covid New Deaths y(t)", lines = true) + + // -------------------- Hyperparams ------------------- + val lags = 21 // past context + val hh = 1 + val batchSize = 8 + val hiddenSize = 20 + val nEpochs = 500 + val maxNorm = 4.0 + val offset = 1.0 // log transform offset + val scaleRange = (-1.0, 1.0) + val lr = 0.02 + val weightDecay = 1e-4 + val trainFrac = 0.80 // initial train fraction + val growingTrain = false + val retrainEvery = 8 // retrain every k folds + + // ----------------- Train window sizes ---------------- + val n = y.dim + val trSize0 = math.max((trainFrac * n).toInt, lags + hh) // ensure enough windows + val nFolds = n - trSize0 + if nFolds <= 0 then + println (s"Not enough data for walk-forward with lags=$lags.") + return + end if + + println (s"Data size n = $n, " + + s"initial train size = $trSize0, " + + s"folds = $nFolds (walk-forward 1-step)" + ) + + // ----------------- Model definition ----------------- + case class RnnForecast (seqLen: Int, hiddenSize: Int, horizon: Int) + extends SeqModule with Fit (seqLen, horizon): + import scala.language.implicitConversions + + private val rnn = RNN (inputSize = 1, hiddenSize, numLayers = 1, activation = "tanh") + private val out = Linear (hiddenSize, horizon) + + override def parameters: IndexedSeq [Variabl] = rnn.parameters ++ out.parameters + + override def forward (x: IndexedSeq [Variabl]): IndexedSeq[Variabl] = + val (outputs, _) = rnn.forward (x) + val last = outputs.last // (batch, hidden, 1) + val pred = last ~> out // (batch, horizon, 1) + IndexedSeq (pred) + end RnnForecast + + // ------------------ Helpers 
------------------------- + def makeB(x: MatrixD, yy: MatrixD): IndexedSeq [(IndexedSeq[Variabl], Variabl)] = + makeBatches (x, yy, batchSize) + + inline def smape(yTrue: Double, yPred: Double): Double = + val a = math.abs (yTrue); val b = math.abs (yPred); val d = math.abs(yPred - yTrue) + d / ( (a + b) / 2.0 + 1e-9 ) + + // Build ONE prediction batch from the last `lags` values of a (scaled) train vector. + // Returns an IndexedSeq [Variabl] of length `lags`, each of shape (1, 1, 1). + + def lastLagBatch (yTrainScaled: VectorD, lags: Int): IndexedSeq [Variabl] = + val w: VectorD = yTrainScaled (yTrainScaled.dim - lags until yTrainScaled.dim) + (0 until lags).map { t => + val m = new MatrixD (1, 1) + m(0, 0) = w(t) + Variabl (TensorD.fromMatrix (m).permute (Seq (1, 2, 0))) // (1,1,1) + } + end lastLagBatch + + // ----------------- Aggregation buffers --------------- + val yTrueAll = collection.mutable.ArrayBuffer [Double]() + val yPredAll = collection.mutable.ArrayBuffer [Double]() + + // ----------------- Walk-forward loop ----------------- + + + // ------------- Walk-forward (1-step) loop ------------ + var lastExtremes: (Double, Double) = (0.0, 0.0) + var net: RnnForecast = null + var optim: Optimizer = null + var sched: LRScheduler = null + + for fold <- 0 until nFolds do + // Train range + val (trStart, trEnd) = + if growingTrain then (0, trSize0 + fold) + else (fold, trSize0 + fold) // rolling/fixed-width + val y_tr = y(trStart until trEnd) + + // True next value (one step ahead) + val y_true_next = y(trEnd) + + // --- transform & scale fit on TRAIN ONLY --- + val y_tr_log = logTransformV (offset)(y_tr) + val (extremes, y_tr_s) = { + if fold % retrainEvery == 0 then + net = RnnForecast(seqLen = lags, hiddenSize = hiddenSize, horizon = hh) + optim = Adam(parameters = net.parameters, lr = lr, weightDecay = weightDecay) + sched = ReduceLROnPlateau( + optim = optim, + mode = "min", + factor = 0.6, + patience = 30, + threshold = 0.02, + thresholdMode = "rel", + 
cooldown = 0, + minLR = 1e-5, + eps = 1e-8, + verbose = false + ) + + val ex = extreme(y_tr_log) + lastExtremes = ex + val y_tr_s = scaleV (ex, scaleRange)(y_tr_log) + (ex, y_tr_s) + else + println (s"[Fold ${fold + 1}/$nFolds] Skipping retrain; reusing previous net/optim/sched") + + val ex = lastExtremes + val y_tr_s = scaleV (ex, scaleRange)(y_tr_log) + (ex, y_tr_s) + end if + } + + // --- build train matrices for many windows --- + val (x_tr, yy_tr) = buildMatrix4TS(y_tr_s, lags, hh) + if x_tr.dim < 2 || yy_tr.dim < 2 then + val naivePred = y_tr.last + yTrueAll += y_true_next + yPredAll += naivePred + println (f"[Fold ${fold+1}/$nFolds] skipped training (few windows). naive=${naivePred}%.4f true=${y_true_next}%.4f") + else + // batches + tiny val split from end of train windows + val nTot = x_tr.dim + val nVal = (nTot * 0.1).toInt max 8 // keep >=2 for train core + val nKeep = nTot - nVal + val bTrF = makeB (x_tr(0 until nKeep), yy_tr(0 until nKeep)) + val bVal = if nVal > 0 then makeB (x_tr(nKeep until nTot), yy_tr(nKeep until nTot)) + else IndexedSeq.empty + + def valLoss (): Double = + if bVal.isEmpty then Double.NaN + else + var tot = 0.0; var n = 0 + for (xs, tgt) <- bVal do + val p = net.forward (xs).head + val L = mseLoss (p, tgt) + val bs = tgt.shape.head + tot += L.data(0)(0)(0) * bs + n += bs + end for + tot / math.max (n, 1) + end if + end valLoss + + var bestValLoss = Double.PositiveInfinity + var bestParams = net.parameters.map (p => p.copy()) + var badEpochs = 0 + val patience = 9000+80 // effectively disabled, it's over 9000! + var stopTraining = false + + if fold % retrainEvery == 0 then + // --- Training phase --- + for epoch <- 0 until nEpochs if ! 
stopTraining do + var epochLoss = 0.0 + for (xs, tgt) <- bTrF do + optim.zeroGrad () + val p = net.forward (xs).head + val L = mseLoss (p, tgt) + epochLoss += L.data(0)(0)(0) + L.backward () + optim.clipGradNorm (maxNorm) + optim.step () + end for + + val v = valLoss () + if !java.lang.Double.isNaN (v) then + sched.step (v) + if v < bestValLoss - 1e-6 then + bestValLoss = v + bestParams = net.parameters.map (p => p.copy ()) + badEpochs = 0 + else + badEpochs += 1 + if badEpochs >= patience then + println (s"No improvement for $patience epochs. Early stopping at epoch $epoch : best loss $bestValLoss") + net.setParameters (bestParams) + stopTraining = true + end if + end if + end if + + if epoch % 50 == 0 || stopTraining then + val trainAvg = epochLoss / math.max (bTrF.length, 1) + if java.lang.Double.isNaN(v) then + println (f"[Fold ${fold + 1}/$nFolds] epoch=$epoch%3d train=$trainAvg%.6f") + else + println (f"[Fold ${fold + 1}/$nFolds] epoch=$epoch%3d train=$trainAvg%.6f val=$v%.6f (best=$bestValLoss%.6f)") + end if + end for + else + println (s"[Fold ${fold + 1}/$nFolds] Skipping training (retrainEvery = $retrainEvery)") + end if + + // =================================================== + // Diagnostics: QoF & FitMap (Train within this fold) + // =================================================== + val yPredTrainSeq = + makeB (x_tr(0 until nKeep), yy_tr(0 until nKeep)) + .flatMap { case (xs, _) => + val v = net.forward (xs).head.data.flattenToVector + (0 until v.dim).map (v(_)) + } + end yPredTrainSeq + + val yPredTrain = VectorD (yPredTrainSeq) + val yTrueTrain = yy_tr(0 until nKeep).flatten + + // --- Inverse scale + log back to original space --- + val yPredTrainRescaled = expTransformV (offset)(unscaleV (extremes, scaleRange)(yPredTrain)) + val yTrueTrainRescaled = expTransformV (offset)(unscaleV (extremes, scaleRange)(yTrueTrain)) + + val yPredTrainAligned = yPredTrainRescaled.drop (lags) + val yTrueTrainAligned = yTrueTrainRescaled.drop (lags) + + println 
( s"[Fold ${fold + 1}/$nFolds] shapes: yTrueTrainAligned=${yTrueTrainAligned.dim}, yPredTrainAligned=${yPredTrainAligned.dim}") + println (s"[Fold ${fold + 1}/$nFolds] first few yTrueTrainAligned = ${yTrueTrainAligned (0 until math.min(10, yTrueTrainAligned.dim))}") + println (s"[Fold ${fold + 1}/$nFolds] first few yPredTrainAligned = ${yPredTrainAligned (0 until math.min(10, yPredTrainAligned.dim))}") + + // --- Compute QoF (diagnostics / metrics) --- + val qofTrain = net.diagnose (yTrueTrainAligned, yPredTrainAligned) + println (banner (s"[Fold ${fold + 1}/$nFolds] TRAIN Diagnostics")) + println (FitM.fitMap (qofTrain, qoF_names)) + + + // --- 1-step prediction using last `lags` from TRAIN (teacher-forced) --- + val x_one = lastLagBatch (y_tr_s, lags) // IndexedSeq [Variabl] length = lags + val predNextScaled = net.forward (x_one).head.data.flattenToVector(0) + // invert scale+log to original space + val y_pred_next = + expTransformV (offset)( unscaleV (extremes, scaleRange)(VectorD (predNextScaled)))(0) + + // collect + yTrueAll += y_true_next + yPredAll += y_pred_next + + val sm = smape(y_true_next, y_pred_next) * 100.0 + println (f"[Fold ${fold+1}/$nFolds] y_true=${y_true_next}%.4f y_pred=${y_pred_next}%.4f SMAPE=${sm}%.2f%%") + +// val t_train = VectorD.range (0, yPredTrainAligned.dim) +// new Plot (t_train, yTrueTrainAligned, yPredTrainAligned, +// "RNN New Deaths Forecast (Train)", lines = true) + end if + end for + + // ---------------- Overall diagnostics ---------------- + + val yTrueAllV = VectorD (yTrueAll.toArray) + val yPredAllV = VectorD (yPredAll.toArray) + + banner ("Walk-Forward (1-step): Overall QoF on concatenated test points") + // Use a dummy net instance just to call `diagnose` (vector-only) + val dummy = RnnForecast (seqLen = lags, hiddenSize = hiddenSize, horizon = hh) + val qofAll = dummy.diagnose (yTrueAllV, yPredAllV) + println (FitM.fitMap (qofAll, qoF_names)) + + val t_oos = VectorD.range (0, yTrueAllV.dim) + new Plot (t_oos, 
yTrueAllV, yPredAllV, "RNN Walk-Forward 1-step (All Test Points)", lines = true) + end rnnCovidTestRollVal + + // :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + // Previous Run Results Summary: + // :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + // Run #1 — RNN Forecast (lags=21, retrainEvery=inf, growingTrain=false) + // Weekly COVID-19 Deaths — 1-Step Walk-Forward Forecast + // ------------------------------------------------------ + // R²: 0.896 | SMAPE: 13.89% | MAE: 1251.84 | RMSE: 1742.18 + + // Run #1.1 — RNN Forecast (lags=21, retrainEvery=inf, growingTrain=true) + // Weekly COVID-19 Deaths — 1-Step Walk-Forward Forecast + // ----------------------------------------------------- + // R²: 0.896 | SMAPE: 13.89% | MAE: 1251.84 | RMSE: 1742.18 + + // ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + // Run #2 — RNN Forecast (lags=21, retrainEvery=1, growingTrain=false) + // Weekly COVID-19 Deaths — 1-Step Walk-Forward Forecast + // ------------------------------------------------------ + // R²: 0.723 | SMAPE: 20.72% | MAE: 2065.32 | RMSE: 2846.25 + + // Run #2.1 — RNN Forecast (lags=21, retrainEvery=1, growingTrain=true) + // Weekly COVID-19 Deaths — 1-Step Walk-Forward Forecast + // ----------------------------------------------------- + // R²: 0.861 | SMAPE: 15.83% | MAE: 1439.73 | RMSE: 2016.49 + + // ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + // Run #3 — RNN Forecast (lags=21, retrainEvery=4, growingTrain=false) + // Weekly COVID-19 Deaths — 1-Step Walk-Forward Forecast + // ----------------------------------------------------- + // R²: 0.861 | SMAPE: 18.88% | MAE: 1511 | RMSE: 2019 + + // Run #3.1 — RNN Forecast (lags=21, retrainEvery=4, growingTrain=true) + // Weekly COVID-19 Deaths — 1-Step Walk-Forward + // -------------------------------------------- + // R²: 0.889 | SMAPE: 15.55 % | MAE: 1335 | RMSE: 1803 + + // 
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + // Run #4 — RNN Forecast (lags=21, retrainEvery=7, growingTrain=false) + // Weekly COVID-19 Deaths — 1-Step Walk-Forward Forecast + // ----------------------------------------------------- + // R²: 0.829 | SMAPE: 17.38% | MAE: 1563.76 | RMSE: 2236.67 + + // Run #4.1 — RNN Forecast (lags=21, retrainEvery=7, growingTrain=true) + // Weekly COVID-19 Deaths — 1-Step Walk-Forward Forecast + // ------------------------------------------------------ + // R²: 0.898 | SMAPE: 14.30% | MAE: 1247.7 | RMSE: 1724.8 + + // ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + // Run #5 — RNN Forecast (lags=21, retrainEvery=8, growingTrain=false) + // Weekly COVID-19 Deaths — 1-Step Walk-Forward Forecast + // ----------------------------------------------------- + // R²: 0.905 | SMAPE: 14.05 % | MAE: 1176 | RMSE: 1665 + + // Run #5.1 — RNN Forecast (lags=21, retrainEvery=8, growingTrain=true) + // Weekly COVID-19 Deaths — 1-Step Walk-Forward Forecast + // ----------------------------------------------------- + // R²: 0.899 | SMAPE: 13.91 % | MAE: 1214 | RMSE: 1719 + + // ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + // Run #6 — RNN Forecast (lags=21, retrainEvery=12, growingTrain=false) + // Weekly COVID-19 Deaths — 1-Step Walk-Forward Forecast + // ----------------------------------------------------- + // R²: 0.874 | SMAPE: 18.52 % | MAE: 1509 | RMSE: 1917 + + // Run #6.1 — RNN Forecast (lags=21, retrainEvery=12, growingTrain=true) + // Weekly COVID-19 Deaths — 1-Step Walk-Forward Forecast + // ----------------------------------------------------- + // R²: 0.891 | SMAPE: 16.20% | MAE: 1357.69 | RMSE: 1788.01 + + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** GRU toy example trained on a noisy sine wave. 
+ * Demonstrates: + * - GRUCell correctness in forecasting a smooth periodic signal + * - batching, training, clipping, plotting + * Run: + * > runMain scalation.modeling.autograd.gruSineTest + */ + @main def gruSineTest (): Unit = + banner ("GRU Toy Test - Single Layer RNN on Sine Data") + val nPoints = 200 + val step = 0.1 + val noise = 0.0 + val y = VectorD.range(0, nPoints).map (t => + math.sin(t * step) + noise * (scala.util.Random.nextGaussian())) + + val t = VectorD.range (0, y.dim) + new Plot (t, y, null, "Sine series y(t)", lines = true) + + val lags = 20 // how many past steps the RNN sees + val hh = 1 // predict 1 step ahead + val (x, yy) = buildMatrix4TS(y, lags, hh) + + val batchSize = 16 + val batches = makeBatches (x, yy, batchSize) + + // Print shapes + println (s"Number of batches: ${batches.length}") + println (s"Each batch input shape: (${batches(0)._1.length}, ${batches(0)._1(0).shape})") + println (s"Each batch target shape: ${batches(0)._2.shape}") + +// val inputSize = 1 +// val hiddenSize = 10 +// val numLayers = 1 + + case class GRUForecast(seqLen: Int, hiddenSize: Int, horizon: Int) + extends SeqModule with Fit(seqLen, horizon): + + import scala.language.implicitConversions + + // RNN + output projection + private val gru = GRU(inputSize = 1, hiddenSize, numLayers = 1) + private val outputLayer = Linear(hiddenSize, horizon) + + override def parameters: IndexedSeq [Variabl] = + gru.parameters ++ outputLayer.parameters + + override def forward (x: IndexedSeq [Variabl]): IndexedSeq[Variabl] = + val (outputs, _) = gru.forward (x) + val lastOut = outputs.last // (batch, hiddenSize, 1) + val pred = lastOut ~> outputLayer // (batch, horizon, 1) + IndexedSeq (pred) + end forward + end GRUForecast + + // Instantiate the model and optimizer + val net = GRUForecast(seqLen = lags, hiddenSize = 10, horizon = hh) + val optimizer = SGD(parameters = net.parameters, lr = 0.1, momentum = 0.90) + + // Sanity: one forward pass on the first batch + val (xs, 
target) = batches.head + val pred = net.forward (xs).head + println (s"Pred shape: ${pred.shape} vs target: ${target.shape}") + + // Training loop + object monitor extends MonitorLoss + object EarlyStopper extends StoppingRule + val patience = 20 + var stopTraining = false + val nEpochs = 200 + val maxNorm = 5.0 + + for epoch <- 0 until nEpochs if ! stopTraining do + var epochLoss = 0.0 + for (xs, target) <- batches do + // Zero gradients + optimizer.zeroGrad() + + // Forward pass + val pred = net.forward (xs).head + + // Compute loss (MSE) + val loss = mseLoss (pred, target) + epochLoss += loss.data(0)(0)(0) + + // Backward pass + loss.backward () + + // Clip gradients + optimizer.clipGradNorm(maxNorm) + + // Update parameters + optimizer.step () + end for + + val avgLoss = epochLoss / batches.length + monitor.collectLoss (avgLoss) + + if epoch % 10 == 0 then + println (f"Epoch $epoch%3d | Loss: ${epochLoss / batches.length}%.6f") + end if + + // early stopping check + val (stopParams, bestLoss) = EarlyStopper.stopWhenPatience(net.parameters, avgLoss, patience) + if stopParams != null then + println (s"Early stopping at epoch $epoch with best loss $bestLoss") + net.setParameters(stopParams) + stopTraining = true + end if + end for + + // =================================================== + // Diagnostics: QoF & FitMap + // =================================================== + // Predict on whole dataset + val yPredSeq: IndexedSeq [Double] = + batches.flatMap { case (xs, _) => + val v = net.forward (xs).head.data.flattenToVector // VectorD + (0 until v.dim).map (v(_)) + } + // drop the first "lags" predictions to align with yy + val yPredSeqAligned = yPredSeq.drop (lags) + val yPredVec: VectorD = VectorD (yPredSeqAligned) + // drop the first "lags" rows from yy to align + val yyAligned = yy(lags until yy.dim) + val yTrueVec = yyAligned.flatten + + println (s"shapes of yTrueVec = ${yTrueVec.dim}, yPredVec = ${yPredVec.dim}") + println (s"first 10 yTrueVec = 
${yTrueVec(0 until 10)}") + println (s"first 10 yPredVec = ${yPredVec(0 until 10)}") + + monitor.plotLoss ("GRU-Sine") + new Plot (t(lags until t.dim), yTrueVec, yPredVec, "RNN Sine Forecast", lines = true) + banner ("Final Train Statistics") + val qof = net.diagnose(yTrueVec, yPredVec) + println (FitM.fitMap (qof, qoF_names)) + + end gruSineTest + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forecast COVID-19 new deaths using a single-layer GRU. + * + * Run: + * > runMain scalation.modeling.autograd.gruCovidTest + */ + @main def gruCovidTest(): Unit = + banner ("GRU Covid Test - Single Layer GRU on Covid Data") + var y = Example_Covid.loadData_y("new_deaths") + y = y(0 until 116) + val original_extremes = extreme(y) + println ("original_extremes.type = " + original_extremes.getClass) + + val y_s = scaleV (original_extremes, (-1.0, 1.0))(y) + + val t = VectorD.range(0, y.dim) + new Plot (t, y, null, "Covid New Deaths y(t)", lines = true) + + val lags = 10 // how many past steps the GRU sees + val hh = 1 // predict 1 step ahead + val (x, yy) = buildMatrix4TS(y_s, lags, hh) + + val batchSize = 16 + val batches = makeBatches (x, yy, batchSize) + + // Print shapes + println (s"Number of batches: ${batches.length}") + println (s"Each batch input shape: (${batches(0)._1.length}, ${batches(0)._1(0).shape})") + println (s"Each batch target shape: ${batches(0)._2.shape}") + +// val inputSize = 1 + val hiddenSize = 8 + + case class GRUForecast (seqLen: Int, hiddenSize: Int, horizon: Int) + extends SeqModule with Fit (seqLen, horizon): + + import scala.language.implicitConversions + + // RNN + output projection + private val gru = GRU (inputSize = 1, hiddenSize, numLayers = 1) + private val outputLayer = Linear (hiddenSize, horizon) + + override def parameters: IndexedSeq [Variabl] = + gru.parameters ++ outputLayer.parameters + + override def forward (x: IndexedSeq [Variabl]): IndexedSeq[Variabl] = + val (outputs, _) = 
gru.forward (x) + val lastOut = outputs.last // (batch, hiddenSize, 1) + val pred = lastOut ~> outputLayer // (batch, horizon, 1) + IndexedSeq (pred) + end forward + end GRUForecast + + // Instantiate the model and optimizer + val net = GRUForecast (seqLen = lags, hiddenSize = hiddenSize, horizon = hh) + val optimizer = Adam (parameters = net.parameters, lr = 0.002, beta1 = 0.9, beta2 = 0.999) + val scheduler = StepLR (optimizer, stepSize = 100, gamma = 0.8) + + // Sanity: one forward pass on the first batch + val (xs, target) = batches.head + val pred = net.forward (xs).head + println (s"Pred shape: ${pred.shape} vs target: ${target.shape}") + + object monitor extends MonitorLoss + object EarlyStopper extends StoppingRule + val patience = 800 + var stopTraining = false + val nEpochs = 500 + val maxNorm = 5.0 + + // Training loop + for epoch <- 0 until nEpochs if ! stopTraining do + var epochLoss = 0.0 + for (xs, target) <- batches do + // Zero gradients + optimizer.zeroGrad() + + // Forward pass + val pred = net.forward (xs).head + + // Compute loss (MSE) + val loss = mseLoss (pred, target) + epochLoss += loss.data(0)(0)(0) + + // Backward pass + loss.backward () + + // Clip gradients + optimizer.clipGradNorm (maxNorm) + + // Update parameters + optimizer.step () + + end for + + val avgLoss = epochLoss / batches.length + monitor.collectLoss (avgLoss) + + // Step the scheduler (once per epoch) + scheduler.step () + + if epoch % 10 == 0 then + println (f"Epoch $epoch%3d | Loss: ${epochLoss / batches.length}%.6f") + end if + + // early stopping check + val (stopParams, bestLoss) = EarlyStopper.stopWhenPatience(net.parameters, avgLoss, patience) + if stopParams != null then + println (s"Early stopping at epoch $epoch with best loss $bestLoss") + net.setParameters(stopParams) + stopTraining = true + end if + end for + + // =================================================== + // Diagnostics: QoF & FitMap + // =================================================== + // 
Predict on whole dataset + val yPredSeq: IndexedSeq [Double] = + batches.flatMap { case (xs, _) => + val v = net.forward (xs).head.data.flattenToVector // VectorD + (0 until v.dim).map (v(_)) + } + // drop the first "lags" predictions to align with yy + val yPredSeqAligned = yPredSeq.drop (lags) + val yPredVec: VectorD = VectorD (yPredSeqAligned) + // drop the first "lags" rows from yy to align + val yyAligned = yy(lags until yy.dim) + val yTrueVec = yyAligned.flatten + + val yPredVecRescaled = unscaleV (original_extremes, (-2.0, 2.0))(yPredVec) + val yTrueVecRescaled = unscaleV (original_extremes, (-2.0, 2.0))(yTrueVec) + + println (s"shapes of yTrueVecRescaled = ${yTrueVecRescaled.dim}, yPredVecRescaled = ${yPredVecRescaled.dim}") + println (s"first 10 yTrueVecRescaled = ${yTrueVecRescaled (0 until 10)}") + println (s"first 10 yPredVecRescaled = ${yPredVecRescaled (0 until 10)}") + + monitor.plotLoss ("GRU-Covid-New-Deaths") + new Plot (t(lags until t.dim), yTrueVecRescaled, yPredVecRescaled, "RNN New Deaths Forecast", lines = true) + banner ("Final Train Statistics") + val qof = net.diagnose(yTrueVecRescaled, yPredVecRescaled) + println (FitM.fitMap (qof, qoF_names)) + end gruCovidTest + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** GRU forecasting with chronological split on either COVID or ILI datasets. 
+ * Uses: + * - log transform + scaling + * - validation-based LR scheduling (ReduceLROnPlateau) + * - truncated backprop (TBPTT) option in GRU.forward + * - full diagnostics and plots for predictions + * Run: + * > runMain scalation.modeling.autograd.gruCovidTest2 + */ + @main def gruCovidTest2 (): Unit = + banner ("GRU Covid Test - Single Layer GRU on Covid Data with chronological split") +// var y = Example_Covid.loadData_y ("new_deaths") +// y = y(0 until 116) + + val y = Example_ILI.loadData_y ("ILITOTAL") + + val split = (0.8 * y.dim).toInt + val y_train = y(0 until split) + val y_test = y(split until y.dim) +// val original_extremes = extreme (y_train) + +// val y_train_s = scaleV (original_extremes, (-2.0, 2.0))(y_train) +// val y_test_s = scaleV (original_extremes, (-2.0, 2.0))(y_test) + + val offset = 1.0 + val scale_range = (-1.0, 1.0) + val y_train_log = logTransformV (offset)(y_train) + val y_test_log = logTransformV (offset)(y_test) + val original_extremes = extreme (y_train_log) + + val y_train_s = scaleV (original_extremes, scale_range)(y_train_log) + val y_test_s = scaleV (original_extremes, scale_range)(y_test_log) + + val t = VectorD.range(0, y.dim) + new Plot (t, y, null, "Covid New Deaths y(t)", lines = true) + + val lags = 20 // how many past steps the GRU sees + val hh = 1 // predict 1 step ahead + val (x_train, yy_train) = buildMatrix4TS (y_train_s, lags, hh) + + // ----------- Validation set -------------- + // Here we create a validation set from the end of the training set + // by taking the last 10% of the training data + val valFraction = 0.1 // 10% for validation + val nTotalTrain = x_train.dim + val nVal = (nTotalTrain * valFraction).toInt max 8 // at least 8 windows + val nTrain = nTotalTrain - nVal + + val x_train_final = x_train (0 until nTrain) + val yy_train_final = yy_train (0 until nTrain) + val x_val = x_train (nTrain until nTotalTrain) + val yy_val = yy_train (nTrain until nTotalTrain) + println (s"Training windows: $nTrain, 
Validation windows: $nVal") + // ----------------------------------------- + + val (x_test, yy_test) = buildMatrix4TS (y_test_s, lags, hh) + + val batchSize = 16 + val batchesTrain = makeBatches (x_train_final, yy_train_final, batchSize) + val batchesVal = makeBatches (x_val, yy_val, batchSize) + + // Print shapes + println (s"Number of training batches: ${batchesTrain.length}") + println (s"Each batch input shape: (${batchesTrain(0)._1.length}, ${batchesTrain(0)._1(0).shape})") + println (s"Each batch target shape: ${batchesTrain(0)._2.shape}") + +// val inputSize = 1 + val hiddenSize = 20 + + case class GRUForecast(seqLen: Int, hiddenSize: Int, horizon: Int) + extends SeqModule with Fit(seqLen, horizon): + + import scala.language.implicitConversions + + // GRU + output projection + private val gru = GRU (inputSize = 1, hiddenSize, numLayers = 1) + private val outputLayer = Linear (hiddenSize, horizon) + + override def parameters: IndexedSeq [Variabl] = + gru.parameters ++ outputLayer.parameters + + override def forward (x: IndexedSeq [Variabl]): IndexedSeq [Variabl] = + val (outputs, _) = gru.forward (x, tbptt = 8) + val lastOut = outputs.last // (batch, hiddenSize, 1) + val pred = lastOut ~> outputLayer // (batch, horizon, 1) + IndexedSeq (pred) + end forward + end GRUForecast + + // Instantiate the model and optimizer + val net = GRUForecast(seqLen = lags, hiddenSize = hiddenSize, horizon = hh) + val optimizer = Adam (parameters = net.parameters, lr = 0.001, weightDecay = 3e-4) + val scheduler = ReduceLROnPlateau ( + optim = optimizer, + mode = "min", // monitoring validation loss + factor = 0.6, // decay factor + patience = 80, // epochs to wait before reducing LR + threshold = 5e-4, // % abs improvement required + thresholdMode = "abs", // use abs thresholding + cooldown = 20, + minLR = 1e-5, + eps = 1e-8, + verbose = true + ) + + def validationLoss (): Double = + var tot = 0.0 + var n = 0 + for (xs, target) <- batchesVal do + val pred = net.forward (xs).head 
+ val loss = mseLoss (pred, target) + val batchSize = target.shape.head + tot += loss.data(0)(0)(0) * batchSize + n += batchSize + end for + tot / math.max(n, 1) + end validationLoss + + // Training loop + object monitor extends MonitorLoss +// object EarlyStopper extends StoppingRule + val patience = 9000 + 80 // effectively disabled, it's over 9000! + var stopTraining = false + var bestValLoss = Double.PositiveInfinity + var bestParams: IndexedSeq [Variabl] = null + var badEpochs = 0 + val nEpochs = 400 + val maxNorm = 5.0 + + // Training loop + for epoch <- 0 until nEpochs if ! stopTraining do + var epochLoss = 0.0 + for (xs, target) <- batchesTrain do + // Zero gradients + optimizer.zeroGrad () + + // Forward pass + val pred = net.forward (xs).head + + // Compute loss (MSE) + val loss = mseLoss (pred, target) + epochLoss += loss.data(0)(0)(0) + + // Backward pass + loss.backward () + + // Clip gradients + optimizer.clipGradNorm (maxNorm) + + // Update parameters + optimizer.step () + + end for + + val avgTrain = epochLoss / batchesTrain.length + val valLoss = validationLoss () + + monitor.collectLoss (valLoss) + scheduler.step (valLoss) + + if epoch % 10 == 0 then + println (f"Epoch $epoch%3d | train=$avgTrain%.6f | val=$valLoss%.6f") + end if + + // early stopping check + if valLoss < bestValLoss - 1e-6 then + bestValLoss = valLoss + bestParams = net.parameters.map (p => p.copy()) // deep copy + badEpochs = 0 + else + badEpochs += 1 + if badEpochs >= patience then + println (s"No improvement for $patience epochs. 
Early stopping at epoch $epoch with best val loss $bestValLoss") + net.setParameters (bestParams) + stopTraining = true + end if + end if + end for + // At the end of training, ensure we have the best parameters + // if (bestParams != null) then net.setParameters(bestParams) + + // =================================================== + // Diagnostics: QoF & FitMap + // =================================================== + // Predict on test set + val yPredTestSeq: IndexedSeq [Double] = + makeBatches (x_test, yy_test, batchSize) + .flatMap { case (xs, _) => + val v = net.forward (xs).head.data.flattenToVector + (0 until v.dim).map (v(_)) + } + + val yPredTest: VectorD = VectorD (yPredTestSeq) + val yTrueTest = yy_test.flatten + + // --- Train predictions --- + val yPredTrainSeq = + makeBatches (x_train_final, yy_train_final, batchSize) + .flatMap { case (xs, _) => + val v = net.forward (xs).head.data.flattenToVector + (0 until v.dim).map (v(_)) + } + + val yPredTrain = VectorD (yPredTrainSeq) + val yTrueTrain = yy_train_final.flatten + + // --- Unscale both --- + val yPredTrainRescaled = + expTransformV (offset)(unscaleV (original_extremes, scale_range)(yPredTrain)) + val yTrueTrainRescaled = + expTransformV (offset)(unscaleV (original_extremes, scale_range)(yTrueTrain)) + val yPredTestRescaled = + expTransformV (offset)(unscaleV (original_extremes, scale_range)(yPredTest)) + val yTrueTestRescaled = + expTransformV (offset)(unscaleV (original_extremes, scale_range)(yTrueTest)) + + val yPredTrainAligned = yPredTrainRescaled.drop (lags) + val yTrueTrainAligned = yTrueTrainRescaled.drop (lags) + + println (s"shapes of yTrueTrainAligned = ${yTrueTrainAligned.dim}, yPredTrainAligned = ${yPredTrainAligned.dim}") + println (s"shapes of yTrueTestRescaled = ${yTrueTestRescaled.dim}, yPredTestRescaled = ${yPredTestRescaled.dim}") + println (s"first 10 yTrueTrainAligned = ${yTrueTrainAligned (0 until 10)}") + println (s"first 10 yPredTrainAligned = ${yPredTrainAligned (0 until 
10)}") + println (s"first 10 yTrueTestRescaled = ${yTrueTestRescaled (0 until 10)}") + println (s"first 10 yPredTestRescaled = ${yPredTestRescaled (0 until 10)}") + + // --- QoF metrics --- + val qofTrain = net.diagnose (yTrueTrainAligned, yPredTrainAligned) + val qofTest = net.diagnose (yTrueTestRescaled, yPredTestRescaled) + + banner ("QoF: Train (in-sample)") + println (FitM.fitMap (qofTrain, qoF_names)) + banner ("QoF: Test (out-of-sample)") + println (FitM.fitMap (qofTest, qoF_names)) + + // --- Plots --- + val t_train = VectorD.range (0, yPredTrainAligned.dim) + val t_test = VectorD.range (0, yPredTestRescaled.dim) + new Plot (t_train, yTrueTrainAligned, yPredTrainAligned, + "GRU New Deaths Forecast (Train)", lines = true) + new Plot (t_test, yTrueTestRescaled, yPredTestRescaled, + "GRU New Deaths Forecast (Test)", lines = true) + end gruCovidTest2 + +end RNNTestForecasting + diff --git a/src/main/scala/scalation/modeling/autograd/ReduceLROnPlateau.scala b/src/main/scala/scalation/modeling/autograd/ReduceLROnPlateau.scala new file mode 100644 index 000000000..63e3563a8 --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/ReduceLROnPlateau.scala @@ -0,0 +1,151 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Sat Nov 8 10:24:12 EST 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: ReduceLROnPlateau Learning Rate Scheduler + */ + +package scalation +package modeling +package autograd + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** PyTorch-style ReduceLROnPlateau scheduler. + * Monitors a metric each epoch and reduces the learning rate when progress + * plateaus. Supports both "min" (e.g., loss) and "max" (e.g., accuracy) modes, + * with relative or absolute thresholds for determining improvement. 
+ * The LR is reduced when the number of non-improving epochs exceeds `patience`, + * after which a cooldown period prevents further reductions. Each reduction + * follows: newLR = max(oldLR * factor, minLR), skipped when the change is + * too small (≤ eps). Non-finite metric values are ignored. + * Call `step(metric)` after each optimizer update. `getLastLR` returns the + * most recent learning rate. + * @param optim the optimizer whose learning rate will be scheduled + * @param mode "min" or "max" (target direction for improvement) + * @param factor multiplicative decay factor in (0,1), i.e., newLR = oldLR * factor + * @param patience number of non-improving epochs tolerated before reduction (strictly `> patience`) + * @param threshold significance threshold (relative or absolute depending on `thresholdMode`) + * @param thresholdMode "rel" for relative margin, "abs" for absolute margin + * @param cooldown epochs to wait after a reduction during which bad-epoch counter stays at 0 + * @param minLR lower bound on the learning rate + * @param eps minimal effective LR change required to apply a reduction + * @param verbose if true, prints LR reduction messages + */ +final class ReduceLROnPlateau (optim: Optimizer, + mode: String = "min", // "min" or "max" + factor: Double = 0.1, // newLR = oldLR * factor + patience: Int = 10, // epochs with no significant improvement before reduce + threshold: Double = 1e-4, // significance threshold + thresholdMode: String = "rel", // "rel" or "abs" + cooldown: Int = 0, // epochs to wait after a reduction + minLR: Double = 0.0, // LR floor + eps: Double = 1e-8, // minimal effective LR change + verbose: Boolean = false) + extends LRScheduler: + + require (factor > 0.0 && factor < 1.0, "factor must be in (0, 1)") + require (patience >= 0, "patience must be >= 0") + require (cooldown >= 0, "cooldown must be >= 0") + require (threshold >= 0.0, "threshold must be >= 0") + require (eps >= 0.0, "eps must be >= 0") + require 
(mode.equalsIgnoreCase ("min") || mode.equalsIgnoreCase ("max"), "mode must be 'min' or 'max'") + require (thresholdMode.equalsIgnoreCase ("rel") || thresholdMode.equalsIgnoreCase ("abs"), "thresholdMode must be 'rel' or 'abs'") + + private val _modeMin = mode.toLowerCase == "min" + private val _thrRel = thresholdMode.toLowerCase == "rel" + + private var best: Double = + if _modeMin then Double.PositiveInfinity else Double.NegativeInfinity + + private var numBadEpochs: Int = 0 + private var cooldownCounter: Int = 0 +// private var _lastLR: Double = optim.learningRate +// private var lastEpoch: Int = -1 + + private inline def isFinite (x: Double): Boolean = ! x.isNaN && ! x.isInfinity + private inline def inCooldown: Boolean = cooldownCounter > 0 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Determine whether the current metric is significantly better than the + * stored `best` metric. Uses both mode ("min"/"max") and threshold mode + * ("rel"/"abs") to define improvement. + * @param current the new metric value + * @param bestSoFar the best metric recorded so far + */ + private def isBetter (current: Double, bestSoFar: Double): Boolean = + if _thrRel then + if _modeMin then current < bestSoFar * (1.0 - threshold) + else current > bestSoFar * (1.0 + threshold) + else // "abs" + if _modeMin then current < bestSoFar - threshold + else current > bestSoFar + threshold + end isBetter + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Apply a learning rate reduction: + * newLR = max(oldLR * factor, minLR) + * Skips the update if `oldLR - newLR ≤ eps` (no effective change). + * Resets bad-epoch counter and enters cooldown. 
+ */ + private def reduceLR (): Unit = + val oldLR = optim.learningRate + val newLR = math.max (oldLR * factor, minLR) + // eps guard (skip if change too tiny) + if oldLR - newLR > eps then + if verbose then + println (f"[LR Scheduler] ReduceLROnPlateau: reducing LR from $oldLR%.6f to $newLR%.6f") + optim.learningRate = newLR +// _lastLR = newLR + cooldownCounter = cooldown + numBadEpochs = 0 + end if + end reduceLR + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Maintain cooldown semantics by decrementing the cooldown counter and + * keeping the bad-epoch counter at 0 during cooldown epochs. + */ + private def resetDuringCooldown (): Unit = + if cooldownCounter > 0 then cooldownCounter -= 1 + numBadEpochs = 0 + end resetDuringCooldown + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Update the scheduler using the monitored metric (e.g., validation loss). + * Must be called **after** each optimizer update. + * Steps: + * 1. Ignore non-finite metric values. + * 2. Test for significant improvement versus `best`. + * 3. Update counters (bad-epochs or reset during cooldown). + * 4. Trigger LR reduction when plateau persists (`numBadEpochs > patience`). + * @param currentMetric the metric value for this epoch + */ + override def step (currentMetric: Double): Unit = +// lastEpoch += 1 + if ! 
isFinite (currentMetric) then return + + val better = isBetter (currentMetric, best) + if better then + best = currentMetric // update best only on significant improvement + numBadEpochs = 0 + else + if inCooldown then + resetDuringCooldown () // PyTorch keeps bad-epoch counter at 0 during cooldown + else + numBadEpochs += 1 + end if + + // Reduce if plateau persisted strictly longer than patience + if numBadEpochs > patience && !inCooldown then reduceLR() + end step + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the most recently updated learning rate. + */ + override def getLastLR: Double = optim.learningRate + +end ReduceLROnPlateau + diff --git a/src/main/scala/scalation/modeling/autograd/SGD.scala b/src/main/scala/scalation/modeling/autograd/SGD.scala new file mode 100644 index 000000000..b8e8637a4 --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/SGD.scala @@ -0,0 +1,49 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri April 25 20:01:00 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: SGD Optimizer for Parameter Updates + */ + +package scalation +package modeling +package autograd + +import scalation.mathstat.TensorD + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Implements the Stochastic Gradient Descent (SGD) optimization algorithm. + * @param parameters an indexed sequence of model parameters to be optimized. + * @param lr the learning rate used for updating the parameters. + * @param momentum momentum factor to accelerate convergence (default is 0.0). + */ +case class SGD (parameters: IndexedSeq [Variabl], lr: Double, momentum: Double = 0.0) + extends Optimizer (parameters, lr): + + /** Velocity for each parameter initialized to zeros matching the shape of parameter data. 
+ */ + private val velocity = parameters.map (p => TensorD.zerosLike (p.data)) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Performs a single optimization step using the SGD algorithm. + * For each parameter: + * - Updates the velocity using the momentum factor and the current gradient. + * - Updates the parameter data by subtracting the computed velocity. + */ + override def step (): Unit = + for i <- parameters.indices do + val p = parameters(i) + var v = velocity(i) + + if p.grad != null then + v *= momentum // Update velocity using momentum + v += p.grad * lr // Add the gradient times the learning rate + p.data -= v // Update parameter by subtracting the velocity + end for + end step + +end SGD + diff --git a/src/main/scala/scalation/modeling/autograd/StepLR.scala b/src/main/scala/scalation/modeling/autograd/StepLR.scala new file mode 100644 index 000000000..bb60774ba --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/StepLR.scala @@ -0,0 +1,50 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Wed Nov 12 10:32:44 EST 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: StepLR Learning Rate Scheduler + * + * A simple learning rate scheduler that decays the learning rate by a constant + * factor `gamma` every `stepSize` epochs. Call `step()` once per epoch. + * `getLastLR` returns the most recently updated learning rate. + */ + +package scalation +package modeling +package autograd + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Step-based learning rate scheduler. + * Reduces the optimizer's learning rate by multiplying with `gamma` + * every `stepSize` epochs. Matches the behavior of PyTorch's StepLR + * for the single-LR (non–param-group) setting. 
+ * @param optim the optimizer whose learning rate will be scheduled + * @param stepSize the interval (in epochs) between LR reductions + * @param gamma the multiplicative decay factor applied every step + */ +final class StepLR (optim: Optimizer, stepSize: Int, gamma: Double) + extends LRScheduler: + + private var epoch = 0 + private var lastLR: Double = optim.learningRate + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Advance the scheduler by one epoch. When the epoch count is divisible + * by `stepSize`, reduce the learning rate by multiplying with `gamma`. + */ + override def step (): Unit = + epoch += 1 + if epoch % stepSize == 0 then + optim.learningRate *= gamma + lastLR = optim.learningRate + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the most recently updated learning rate. + */ + def getLastLR: Double = lastLR + +end StepLR + diff --git a/src/main/scala/scalation/modeling/autograd/TensorInitializers.scala b/src/main/scala/scalation/modeling/autograd/TensorInitializers.scala new file mode 100644 index 000000000..2decf8f1b --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/TensorInitializers.scala @@ -0,0 +1,129 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri April 25 19:56:12 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Tensor Initialization Methods for Neural Networks + */ + +package scalation +package modeling +package autograd + +import scalation.mathstat.{MatrixD, TensorD} +import scalation.random.{NormalMat, RandomMatD} + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `TensorInitializers` utility object for tensor initializations commonly used in neural networks. 
+ * Provides methods to create tensors filled with zeros, ones, random values, + * and standardized initialization schemes like He and Xavier initialization. + * All returned tensors have batch-first shape: (batch, rows, cols). + */ +object TensorInitializers: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a tensor of zeros with shape (batch, rows, cols). + */ + def zeros (batch: Int = 1, rows: Int, cols: Int): TensorD = TensorD.fill (batch, rows, cols, 0.0) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a tensor of ones with shape (batch, rows, cols). + */ + def ones (batch: Int = 1, rows: Int, cols: Int): TensorD = TensorD.fill (batch, rows, cols, 1.0) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert a MatrixD to a TensorD with shape (1, rows, cols). + */ + def fromMatrix (m: MatrixD): TensorD = TensorD.fromMatrix (m) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Stack a sequence of matrices into a TensorD with batch dimension. + * Each matrix becomes one slice: resulting shape = (batch, rows, cols). + */ + def fromMatrices (mats: IndexedSeq [MatrixD]): TensorD = TensorD (mats) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a tensor with random values from a uniform distribution U(0, 1). + */ + def rand (batch: Int = 1, rows: Int, cols: Int): TensorD = + val mats = for _ <- 0 until batch yield RandomMatD(rows, cols, 0.0, 1.0).gen + fromMatrices (mats) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a tensor with random values from a normal distribution N(0, stdDev^2). 
+ */ + def randn (batch: Int = 1, rows: Int, cols: Int, stdDev: Double = 1.0): TensorD = + val mats = for _ <- 0 until batch yield NormalMat (rows, cols, 0.0, stdDev).gen + fromMatrices (mats) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** He initialization (Kaiming initialization). + * Standard deviation: sqrt(2 / fanIn), where fanIn = number of input features. + */ + def heInit (batch: Int = 1, rows: Int, cols: Int): TensorD = + val std = math.sqrt(2.0 / rows) + randn (batch, rows, cols, std) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Xavier initialization (Glorot initialization). + * Standard deviation: sqrt(2 / (fanIn + fanOut)). + */ + def xavierInit (batch: Int = 1, rows: Int, cols: Int): TensorD = + val std = math.sqrt (2.0 / (rows + cols)) + randn (batch, rows, cols, std) + +// Additional initializers + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Uniform initializer: samples values from U(-bound, bound). + * @param batch number of tensor slices + * @param rows number of rows per slice + * @param cols number of columns per slice + * @param bound half-width of uniform interval + * @return a TensorD with uniform random values in [-bound, bound] + */ + def uniform (batch: Int = 1, rows: Int, cols: Int, bound: Double): TensorD = + val mats = for _ <- 0 until batch yield RandomMatD (rows, cols, bound, -bound).gen + fromMatrices (mats) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Xavier/Glorot uniform initialization. + * Samples from U(± sqrt(6 / (fanIn + fanOut))). 
+ * @param batch the number of tensor slices (default 1) + * @param fanOut the number of output features (rows) + * @param fanIn the number of input features (cols) + * @return a TensorD initialized using Xavier uniform initialization + */ + def xavierUniform (batch: Int = 1, fanOut: Int, fanIn: Int): TensorD = + val bound = math.sqrt (6.0 / (fanIn + fanOut)) + uniform (batch, fanOut, fanIn, bound) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** LeCun uniform initialization. + * Samples from U(± sqrt(3 / fanIn)). Suitable for tanh activations. + * @param batch the number of tensor slices (default 1) + * @param fanOut the number of output features (rows) + * @param fanIn the number of input features (cols) + * @return a TensorD initialized using LeCun uniform initialization + */ + def lecunUniform (batch: Int = 1, fanOut: Int, fanIn: Int): TensorD = + val bound = math.sqrt (3.0 / fanIn) + uniform (batch, fanOut, fanIn, bound) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** PyTorch-style RNN parameter initialization. + * Samples from U(± 1/sqrt(hiddenSize)) for all recurrent parameters. 
+ * @param batch the number of tensor slices (default 1) + * @param hiddenSize size of the hidden state used to compute bound + * @param rows the number of rows per slice + * @param cols the number of columns per slice + * @return a TensorD initialized using RNN uniform initialization + */ + def rnnUniform (batch: Int = 1, hiddenSize: Int, rows: Int, cols: Int): TensorD = + val bound = 1.0 / math.sqrt (hiddenSize.toDouble) + uniform (batch, rows, cols, bound) + +end TensorInitializers + diff --git a/src/main/scala/scalation/modeling/autograd/TestReport.scala b/src/main/scala/scalation/modeling/autograd/TestReport.scala new file mode 100644 index 000000000..988599314 --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/TestReport.scala @@ -0,0 +1,144 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Tue Nov 11 11:18:42 EST 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Lightweight Testing Utilities + * + * Provides simple tools for recording and summarizing unit tests. Includes + * status enumeration, result containers, ANSI color support, and a reporting + * utility for structured test output. + */ + +package scalation +package modeling +package autograd + +import scala.collection.mutable.ArrayBuffer +import scala.util.control.NonFatal + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Enumeration representing the status of a test: + * - Passed + * - Failed + */ +enum Status: + case Passed, Failed +end Status + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Result container for a single test execution. 
+ * @param name the name of the test + * @param status the status of the test (Passed or Failed) + * @param ms the execution time in milliseconds + * @param note optional note or error message (default empty) + */ +final case class TestResult (name: String, status: Status, ms: Long, note: String = "") + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** ANSI color codes for colored console output. + */ +object ConsoleColor: + val RESET = "\u001B[0m" + val RED = "\u001B[31m" + val GREEN = "\u001B[38;5;34m" + val YELLOW = "\u001B[33m" +end ConsoleColor + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** A test report utility for recording and summarizing test results. + * Stores a collection of `TestResult` objects and provides support for + * timing tests, capturing failures, and printing formatted summary reports. + */ +final class TestReport: + + private val buf = ArrayBuffer.empty [TestResult] + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Record and execute a test, capturing its execution time and status. + * @param name the name of the test + * @param body the test code to execute (returns true on success) + * @return true if the test passed, false otherwise + */ + inline def record (name: String)(inline body: => Boolean): Boolean = + val t0 = System.nanoTime() + var note = "" + val ok = + try body + catch + case NonFatal(e) => + note = s"${e.getClass.getSimpleName}: ${e.getMessage}" + false + val dtMs = ((System.nanoTime() - t0) / 1e6).toLong + val status = if ok then Status.Passed else Status.Failed + buf += TestResult(name, status, dtMs, note) + ok + end record + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Print a formatted summary of all recorded tests. 
+ * @param title title of the test report (default "Test Report") + * @param onlyFailures whether to print only failed tests (default false) + */ + def summary (title: String = "Test Report", onlyFailures: Boolean = false): Unit = + println (s"\n================== $title ==================") + val (passed, failed) = buf.partition(_.status == Status.Passed) + val rows = if onlyFailures then failed else buf + + if rows.isEmpty then + if onlyFailures && buf.nonEmpty then + println ("All tests passed ✅ (no failures to show)") + else + println ("No tests recorded.") + else + rows.foreach { r => + import ConsoleColor.* + val (tag, color) = + if r.status == Status.Passed then ("✅ PASSED", GREEN) + else ("❌ FAILED", RED) + + val note = if r.note.nonEmpty then s" [$YELLOW${r.note}$RESET]" else "" + + println (f"$color${r.name}%-40s $tag$RESET (${r.ms}ms)$note") + } + + println ("-------------------------------------------------") + val total = buf.size + val pass = passed.size + val fail = failed.size + val rate = if total == 0 then 0.0 else pass * 100.0 / total + println (f"Total: $total Passed: $pass Failed: $fail | Pass rate: $rate%.1f%%") + println ("=================================================") + end summary + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Check whether any recorded tests have failed. + * @return true if any test has status Failed, false otherwise + */ + def hasFailures: Boolean = buf.exists (_.status == Status.Failed) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Clear all recorded test results. + */ + def reset (): Unit = buf.clear () + +end TestReport + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Companion object for creating TestReport instances. + */ +object TestReport: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a new TestReport instance. 
+ * @return a new TestReport + */ + def apply (): TestReport = new TestReport + +end TestReport + diff --git a/src/main/scala/scalation/modeling/autograd/TransfomerEnc.scala b/src/main/scala/scalation/modeling/autograd/TransfomerEnc.scala new file mode 100644 index 000000000..85e252b0a --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/TransfomerEnc.scala @@ -0,0 +1,291 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John A. Miller + * @version 2.0 + * @date Sun Nov 9 23:27:14 EST 2025 + * @see LICENSE (MIT style license file). + * + * @note Simple Implementation for an Encoder-Only Transformer, e.g., used for + * (1) Natural Language Processing (NLP) or + * (2) Time Series Forecasting (TSF) + * + * Limitations: one attention head, no dropout layer, single encoder block, + * no back-propagation + * + * @see sebastianraschka.com/blog/2023/self-attention-from-scratch.html + * @see arxiv.org/pdf/1706.03762.pdf (main paper) + */ + +package scalation +package modeling +package autograd + +import scala.math.{cos, sin, sqrt} + +import scalation.mathstat._ +import scalation.modeling.ActivationFun.{f_reLU, f_softmax} + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `TransformerEnc` object implements the attention method based on the + * scaled dot product. + */ +object TransformerEnc: + + private val debug = debugf ("TransformerEnc", true) // debug function + val eps = 1E-5 // very small value + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Patchify the univariate time series y by breaking it into non-overlapping + * patches of length pl. This simple implementation assumes stride s = pl, + * but PatchTST uses pl = 16 and s = 8 as defaults. 
+ * @param y the given univariate time series + * @param pl the patch length + */ + def patchify (y: VectorD, pl: Int): MatrixD = + val m = y.dim + val np = m / pl + val x = new MatrixD (np, pl) + for i <- x.indices do x(i) = y(i*pl until (i+1)*pl) + x + end patchify + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Use a matrix transformation containing learnable weights to embed each patch + * vector into a higher dimensional space (providing enhanced vector similarity). + * The dimensionality of the embedding space is d_model. For this simple + * implementation d_model = d_k as there is only one attention head. + * @param xx the matrix containing each patch as a row + * @param wE the dimensionality of the embedding space + */ + def embed (xx: MatrixD, wE: MatrixD): MatrixD = xx * wE + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Encode all the positions in the time series as vectors of length d_model. + * @param len the sequence length + * @param d_k the dimensionality of the model (d_model = d_k here) + */ + def encodePositions (len: Int, d_k: Int): MatrixD = + val pe = new MatrixD (len, d_k) + for k <- pe.indices do + for i <- 0 until d_k/2 do + val den = 10000.0~^(2.0*i/d_k) + pe(k, 2*i) = sin (k / den) + pe(k, 2*i+1) = cos (k / den) + pe + end encodePositions + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Based on the Query (Q), Key (K), and Value (V) matrices, compute the attention. 
+ * + * att = softmax (QK^ᵀ/√d_k) V + * + * @param q the Query: the input of interest + * @param k the Key: other locations to compare it with (for similarity) + * @param v the Value: the input value at the key locations + * @param d_k the dimensionality of Query, Key, and Value (if different use d_v) + */ + def attention (q: MatrixD, k: MatrixD, v: MatrixD, d_k: Int): MatrixD = + val qkt = q * k.ᵀ // repeated dot product + val sdp = qkt / sqrt (d_k) // scaled dot product (sdp) + val scr = f_softmax.fM (sdp) // attention scores + debug ("attention", s" qkt = $qkt, sdp = $sdp, scr = $scr") + scr * v // attention (Q, K, V) + end attention + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform layer normalization on matrix x. The more general affine transformation + * is not supported in this simple implementation. + * @param x the matrix to normalize + */ + def layerNorm (x: MatrixD): MatrixD = + val xt = x.ᵀ + ((xt - xt.mean) / (xt.stdev + eps)).ᵀ + end layerNorm + +end TransformerEnc + +import TransformerEnc._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `transformerEnc1` main function illustrates the calculation of attention (Q, K, V) + * for a Single Head as used in a Transformer. SEE LINK BELOW FOR MORE DETAILS. + * + * @see pub.aimind.so/transformer-model-and-variants-of-transformer-chatgpt-3d423676e29c (URL) + * + * > runMain scalation.modeling.autograd.transformerEnc1 + */ +@main def transformerEnc1 (): Unit = + + val d_k = 3 // dimensionality for Q, K, and V (if different need d_v) +// val heads = 1 // number of attention heads (d_model = d_k * heads) + + // input token can be (sub) words (for NLP) or patches (for TSF) + // three inputs after embedding, each embedding vector has size/dimensionality 4 + // these three embedding vectors are made up, but could use word2vec, etc. 
+ val x = MatrixD ((3, 4), 1, 0, 1, 0, // input x0 for token 0 + 0, 2, 0, 2, // input x1 for token 1 + 1, 1, 1, 1) // input x2 for token 2 + + println (s"input (after embedding) x = $x") + + val wQ = MatrixD ((4, 3), 1, 0, 1, // Query weight matrix + 1, 0, 0, + 0, 0, 1, + 0, 1, 1) + val wK = MatrixD ((4, 3), 0, 0, 1, // Key weight matrix + 1, 1, 0, + 0, 1, 0, + 1, 1, 0) + val wV = MatrixD ((4, 3), 0, 2, 0, // Value weight matrix + 0, 3, 0, + 1, 0, 3, + 1, 1, 0) + + val q = x * wQ // Query: size of input x d_k + val k = x * wK // Key: size of input x d_k + val v = x * wV // Value: size of input x d_k (or d_v if different) + +// val att = attention (q, k, v, d_k) // compute attention based on correct size + val att = attention (q, k, v, 1) // approximation used by URL for checking purposes + + println (s""" + d_k = $d_k + wQ = $wQ + wK = $wK + wV = $wV + q = $q + k = $k + v = $v + att = $att + """) + +end transformerEnc1 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `transformerEnc2` main function illustrates the steps in an "Encoder-Only Transformer" + * consisting of a single encoder block with a "Prediction Head" added for making forecasts. + * > runMain scalation.modeling.forecasting.neuralforecasting.transformerEnc2 + */ +@main def transformerEnc2 (): Unit = + + import neuralnet.SimpleCNN.yy // example univariate time series of length 20 (t = 0 ... 
19) + println (s"|| time series yy = \n $yy") + val y_h1 = 6.5 // next actual value y(20) to compare with forecast + // rolling validation skipped for simplicity + + val pl = 4 // patch length + val d_k = 6 // dimensionality for Q, K, and V (if different need d_v) +// val heads = 1 // number of attention heads (d_model = d_k * heads) + + // input tokens in this case are patches (for TSF) + // five inputs with each patch vector having size/dimensionality 4 + val (μ, σ) = (yy.mean, yy.stdev) + val y = (yy - μ) / (σ + eps) // normalize the whole time series via z-transformation + val xx = patchify (y, pl) // patchify to form a 5 by 4 matrix + println (s"|| input after normalize and patchify y -> xx = $xx") + + //----------------------------------------- + // Input Embedding + Positional Encodings | + //----------------------------------------- + + val wE = MatrixD.fill (xx.dim2, d_k, 0.1) // transformation matrix holding learnable embedding weights + // improve initialization: use random Normal and rescale + var x = embed (xx, wE) // embed xx in a higher dimensional space + println (s"|| input after (higher dimensional) embedding xx -> x = $x") + + val pe = encodePositions (xx.dim, d_k) // create positional encodings + x += pe // add positional encodings + println (s"|| positional encodings pe = $pe") + println (s"|| input (after adding positional encoding) x = $x") + + //------------------ + // Attention Layer | + //------------------ + + val wQ = MatrixD.fill (d_k, d_k, 0.1) // Query weight matrix: improve initialization: randomize + val wK = MatrixD.fill (d_k, d_k, 0.1) // Key weight matrix + val wV = MatrixD.fill (d_k, d_k, 0.1) // Value weight matrix + + val q = x * wQ // Query: size of input x d_k + val k = x * wK // Key: size of input x d_k + val v = x * wV // Value: size of input x d_k (or d_v if different) + + val att = attention (q, k, v, d_k) // compute attention + + println (s""" + wQ = $wQ + wK = $wK + wV = $wV + q = $q + k = $k + v = $v + att = $att + 
""") + + //----------------------------- + // Add & Norm After Attention | + //----------------------------- + + x += att // add attention to x (residual connection) + x = layerNorm (x) // apply layer normalization + println (s"|| after layer normalization x = $x") + + //-------------------------------------------------------- + // Two-Layer (Hidden, Output) Feed Forward Network (FFN) | + //-------------------------------------------------------- + + val d_ff = 2 * d_k // dimensionality of FFN hidden layer (commonly use four-fold expansion) + val w1 = MatrixD.fill (d_k, d_ff, 0.1) // weight matrix preceding the FFN hidden layer: improve initialization: randomize + val b1 = VectorD.fill (d_ff)(0.1) // bias vector for the FFN hidden layer + val w2 = MatrixD.fill (d_ff, d_k, 0.1) // weight matrix preceding the FFN output layer + val b2 = VectorD.fill (d_k)(0.1) // bias vector for the FFN output layer + + // FFN forward prop: 'refined input' -> hidden // input with embedding, position encoding, attention, layer norm + val u = x * w1 + b1 // hidden pre-activation matrix + val z = f_reLU.fM (u) // hidden matrix from f0 = reLU activation +// val z = f_geLU.fM (u) // hidden matrix from f0 = geLU activation + + // FFN forward prop: hidden -> output + val vv = z * w2 + b2 // output pre-activation matrix + val ŷ = vv // output/prediction matrix: typically no activation + + println (s""" + u = $u + z = $z + vv = $vv + ŷ = $ŷ + """) + + //----------------------- + // Add & Norm After FNN | + //----------------------- + + x += ŷ // add output from FNN to x (residual connection) + x = layerNorm (x) // apply layer normalization a second time + println (s"|| at end of encoder block x = $x") + + //------------------------------- + // Prediction Head: Horizon = 1 | + //------------------------------- + + val h_last = x(x.dim-1) // use the last patch for prediction (simple example) + println (s"h_last.dim = ${h_last.dim}") + + val w_out = VectorD.fill (d_k)(0.1) // weight for the 
prediction head + val b_out = 0.0 // bias for the prediction head + + val ŷ_norm = h_last ∙ w_out + b_out // scalar on normalized (z) scale + val ŷ_h1 = μ + (σ + eps) * ŷ_norm // apply back-transformation to get forecast + + println (s"forecast (normalized) = $ŷ_norm") + println (s"forecast (original scale) = $ŷ_h1") + + val ε = y_h1 - ŷ_h1 // error at horizon 1 (actual - forecasted) + + println (s"forecast error at time t = ${y.dim}: ε = $ε") + + // No backward prop -- should use AutoDiff due to complexity + +end transformerEnc2 + diff --git a/src/main/scala/scalation/modeling/autograd/TransformerTestCore.scala b/src/main/scala/scalation/modeling/autograd/TransformerTestCore.scala new file mode 100644 index 000000000..379c5e267 --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/TransformerTestCore.scala @@ -0,0 +1,120 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Wed Nov 26 10:44:32 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Unit Tests for Core Transformer Components + */ + +package scalation +package modeling +package autograd + +import scalation.mathstat.TensorD + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The TransformerTestCore` tests the `Transformer` class. + */ +object TransformerTestCore: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Test ScaledDotProductAttention forward + backward. 
+ * > runMain scalation.modeling.autograd.TransformerTestCore.sdpaTest + */ + @main def sdpaTest (): Unit = + + banner ("Scaled Dot Product Attention Test") + + val R = TestReport() + val sdpa = ScaledDotProductAttention() + + // --------------------------------------------------------- + // Create a small dummy batch + // Shapes: (B, T, D) = (2, 3, 4) + // --------------------------------------------------------- + val B = 2 + val T = 3 + val D = 4 + + val q = Variabl ( + TensorD ((B, T, D), + // batch 0 + 0.1, 0.2, 0.3, 0.4, + 0.5, 0.6, 0.7, 0.8, + 0.9, 1.0, 1.1, 1.2, + // batch 1 + 1.1, 1.0, 0.9, 0.8, + 0.7, 0.6, 0.5, 0.4, + 0.3, 0.2, 0.1, 0.0 + ), + name = Some("q") + ) + + val k = Variabl ( + TensorD ((B, T, D), + 0.1, 0.2, 0.3, 0.4, + 0.5, 0.6, 0.7, 0.8, + 0.9, 1.0, 1.1, 1.2, + 1.1, 1.0, 0.9, 0.8, + 0.7, 0.6, 0.5, 0.4, + 0.3, 0.2, 0.1, 0.0 + ), + name = Some("k") + ) + + val v = Variabl ( + TensorD ((B, T, D), + 0.1, 0.2, 0.3, 0.4, + 0.5, 0.6, 0.7, 0.8, + 0.9, 1.0, 1.1, 1.2, + 1.1, 1.0, 0.9, 0.8, + 0.7, 0.6, 0.5, 0.4, + 0.3, 0.2, 0.1, 0.0 + ), + name = Some("v") + ) + + println(s"q shape = ${q.shape}, values = ${q.data}") + println(s"k shape = ${k.shape}") + println(s"v shape = ${v.shape}") + + // --------------------------------------------------------- + // Forward + // --------------------------------------------------------- + val out = sdpa (IndexedSeq(q, k, v)).head + println(s"SDPA output shape = ${out.shape}") + println(s"SDPA output values = ${out.data}") + + // --------------------------------------------------------- + // Gradient checks + // --------------------------------------------------------- + R.record("SDPA - gradCheck(q, k, v)") { + GradCheck.gradCheckAll( + Seq(q, k, v), + () => sdpa(IndexedSeq(q, k, v)).head.sum, + quiet = false + ) + } + + R.summary("Scaled Dot Product Attention Test") + + end sdpaTest + + + @main def mhaTest (): Unit = + + banner ("Multi-Head Attention Test") + + end mhaTest + + + @main def layerNormTest (): Unit = + 
+ banner ("Layer Normalization Test") + + end layerNormTest + +end TransformerTestCore + diff --git a/src/main/scala/scalation/modeling/autograd/Variabl.scala b/src/main/scala/scalation/modeling/autograd/Variabl.scala new file mode 100644 index 000000000..4f6af1aa3 --- /dev/null +++ b/src/main/scala/scalation/modeling/autograd/Variabl.scala @@ -0,0 +1,527 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Fri April 25 19:47:13 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Autograd: Variabl Class for Automatic Differentiation + * @see `Variable` in `scalation.modeling` + */ + +package scalation +package modeling +package autograd + +import scala.annotation.targetName + +import scalation.mathstat.TensorD + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Variabl` case class represents a tensor with automatic differentiation capability. + * It tracks operations applied to it for backward gradient propagation. + * Variabls can be combined using arithmetic operations, activation functions, + * and loss functions. Backpropagation is triggered via the `backward` method. + * @param data the tensor data for this variable. + * @param gradFn an optional function for backpropagation. + * @param name an optional name for this variable. + * @param ops the implicit autograd operations for tensor computations. + */ +case class Variabl (var data: TensorD, gradFn: Option [Function] = None, name: Option [String] = None) + (using ops: AutogradOps): + +// Representation & State + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns a string representation of the variable. + * If a name is defined, it is included in the output. + * @return a string containing the name (if available) and data. 
+ */ + override def toString: String = + if name.isDefined then s"name: ${name.get}, data: $data" + else s"data: $data" + end toString + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The gradient tensor associated with this variable. + * Initially set to a tensor of zeros with the same shape as data. + */ + var grad: TensorD = ops.zerosLike (data) + +// Graph / Autograd Control + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Performs backpropagation with a default gradient of ones. + */ + inline def backward (): Unit = backward (ops.onesLike (data)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Performs backpropagation using the specified output gradient. + * @param gradOutput the gradient tensor to propagate. + */ + inline def backward (gradOutput: TensorD): Unit = + grad += gradOutput + gradFn.foreach (fn => fn.backward (gradOutput)) + end backward + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Detaches the variable from the computation graph, returning a new variable with the same data. + * @param name an optional new name for the detached variable. + * @return a new variable with identical data but no gradient function. + */ + def detach (name: Option [String] = None): Variabl = Variabl (data, name = name) + +// Shape & Introspection + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns the shape of the tensor data as a list of dimensions. + * @return a List [Int] representing the dimensions of the data. + */ + inline def shape: List [Int] = ops.shape (data) + + private type SliceArg = Char | Range + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Slice this tensor variable along its three dimensions. 
+ * Allows slicing using either `Range` objects or the special character `'?'` + * to denote selecting the *entire* dimension (as in `x(?, 2, 5 until 10)`). + * @param a the slice for dimension 0 (a `Range` or `'?'`) + * @param b the slice for dimension 1 (a `Range` or `'?'`) + * @param c the slice for dimension 2 (a `Range` or `'?'`) + * @return a new `Variabl` representing the sliced view + * @throws IllegalArgumentException if any slice argument is not a `Range` or `'?'` + */ + def apply (a: SliceArg, b: SliceArg, c: SliceArg): Variabl = + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert a slice argument (`Range` or `'?'`) into a concrete `Range`. + * `'?'` is interpreted as the full dimension: `0 until dim`. + * @param spec the slice specifier (`Range` or `'?'`) + * @param dim the size of the dimension + * @return a normalized `Range` + * @throws IllegalArgumentException if the argument is not valid + */ + def normalize (spec: SliceArg, dim: Int): Range = + spec match + case c: Char if c == '?' => 0 until dim + case r: Range => r + case other => + throw new IllegalArgumentException( + s"Invalid slice argument: $other (expected Range or '?')" + ) + end normalize + + Slice( + this, + normalize(a, data.dim), + normalize(b, data.dim2), + normalize(c, data.dim3) + ).forward() + end apply + +// Factory Helpers (Same Shape) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns a new variable with data filled with zeros and the same shape as this variable. + * @return a Variabl with zeros. + */ + inline def zerosLike (): Variabl = Variabl (ops.zerosLike (data)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns a new variable with data filled with ones and the same shape as this variable. + * @return a Variabl with ones. 
+ */ + inline def onesLike (): Variabl = Variabl (ops.onesLike (data)) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Returns a new variable with data filled with the specified value and the same shape as this variable. + * @param value the value to fill the new variable with. + * @return a Variabl with the specified value. + */ + inline def fullLike (value: Double): Variabl = Variabl (ops.fullLike (data, value)) + +// Shape / View Operations + + @targetName ("Autograd_Transpose") + inline def transpose (i: Int, j: Int): Variabl = Transpose (this, i, j).forward () + + @targetName ("Autograd_Permute") + inline def permute (axes: Seq[Int]): Variabl = Permute (this, axes).forward () + + @targetName ("Autograd_Reshape") + inline def reshape (newShape: Seq[Int]): Variabl = Reshape (this, newShape).forward () + +// Statistical Reductions + + @targetName ("Autograd_Sum") + inline def sum: Variabl = Sum(this).forward () + + @targetName ("Autograd_Mean") + inline def mean: Variabl = Mean (this).forward () + + @targetName ("Autograd_Variance") + inline def variance: Variabl = Variance (this).forward () + + @targetName ("Autograd_Std") + inline def std: Variabl = Std (this).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the sum of elements along the specified axis and returns the result as a new variable. + * @param axis the axis along which to compute the sum. + * @return a Variabl representing the sum along the axis. 
+ */ + inline def sumAlongAxis (axis: Int): Variabl = Variabl (ops.sumAlongAxis (data, axis)) + + @targetName ("Autograd_MeanAxis") + inline def meanAxis (axis: Int): Variabl = MeanAlongAxis (this, axis).forward () + + @targetName ("Autograd_VarAxis") + inline def varAxis (axis: Int): Variabl = VarianceAlongAxis (this, axis).forward () + + @targetName ("Autograd_StdAxis") + inline def stdAxis (axis: Int): Variabl = StdAlongAxis (this, axis).forward () + + @targetName ("Autograd_MaxValue") + inline def maxValue: Variabl = MaxValue (this).forward () + + @targetName ("Autograd_MinValue") + inline def minValue: Variabl = MinValue (this).forward () + +// Elementwise Unary Math + + @targetName ("Autograd_Abs") + inline def abs: Variabl = Abs (this).forward () + + @targetName ("Autograd_Neg") + inline def unary_- : Variabl = Neg (this).forward () + + @targetName ("Autograd_Reciprocal") + inline def reciprocal: Variabl = Reciprocal (this).forward () + + @targetName ("Autograd_Sqrt") + inline def sqrt: Variabl = Sqrt (this).forward () + + @targetName ("Autograd_Log") + inline def log: Variabl = Log (this).forward () + + @targetName ("Autograd_LogBase") + inline def logBase (base: Double): Variabl = LogBase (this, base).forward () + + @targetName ("Autograd_Exp") + inline def exp: Variabl = Exp (this).forward () + +// Elementwise Rounding / Sign / Clamping + + @targetName ("Autograd_Floor") + inline def floor: Variabl = Floor (this).forward () + + @targetName ("Autograd_Ceil") + inline def ceil: Variabl = Ceil (this).forward () + + @targetName ("Autograd_Round") + inline def round: Variabl = Round (this).forward () + + @targetName ("Autograd_Sign") + inline def sign: Variabl = Sign (this).forward () + + @targetName ("Autograd_Clip") + inline def clip (min: Double, max: Double): Variabl = Clip (this, min, max).forward () + +// Elementwise Binary Arithmetic (Tensor-Tensor) + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Adds this 
variable with another variable. + * @param other the variable to add. + * @return a new Variabl representing the element-wise addition. + */ + @targetName ("Autograd_Add") + inline def + (other: Variabl): Variabl = Add (this, other).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Adds a constant to this variable. + * @param s the constant to add. + * @return a new Variabl representing the result. + */ + @targetName ("Autograd_AddConstant") + inline def + (s: Double): Variabl = AddConstant (this, s).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Subtracts another variable from this variable. + * @param other the variable to subtract. + * @return a new Variabl representing the element-wise subtraction. + */ + @targetName ("Autograd_Sub") + inline def - (other: Variabl): Variabl = Sub (this, other).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Subtracts a constant from this variable. + * @param s the constant to subtract. + * @return a new Variabl representing the result. + */ + @targetName("Autograd_SubConstant") + inline def - (s: Double): Variabl = SubConstant (this, s).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Multiplies this variable with another variable element-wise. + * @param other the variable to multiply. + * @return a new Variabl representing the multiplication. + */ + @targetName ("Autograd_Mul") + inline def * (other: Variabl): Variabl = Mul (this, other).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Multiplies this variable by a constant. + * @param s the constant multiplier. + * @return a new Variabl representing the scaled variable. 
+ */ + @targetName("Autograd_MulConstant") + inline def * (s: Double): Variabl = MulConstant (this, s).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Divides this variable by another variable element-wise. + * @param other the variable divisor. + * @return a new Variabl representing the division. + */ + @targetName ("Autograd_Div") + inline def / (other: Variabl): Variabl = Div (this, other).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Divides this variable by a constant. + * @param s the constant divisor. + * @return a new Variabl representing the scaled division. + */ + @targetName ("Autograd_DivConstant") + inline def / (s: Double): Variabl = DivConstant (this, s).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Raises this variable to the power of the given exponent. + * @param s the exponent to raise this variable to. + * @return a new Variabl representing the power operation. + */ + @targetName ("Autograd_Pow") + inline def ~^ (s: Int): Variabl = Pow (this, s).forward () + +// Min / Max (Elementwise) + + @targetName ("Autograd_Max") + inline def max (other: Variabl): Variabl = Max (this, other).forward () + + @targetName ("Autograd_Min") + inline def min (other: Variabl): Variabl = Min (this, other).forward () + + @targetName ("Autograd_MaxScalar") + inline def max (s: Double): Variabl = MaxScalar (this, s).forward () + + @targetName ("Autograd_MinScalar") + inline def min (s: Double): Variabl = MinScalar (this, s).forward () + +// Tensor Algebra + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Computes the dot product of this variable with another variable. + * @param other the variable to perform the dot product with. + * @return a Variabl representing the dot product. 
+ */ + @targetName ("Autograd_Dot") + inline def dot (other: Variabl): Variabl = Dot (this, other).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Performs matrix multiplication of this variable with another variable. + * @param other the variable to multiply matrices with. + * @return a Variabl representing the matrix multiplication result. + */ + @targetName ("Autograd_Matmul") + inline def matmul (other: Variabl): Variabl = MatMul (this, other).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Performs batched matrix multiplication of this variable with another variable. + * @param other the variable to multiply in batches. + * @return a Variabl representing the batch matrix multiplication. + */ + @targetName ("Autograd_BatchMatmul") + inline def bmm (other: Variabl): Variabl = BatchMatMul(this, other).forward () + +// Activation Functions + + @targetName ("Autograd_Identity") + inline def id: Variabl = Identity (this).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Applies the ReLU activation function to this variable. + * @return a Variabl after applying ReLU. + */ + @targetName ("Autograd_ReLU") + inline def relu: Variabl = ReLU (this).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Applies the LeakyReLU activation function to this variable. + * @param alpha the slope for negative inputs (default is 0.2). + * @return a Variabl after applying LeakyReLU. + */ + @targetName ("Autograd_LeakyReLU") + inline def leakyReLU (alpha: Double = 0.2): Variabl = LeakyReLU (this, alpha).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Applies the ELU activation function to this variable. + * @param alpha the ELU scaling parameter (default is 1.0). + * @return a Variabl after applying ELU. 
+ */ + @targetName ("Autograd_ELU") + inline def elu (alpha: Double = 1.0): Variabl = ELU (this, alpha).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Applies the tanh activation function to this variable. + * @return a Variabl after applying tanh. + */ + @targetName ("Autograd_Tanh") + inline def tanh: Variabl = Tanh (this).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Applies the sigmoid activation function to this variable. + * @return a Variabl after applying sigmoid. + */ + @targetName ("Autograd_Sigmoid") + inline def sigmoid: Variabl = Sigmoid (this).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Applies the GeLU activation function to this variable. + * @return a Variabl after applying GeLU. + */ + @targetName ("Autograd_GeLU") + inline def gelu: Variabl = GeLU (this).forward () + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Applies the softmax activation function to this variable. + * @return a Variabl after applying softmax. + */ + @targetName ("Autograd_Softmax") + inline def softmax: Variabl = Softmax (this).forward () + +// Functional Chaining + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Chains the provided function with this variable. + * @param f a function that takes a Variabl and returns a Variabl. + * @return the result of applying the function to this variable. + */ + @targetName ("Autograd_Chain") + inline def ~> (f: Variabl => Variabl): Variabl = f (this) + +end Variabl + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Top-level helper functions and implicit conversions for autograd. + * Includes shortcut activation functions (e.g., relu, sigmoid) and + * implicit conversions to treat modules as functions for cleaner composition. 
+ */ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the Rectified Linear Unit (ReLU) activation for the input variable. + * @param v the input variable. + * @return a new variable after applying ReLU. + */ +def relu (v: Variabl): Variabl = v.relu + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the Leaky ReLU activation for the input variable. + * @param v the input variable. + * @param alpha the slope for negative inputs, default is 0.01. + * @return a new variable after applying Leaky ReLU. + */ +def leakyReLU (v: Variabl, alpha: Double = 0.01): Variabl = v.leakyReLU (alpha) + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the hyperbolic tangent (tanh) activation for the input variable. + * @param v the input variable. + * @return a new variable after applying tanh. + */ +def tanh (v: Variabl): Variabl = v.tanh + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the Sigmoid activation for the input variable. + * @param v the input variable. + * @return a new variable after applying sigmoid. + */ +def sigmoid (v: Variabl): Variabl = v.sigmoid + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the exponential (exp) of the input variable. + * @param v the input variable. + * @return a new variable after applying the exponential function. + */ +def exp (v: Variabl): Variabl = v.exp + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the softmax activation for the input variable. + * @param v the input variable. + * @return a new variable after applying softmax. 
+ */ +def softmax (v: Variabl): Variabl = v.softmax + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the Gaussian Error Linear Unit (GeLU) activation for the input variable. + * @param v the input variable. + * @return a new variable after applying GeLU. + */ +def gelu (v: Variabl): Variabl = v.gelu + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the Exponential Linear Unit (ELU) activation for the input variable. + * @param v the input variable. + * @param alpha the ELU scaling parameter, default is 1.0. + * @return a new variable after applying ELU. + */ +def elu (v: Variabl, alpha: Double = 1.0): Variabl = v.elu (alpha) + +// Loss Functions + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the Sum of Squared Error (SSE) loss between two variables. + * @param x the predictions variable. + * @param y the target variable. + * @return a variable representing the computed SSE loss. + */ +def sseLoss (x: Variabl, y: Variabl): Variabl = SSELoss (x, y).forward () + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the Mean Squared Error (MSE) loss between two variables. + * @param x the predictions variable. + * @param y the target variable. + * @return a variable representing the computed MSE loss. + */ +def mseLoss (x: Variabl, y: Variabl): Variabl = MSELoss (x, y).forward () + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Computes the Mean Absolute Error (MAE) loss between two variables. + * @param x the predictions variable. + * @param y the target variable. + * @return a variable representing the computed MAE loss. 
+ */ +def maeLoss (x: Variabl, y: Variabl): Variabl = MAELoss (x, y).forward () + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Concatenates a sequence of variables along the specified axis. + * @param vars the sequence of variables to concatenate. + * @param axis the axis along which to concatenate. + * @return a new variable representing the concatenated result. + */ +def concat (vars: Seq [Variabl], axis: Int): Variabl = Concat (vars, axis).forward () + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Slices a variable along its three dimensions using the specified ranges. + * @param v the variable to slice. + * @param a the range for the first dimension. + * @param b the range for the second dimension. + * @param c the range for the third dimension. + * @return a new variable representing the sliced result. + */ +def slice (v: Variabl, a: Range, b: Range, c: Range): Variabl = Slice (v, a, b, c).forward () + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Provides an implicit conversion from a Module to a function that maps a Variabl to a Variabl. + * This allows using a Module directly as a function. 
+ */ +given Conversion [Module, Variabl => Variabl] with + def apply (m: Module): Variabl => Variabl = m.apply + diff --git a/src/main/scala/scalation/modeling/classifying/BaggingTrees.scala b/src/main/scala/scalation/modeling/classifying/BaggingTrees.scala index 543d66f72..829ffdf5e 100644 --- a/src/main/scala/scalation/modeling/classifying/BaggingTrees.scala +++ b/src/main/scala/scalation/modeling/classifying/BaggingTrees.scala @@ -38,9 +38,9 @@ class BaggingTrees (x: MatrixD, y: VectorI, fname_ : Array [String] = null, k: I private val debug = debugf ("BaggingTrees", true) // debug function private val flaw = flawf ("BaggingTrees") // flaw function - protected val nTrees = hparam ("nTrees").toInt // number of trees - protected val bRatio = hparam ("bRatio").toDouble // bagging ratio - protected val height = hparam ("height").toInt // height limit + protected val nTrees = hparam("nTrees").toInt // number of trees + protected val bRatio = hparam("bRatio").toDouble // bagging ratio + protected val height = hparam("height").toInt // height limit protected val trees = Array.ofDim [DecisionTree_C45] (nTrees) // many decision trees protected val sampleSize = (bRatio * x.dim).toInt // size of matrix sub-samples @@ -48,7 +48,7 @@ class BaggingTrees (x: MatrixD, y: VectorI, fname_ : Array [String] = null, k: I if nTrees <= 0 then flaw ("init", "BT number of tree must be at least one") if bRatio <= 0 || bRatio >= 1 then flaw ("init", "BT bagging ratio restricted to (0, 1)") - modelName = s"BaggingTrees_${height}_$nTrees" // name of the model + _modelName = s"BaggingTrees_${height}_$nTrees" // name of the model //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the vector of model parameter vector. 
@@ -269,7 +269,6 @@ end baggingTreesTest3 else elseSample.set (elseCount, xy(i)) elseCount += 1 - end if end for val elseFeature = elseSample(?, 0 until elseSample.dim2-1) diff --git a/src/main/scala/scalation/modeling/classifying/Classifier.scala b/src/main/scala/scalation/modeling/classifying/Classifier.scala index fed1d7536..08be353a9 100644 --- a/src/main/scala/scalation/modeling/classifying/Classifier.scala +++ b/src/main/scala/scalation/modeling/classifying/Classifier.scala @@ -12,6 +12,7 @@ package scalation package modeling package classifying +import scala.annotation.unused import scala.collection.mutable.{ArrayBuffer, IndexedSeq, LinkedHashSet => LSET, Set} import scala.runtime.ScalaRunTime.stringOf import scala.util.control.Breaks.{break, breakable} @@ -32,7 +33,8 @@ import scalation.mathstat._ trait Classifier (x: MatrixD, y: VectorI, protected var fname: Array [String], k: Int = 2, protected var cname: Array [String] = null, hparam: HyperParameter) - extends Model: + extends Model + with FeatureSelection: private val debug = debugf ("Classifier", true) // debug function private val flaw = flawf ("Classifier") // flaw function @@ -40,7 +42,6 @@ trait Classifier (x: MatrixD, y: VectorI, protected var fname: Array [String], if cname == null then cname = if k == 2 then Array ("No", "Yes") // use default class names/labels else (for i <- 0 until k yield s"c$i").toArray - end if if cname.length != k then flaw ("init", "# class names != # classes") if x != null then @@ -59,6 +60,8 @@ trait Classifier (x: MatrixD, y: VectorI, protected var fname: Array [String], // protected var b = VectorD.nullv // parameter/coefficient vector [b_0, b_1, ... b_k] protected var e = VectorI.nullv // residual/error vector [e_0, e_1, ... 
e_m-1] + _taskType = TaskType.Classify // the type of task performed + if x != null && fname == null then fname = x.indices2.map ("x" + _).toArray // default feature/variable names //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -96,6 +99,7 @@ trait Classifier (x: MatrixD, y: VectorI, protected var fname: Array [String], * @param y_ the training/full response/output vector (defaults to full y) */ def train (x_ : MatrixD = x, y_ : VectorI = y): Unit = + debug ("train", s"x_.dims = ${x_.dims}, y_.dim = ${y_.dim}") val nup = y_.freq (k) nu_y = nup._1 // set frequency vector p_y = nup._2 // set probability vector @@ -151,7 +155,7 @@ trait Classifier (x: MatrixD, y: VectorI, protected var fname: Array [String], * Override as needed. * @param z the new vector to predict */ - def predictI (z: VectorI): Int = p_y.argmax () + def predictI (@unused z: VectorI): Int = p_y.argmax () def predictI (z: VectorD): Int // = p_y.argmax () def predict (z: VectorD): Double = predictI (z.toInt) @@ -279,8 +283,14 @@ REPORT /** Build a sub-model that is restricted to the given columns of the data matrix. * Override for models that support feature selection. * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - def buildModel (x_cols: MatrixD): Classifier = null + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): Classifier = ??? // FIX + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the best model found from feature selection. + */ + def getBest: scalation.modeling.BestStep = ??? // FIX //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `BestStep` is used to record the best improvement step found so far. @@ -294,17 +304,15 @@ REPORT /** Update the rSq-based QoF results for the l-th iteration. 
* @param rSq the matrix contain information about r-Sq-based QoF measures * @param l the l-th iteration - * @param cross indicator of whether cross-validation are to be included - * @param fit_l the fit vector for the l-th iteration - * @param mod_l the predictive model for the l-th iteration + * @param cross indicator to include "many" cross-validation, "one" validation, or "none" nothing + * @param best the best step so far */ - private def updateQoF (rSq: MatrixD, l: Int, cross: Boolean, best: BestStep): Unit = + private def updateQoF (rSq: MatrixD, l: Int, cross: String, best: BestStep): Unit = rSq(l) = - if cross then + if cross == "many" then FitC.qofVector (best.qof, best.mod.crossValidate ()) // results for model mod_l, with cross-validation else FitC.qofVector (best.qof, null) // results for model mod_l, no cross-validation - end if end updateQoF //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -313,27 +321,29 @@ REPORT * measures for all steps. * @see `Fit` for index of QoF measures. 
* @param tech the feature selection technique to apply - * @param idx_q index of Quality of Fit (QoF) to use for comparing quality + * @param qk index of Quality of Fit (QoF) to use for comparing quality * @param cross whether to include the cross-validation QoF measure - */ - def selectFeatures (tech: SelectionTech, idx_q: Int = QoF.rSqBar.ordinal, cross: Boolean = true): + * + def selectFeatures (tech: SelectionTech, qk: Int = QoF.rSqBar.ordinal, cross: String = "many"): (LSET [Int], MatrixD) = tech match - case SelectionTech.Forward => forwardSelAll (idx_q, cross) - case SelectionTech.Backward => backwardElimAll (idx_q, 1, cross) - case SelectionTech.Stepwise => stepRegressionAll (idx_q, cross) + case SelectionTech.Forward => forwardSelAll (qk, cross) + case SelectionTech.Backward => backwardElimAll (qk, 1, cross) + case SelectionTech.Stepwise => stepRegressionAll (qk, cross) + case SelectionTech.Beam => beamSelAll (qk, cross)(0) // FIX use bk, not default end match end selectFeatures + */ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Perform forward selection to find the most predictive variable to add the * existing model, returning the variable to add and the new model. * May be called repeatedly. * @see `Fit` for index of QoF measures. 
- * @param cols the columns of matrix x currently included in the existing model - * @param idx_q index of Quality of Fit (QoF) to use for comparing quality + * @param cols the columns of matrix x currently included in the existing model + * @param qk index of Quality of Fit (QoF) to use for comparing quality */ - def forwardSel (cols: LSET [Int], idx_q: Int = QoF.rSqBar.ordinal): BestStep = + def forwardSel (cols: LSET [Int], qk: Int = QoF.rSqBar.ordinal): BestStep = var best = BestStep () // best step so far var bestq = -MAX_VALUE // best score so far @@ -343,25 +353,23 @@ REPORT val mod_j = buildModel (x_cols) // regress with x_j added mod_j.train () // train model val cand = BestStep (j, mod_j.test ()._2, mod_j) // candidate step - if cand.qof(idx_q) > bestq then { best = cand; bestq = cand.qof(idx_q) } + if cand.qof(qk) > bestq then { best = cand; bestq = cand.qof(qk) } end for if best.col == -1 then flaw ("forwardSel", "could not find a variable x_j to add: best.col = -1") - end if best end forwardSel //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform forward selection to find the most predictive variables to have + /** Perform FORWARD SELECTION to find the MOST predictive variables to have * in the model, returning the variables added and the new Quality of Fit (QoF) * measures for all steps. * @see `Fit` for index of QoF measures. 
- * @param idx_q index of Quality of Fit (QoF) to use for comparing quality - * @param cross whether to include the cross-validation QoF measure + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") + * @param qk index of Quality of Fit (QoF) to use for comparing quality */ - def forwardSelAll (idx_q: Int = QoF.rSqBar.ordinal, cross: Boolean = true): - (LSET [Int], MatrixD) = + def forwardSelAll (cross: String = "many")(using qk: Int): (LSET [Int], MatrixD) = val rSq = new MatrixD (x.dim2 - 1, 3) // QoF: R^2, R^2 Bar, R^2 cv val cols = LSET (0) // start with x_0 in model @@ -369,11 +377,11 @@ REPORT breakable { for l <- 1 until x.dim2 do - val best = forwardSel (cols, idx_q) // add most predictive variable + val best = forwardSel (cols, qk) // add most predictive variable if best.col == -1 then break () // could not find variable to add cols += best.col // add variable x_j updateQoF (rSq, l-1, cross, best) // update QoF results - val (jj, jj_qof) = (best.col, best.qof(idx_q)) + val (jj, jj_qof) = (best.col, best.qof(qk)) banner (s"forwardSelAll: (l = $l) ADD variable ($jj, ${fname(jj)}) => cols = $cols @ $jj_qof") end for } // breakable @@ -387,11 +395,11 @@ REPORT * vector and the new Quality of Fit (QoF). May be called repeatedly. * @see `Fit` for index of QoF measures. 
* @param cols the columns of matrix x currently included in the existing model - * @param idx_q index of Quality of Fit (QoF) to use for comparing quality + * @param qk index of Quality of Fit (QoF) to use for comparing quality * @param first first variable to consider for elimination * (default (1) assume intercept x_0 will be in any model) */ - def backwardElim (cols: LSET [Int], idx_q: Int = QoF.rSqBar.ordinal, first: Int = 1): BestStep = + def backwardElim (cols: LSET [Int], qk: Int = QoF.rSqBar.ordinal, first: Int = 1): BestStep = var best = BestStep () // best step so far var bestq = -MAX_VALUE // best score so far @@ -401,12 +409,11 @@ REPORT val mod_j = buildModel (x_cols) // regress with x_j added mod_j.train () // train model val cand = BestStep (j, mod_j.test ()._2, mod_j) // candidate step - if cand.qof(idx_q) > bestq then { best = cand; bestq = cand.qof(idx_q) } + if cand.qof(qk) > bestq then { best = cand; bestq = cand.qof(qk) } end for if best.col == -1 then flaw ("backwardElim", "could not find a variable x_j to eliminate: best.col = -1") - end if best end backwardElim @@ -421,31 +428,30 @@ REPORT end fullModel //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform backward elimination to find the least predictive variables to remove + /** Perform BACKWARD ELIMINATION to find the LEAST predictive variables to remove * from the full model, returning the variables left and the new Quality of Fit (QoF) * measures for all steps. * @see `Fit` for index of QoF measures. 
- * @param idx_q index of Quality of Fit (QoF) to use for comparing quality * @param first first variable to consider for elimination - * @param cross whether to include the cross-validation QoF measure + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") + * @param qk index of Quality of Fit (QoF) to use for comparing quality */ - def backwardElimAll (idx_q: Int = QoF.rSqBar.ordinal, first: Int = 1, cross: Boolean = true): - (LSET [Int], MatrixD) = + def backwardElimAll (first: Int = 1, cross: String = "many")(using qk: Int): (LSET [Int], MatrixD) = val rSq = new MatrixD (x.dim2 - 1, 3) // R^2, R^2 Bar, R^2 cv val cols = LSET.range (0, x.dim2) // start with all x_j in model val best0 = fullModel updateQoF (rSq, 0, cross, best0) // update QoF results for full model - val jj_qof = best0.qof(idx_q) + val jj_qof = best0.qof(qk) banner (s"backwardElimAll: (l = 0) INITIAL variables (all) => cols = $cols @ $jj_qof") breakable { for l <- 1 until x.dim2 - 1 do // l indicates number of variables eliminated - val best = backwardElim (cols, idx_q, first) // remove least predictive variable + val best = backwardElim (cols, qk, first) // remove least predictive variable if best.col == -1 then break () // could not find variable to remove cols -= best.col // remove variable x_j updateQoF (rSq, l, cross, best) // update QoF results - val (jj, jj_qof) = (best.col, best.qof(idx_q)) + val (jj, jj_qof) = (best.col, best.qof(qk)) banner (s"backwardElimAll: (l = $l) REMOVE variable ($jj, ${fname(jj)}) => cols = $cols @ $jj_qof") end for } // breakable @@ -454,64 +460,64 @@ REPORT end backwardElimAll //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform stepwise regression to find the most predictive variables to have - * in the model, returning the variables left and the new Quality of Fit (QoF) + /** Perform STEPWISE SELECTION to find a GOOD COMBINATION of predictive featues/variables + * to have in 
the model, returning the variables left and the new Quality of Fit (QoF) * measures for all steps. At each step it calls forwardSel and backwardElim * and takes the best of the two actions. Stops when neither action yields improvement. * @see `Fit` for index of QoF measures. - * @param idx_q index of Quality of Fit (QoF) to use for comparing quality - * @param cross whether to include the cross-validation QoF measure + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") + * @param swap whether to allow a swap step (swap out a feature for a new feature in one step) + * @param qk index of Quality of Fit (QoF) to use for comparing quality */ - def stepRegressionAll (idx_q: Int = QoF.rSqBar.ordinal, cross: Boolean = true): - (LSET [Int], MatrixD) = - val SWAP = false // whether to include swapping + def stepwiseSelAll (cross: String = "many", swap_ : Boolean = true)(using qk: Int): + (LSET [Int], MatrixD) = val rSq = new MatrixD (x.dim2 - 1, 3) // QoF: R^2, R^2 Bar, R^2 cv val cols = LSET (0) // start with x_0 in model var last_q = -MAX_VALUE // current best QoF val vars = ArrayBuffer [Int]() + val swap_ = true banner (s"stepRegressionAll: (l = 0) INITIAL variable (0, ${fname(0)}) => cols = $cols") breakable { for l <- 1 until x.dim2 - 1 do - val bestf = forwardSel (cols, idx_q) // add most predictive variable OR - val bestb = backwardElim (cols, idx_q, 1) // remove least predictive variable + val bestf = forwardSel (cols, qk) // add most predictive variable OR + val bestb = backwardElim (cols, qk, 1) // remove least predictive variable debug ("stepRegressionAll", s"bestf = $bestf, bestb = $bestb") - if (bestb.col == -1 || bestf.qof(idx_q) >= bestb.qof(idx_q)) && // forward as good as backward - (bestf.col != -1 && bestf.qof(idx_q) > last_q) then // a better model has been found + if (bestb.col == -1 || bestf.qof(qk) >= bestb.qof(qk)) && // forward as good as backward + (bestf.col != -1 && bestf.qof(qk) > last_q) then // 
a better model has been found vars += bestf.col - cols += bestf.col // ADD variable bestf.col - last_q = bestf.qof(idx_q) - updateQoF (rSq, l, cross, bestf) // update QoF results + cols += bestf.col // ADD variable bestf.col + last_q = bestf.qof(qk) + updateQoF (rSq, l, cross, bestf) // update QoF results println (s"\nstepRegressionAll: (l = $l) ADD variable $bestf") val (jj, jj_qof) = (bestf.col, last_q) banner (s"stepRegressionAll: (l = $l) ADD variable ($jj, ${fname(jj)}) => cols = $cols @ $jj_qof") - else if bestb.col != -1 && bestb.qof(idx_q) > last_q then // a better model has been found + else if bestb.col != -1 && bestb.qof(qk) > last_q then // a better model has been found vars += bestb.col - cols -= bestb.col // REMOVE variable bestb.col - last_q = bestb.qof(idx_q) - updateQoF (rSq, l, cross, bestb) // update QoF results + cols -= bestb.col // REMOVE variable bestb.col + last_q = bestb.qof(qk) + updateQoF (rSq, l, cross, bestb) // update QoF results println (s"\nstepRegressionAll: (l = $l) REMOVE variable $bestb") val (jj, jj_qof) = (bestb.col, last_q) banner (s"stepRegressionAll: (l = $l) REMOVE variable ($jj, ${fname(jj)}) => cols = $cols @ $jj_qof") else - if ! SWAP then break () + if ! 
swap_ then break () val (out, in) = (bestb.col, bestf.col) val bestfb = swapVars (cols, out, in) - if out != -1 && in != -1 && bestfb.qof(idx_q) > last_q then // a better model has been found + if out != -1 && in != -1 && bestfb.qof(qk) > last_q then // a better model has been found vars += bestb.col vars += bestf.col cols -= bestb.col // REMOVE variable bestb.col (swap out) cols += bestf.col // ADD variable bestf.col (swap in) - last_q = bestfb.qof(idx_q) + last_q = bestfb.qof(qk) updateQoF (rSq, l, cross, bestfb) // update QoF results println (s"\nstepRegressionAll: (l = $l) SWAP variable $bestb with $bestf") else break () // can't find a better model -> quit - end if end if end for } // breakable @@ -521,7 +527,7 @@ REPORT println (s"stepRegressionAll: features in/out = $vars") (cols, rSq(1 until cols.size)) - end stepRegressionAll + end stepwiseSelAll //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Swap out variable with in variable. @@ -537,6 +543,25 @@ REPORT BestStep (in, mod_j.test ()._2, mod_j) // candidate step end swapVars + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform BEAM SEARCH SELECTION to find a GOOD COMBINATION of predictive features/variables to + * have in the model, returning the top k sets of features/variables selected and the new Quality of + * Fit (QoF) measures/metrics for all steps. At each step, iterate over the models in the beam + * (top k) and create candidates by adding features (phase 1) and then removing (phase 2). + * From all the candidates, keep the best k and start a new iteration. Stops when there is + * no improvement in any of top k (or the maximum number of features is reached. + * @see `Fit` for index of QoF measures/metrics. 
+ * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") + * @param bk the beam width holding the top k models (defaults to 3) + * @param qk index of Quality of Fit (QoF) to use for comparing quality + */ + def beamSelAll (cross: String = "many", bk: Int = 3)(using qk: Int): (LSET [Int], MatrixD) = + + // FIX -- to be implemented + + null + end beamSelAll + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the Variance Inflation Factor (VIF) for each variable to test * for multi-collinearity by regressing x_j against the rest of the variables. @@ -560,6 +585,20 @@ REPORT vifV end vif +// T E S T I N G S C E N A R I O S + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform In-Sample Testing, i.e., train and test on the FULL data set. + * @param skip the number of initial data points to skip (due to insufficient information) + * @param showYp whether to show the prediction vector + */ + def inSample_Test (skip: Int = 0, showYp: Boolean = false): Unit = + val (x_, y_) = (x.drop (skip), y.drop (skip)) + val yp = trainNtest (x_, y_)(x_, y_)._1 + if showYp then + println (s"Final In-Sample Prediction Vector yp = $yp") + end inSample_Test + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the indices for the test-set. 
* @see `scalation.mathstat.TnT_Split` @@ -570,24 +609,39 @@ REPORT TnT_Split.testIndices (permGen, n_test, rando) end testIndices + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the indices for the test-set for (1) RANDONLY or (3) LAST + * @see `scalation.mathstat.TnT_Split` + * @param n_total the size of full dataset + * @param n_test the size of test-set + * @param rando whether to select indices randomly or in blocks + */ + inline def testIndices (n_total: Int, n_test: Int, rando: Boolean): IndexedSeq [Int] = + TnT_Split.testIndices (permGen, n_total, n_test, rando) + end testIndices + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /* Use validation to compute test Quality of Fit (QoF) measures by dividing * the full dataset into a TESTING set and a TRAINING set. * The test set is defined by idx and the rest of the data is the training set. + * @see `modeling.Predictor.validate` about the RANDOM, FIRST, and LAST options + * for selecting the testing-set. 
* @param rando flag indicating whether to use randomized or simple validation * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) - * @param idx the prescribed TESTING set indices + * @param idx the prescribed TESTING set indices (default => generate) */ - def validate (rando: Boolean = true, ratio: Double = 0.2) - (idx : IndexedSeq [Int] = testIndices ((ratio * y.dim).toInt, rando)): VectorD = + def validate (rando: Boolean = true, ratio: Double = Model.TE_RATIO) +// (idx: IndexedSeq [Int] = testIndices ((ratio * y.dim).toInt, rando)): + (idx: IndexedSeq [Int] = testIndices (y.dim, (ratio * y.dim).toInt, rando)): + (VectorD, VectorD) = + debug ("validate", s"n_test = ${(ratio * y.dim).toInt}, rando = $rando") val (x_e, x_, y_e, y_) = TnT_Split (x, y, idx) // Test-n-Train Split train (x_, y_) // train model on the training set - val qof = test (x_e, y_e)._2 // test on test-set and get QoF measures + val (yp, qof) = test (x_e, y_e) // test on test-set and get QoF measures if qof(QoF.sst.ordinal) <= 0.0 then // requires variation in test-set flaw ("validate", "chosen testing set has no variability") - end if - qof + (yp.toDouble, qof) end validate //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -609,11 +663,10 @@ REPORT for fold <- 0 until k do banner (s"crossValidate: fold $fold: train-test sizes = (${y.dim - sz}, $sz)") val idx = fullIdx (fold * sz until (fold+1) * sz).toMuIndexedSeq // instance indices for this fold - val qof = validate (rando, ratio)(idx) + val qof = validate (rando, ratio)(idx)._2 debug ("crossValidate", s"fold $fold: qof = $qof") if qof(QoF.sst.ordinal) > 0.0 then // requires variation in test-set for q <- qof.indices do stats(q).tally (qof(q)) // tally these QoF measures - end if end for stats end crossValidate @@ -623,7 +676,7 @@ end Classifier //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `Classifier` companion object provides a 
method for testing predictive - * models. + * classification models. */ object Classifier: @@ -674,10 +727,9 @@ object Classifier: //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Downsample to reduce imbalance of classes, by returning the group indices * and the probability for each group. - * @param y the classification/response vector - * @param ns the number of instances in downsample + * @param y the classification/response vector */ - def downsample (y: VectorI, ns: Int): Array [Int] = + def downsample (y: VectorI): Array [Int] = val dsample = Set [Int] () // create an empty downsample val (group, freq) = partition (y) // partition into groups val gmax = freq.min - 1 // use smallest group for samples per group @@ -701,12 +753,12 @@ object Classifier: def test (mod: Classifier, ext: String = "", check: Boolean = true): Unit = val iq = QoF.rSq.ordinal banner (s"Test ${mod.modelName} $ext") - val (yp, qof) = mod.trainNtest ()() // train and test the model on full dataset (in-sample) + val qof = mod.trainNtest ()()._2 // train and test the model on full dataset (in-sample) println ("Validate: Out-of-Sample Testing") - val qof2 = mod.validate ()() // train on training set, test on testing set + val qof2 = mod.validate ()()._2 // train on training set, test on testing set if check then assert (rel_diff (qof(iq), qof2(iq)) < 0.2) // check agreement of in-sample and out-of-sample results - println (FitM.fitMap (mod.validate ()(), QoFC.values.map (_.toString))) + println (FitM.fitMap (qof2, QoFC.values.map (_.toString))) end test end Classifier diff --git a/src/main/scala/scalation/modeling/classifying/DecisionTree.scala b/src/main/scala/scalation/modeling/classifying/DecisionTree.scala index d1e4d8772..9ca09de34 100644 --- a/src/main/scala/scalation/modeling/classifying/DecisionTree.scala +++ b/src/main/scala/scalation/modeling/classifying/DecisionTree.scala @@ -94,7 +94,6 @@ trait DecisionTree: leaves += n // add n to leaves else 
println (s"makeLeaf: node $n already is a leaf") - end if end makeLeaf //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -154,7 +153,7 @@ trait DecisionTree: val zj = z(n.j) try predictIrec (z, n.branch(zj)) catch - case ex: NoSuchElementException => n.nu.argmax () // take consensus of node n + case _ : NoSuchElementException => n.nu.argmax () // take consensus of node n end if end predictIrec @@ -172,7 +171,7 @@ trait DecisionTree: if cont then predictIrecD (z, if zj <= n.thres then n.branch(0) else n.branch(1)) else predictIrecD (z, n.branch(zj.toInt)) catch - case ex: NoSuchElementException => n.nu.argmax () // take consensus of node n + case _ : NoSuchElementException => n.nu.argmax () // take consensus of node n end if end predictIrecD @@ -235,7 +234,6 @@ object Node: println ("\t" * level + "[ " + n) for c <- n.branch.values do printT (c, level + 1) println ("\t" * level + "]") - end if end printT end Node diff --git a/src/main/scala/scalation/modeling/classifying/DecisionTree_C45.scala b/src/main/scala/scalation/modeling/classifying/DecisionTree_C45.scala index 2a14da73e..32529482a 100644 --- a/src/main/scala/scalation/modeling/classifying/DecisionTree_C45.scala +++ b/src/main/scala/scalation/modeling/classifying/DecisionTree_C45.scala @@ -16,8 +16,8 @@ import scala.collection.mutable.{ArrayBuffer, Set} import scalation.mathstat._ import scalation.mathstat.Probability.{entropy, freq} - -import VariableKind.Categorical +import scalation.theory.Variable +import scalation.theory.VariableKind.Categorical //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `DecisionTree_C45` class implements a Decision Tree classifier using the @@ -34,15 +34,15 @@ import VariableKind.Categorical * @param hparam the hyper-parameters */ class DecisionTree_C45 (x: MatrixD, y: VectorI, fname_ : Array [String] = null, k: Int = 2, - cname_ : Array [String] = Array ("No", "Yes"), - conts: Set [Int] = Set [Int] (), hparam: 
HyperParameter = DecisionTree.hp) + cname_ : Array [String] = Array ("No", "Yes"), + conts: Set [Int] = Set [Int] (), hparam: HyperParameter = DecisionTree.hp) extends Classifier (x, y, fname_, k, cname_, hparam) with FitC (k) with DecisionTree: private val debug = debugf ("DecisionTree_C45", false) // debug function - private val height = hparam ("height").toInt // the maximum height of tree - private val cutoff = hparam ("cutoff") // cutoff entropy threshold + private val height = hparam("height").toInt // the maximum height of tree + private val cutoff = hparam("cutoff") // cutoff entropy threshold private var entropy_0 = entropy (y.freq (k)._2) // initial entropy of full vector y private val threshold = Array.ofDim [Double] (x.dim2) // threshold for continuous features (below <=, above >) @@ -51,7 +51,7 @@ class DecisionTree_C45 (x: MatrixD, y: VectorI, fname_ : Array [String] = null, for j <- x.indices2 do feas(j) = if conts contains j then Variable (x(?, j), j) else Variable (x(?, j), j, Categorical) - modelName = s"DecisionTree_C45_$height" // name of the model + _modelName = s"DecisionTree_C45_$height" // name of the model debug ("init", s"entropy of original/full y: entropy_0 = $entropy_0") @@ -118,7 +118,6 @@ class DecisionTree_C45 (x: MatrixD, y: VectorI, fname_ : Array [String] = null, val xj = x_(?, j) // column j of matrix x if feas(j).kind != Categorical then threshold(j) = DecisionTree_C45.findSplit (xj, y_, rindex, k) // => calculate split threshold - end if val (gn, nu) = gain (feas(j), xj, y_, rindex) // compute gain for feature j // debug ("findBest", s"compare ($j, $gn, $nu) to $best") if gn > best._2 then best = (j, gn, nu) // better gainb => update best @@ -148,11 +147,9 @@ class DecisionTree_C45 (x: MatrixD, y: VectorI, fname_ : Array [String] = null, val node = Node (j, gn, nu, parent, nu.argmax (), leaf) // construct the next node if ! 
leaf && feas(j).kind != Categorical then node.thres = threshold (j) // for continuous features, store threshold in node - end if if parent == null then addRoot (node) // if no parent, add node as root of tree debug ("buildTree", s"entropy of root node: entropy_0 = $entropy_0") - end if if ! node.leaf && cindex.dim > 1 then val xj = x(?, j) // extract feature column j @@ -176,9 +173,9 @@ class DecisionTree_C45 (x: MatrixD, y: VectorI, fname_ : Array [String] = null, * @param xj the column of the data matrix to be considered * @param rindex the working row index used to create the new trimmed version * @param vl the value to matched (for conts its 0 (up to) or 1 (beyond) threshold) - * @param thres the splitting threshold + * @param thres the splitting threshold */ - private def trimRows (j: Int, xj: VectorD, rindex: VectorI, vl: Int, thres: Double = -0.0): VectorI = + private def trimRows (j: Int, xj: VectorD, rindex: VectorI, vl: Int, thres: Double): VectorI = val a = if conts contains j then if vl == 0 then (for i <- rindex if xj(i) <= thres yield i).toArray else (for i <- rindex if xj(i) > thres yield i).toArray @@ -282,7 +279,6 @@ object DecisionTree_C45: if ent < minEnt then thres = mid // found a better threshold minEnt = ent // save better gain - end if end for thres // save best threshold for this feature @@ -321,7 +317,7 @@ object DecisionTree_C45: val ymin = y.min () println (s"unadjusted ymin = $ymin") if ymin != 0 then y -= ymin - val height = hparam ("height") + val height = hparam("height") println (s"height limit = $height") val tree = new DecisionTree_C45 (x, y.toInt, fn, k, cn, conts, hparam) diff --git a/src/main/scala/scalation/modeling/classifying/DecisionTree_C45wp.scala b/src/main/scala/scalation/modeling/classifying/DecisionTree_C45wp.scala index 03211381c..e50b5e7f1 100644 --- a/src/main/scala/scalation/modeling/classifying/DecisionTree_C45wp.scala +++ b/src/main/scala/scalation/modeling/classifying/DecisionTree_C45wp.scala @@ -35,7 +35,7 @@ 
class DecisionTree_C45wp (x: MatrixD, y: VectorI, fname_ : Array [String] = null private val debug = debugf ("DecisionTree_C45wp", true) // debug function - modelName = "DecisionTree_C45wp" // name of the model + _modelName = "DecisionTree_C45wp" // name of the model //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Prune nPrune nodes from the tree, the ones providing the least gain. @@ -43,7 +43,7 @@ class DecisionTree_C45wp (x: MatrixD, y: VectorI, fname_ : Array [String] = null * @param threshold cut-off for pruning (IG < threshold, then prune) */ def prune (nPrune: Int = 1, threshold: Double = 0.98): Unit = - for i <- 0 until nPrune do + cfor (0, nPrune) { _ => val can = candidates debug ("prune", s"can = $can") val (best, gn) = bestCandidate (can) @@ -51,8 +51,7 @@ class DecisionTree_C45wp (x: MatrixD, y: VectorI, fname_ : Array [String] = null if gn < threshold then println (s"prune: make node $best with gain $gn into a leaf") makeLeaf (best) - end if - end for + } // cfor end prune end DecisionTree_C45wp diff --git a/src/main/scala/scalation/modeling/classifying/DecisionTree_ID3.scala b/src/main/scala/scalation/modeling/classifying/DecisionTree_ID3.scala index 004561b17..a24100e57 100644 --- a/src/main/scala/scalation/modeling/classifying/DecisionTree_ID3.scala +++ b/src/main/scala/scalation/modeling/classifying/DecisionTree_ID3.scala @@ -16,8 +16,8 @@ import scala.collection.mutable.ArrayBuffer import scalation.mathstat._ import scalation.mathstat.Probability.{entropy, freq} - -import VariableKind.Categorical +import scalation.theory.Variable +import scalation.theory.VariableKind.Categorical //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `DecisionTree_ID3` class implements a Decision Tree classifier using the @@ -40,15 +40,15 @@ class DecisionTree_ID3 (x: MatrixD, y: VectorI, fname_ : Array [String] = null, with DecisionTree: private val debug = debugf ("DecisionTree_ID3", false) // debug 
function - private val height = hparam ("height").toInt // the maximum height of tree - private val cutoff = hparam ("cutoff") // cutoff entropy threshold + private val height = hparam("height").toInt // the maximum height of tree + private val cutoff = hparam("cutoff") // cutoff entropy threshold private var entropy_0 = entropy (y.freq (k)._2) // initial entropy of full vector y private val param = ArrayBuffer [Double] () // parameter vector = feature order private val feas = Array.ofDim [Variable] (x.dim2) // array of features/variables xj's for j <- x.indices2 do feas(j) = Variable (x(?, j), j, Categorical) - modelName = s"DecisionTree_ID3_$height" // name of the model + _modelName = s"DecisionTree_ID3_$height" // name of the model debug ("init", s"entropy of original/full y: entropy_0 = $entropy_0") @@ -141,7 +141,6 @@ class DecisionTree_ID3 (x: MatrixD, y: VectorI, fname_ : Array [String] = null, if parent == null then addRoot (node) // if no parent, add node as root of tree debug ("buildTree", s"entropy of root node: entropy_0 = $entropy_0") - end if if ! node.leaf && cindex.dim > 1 then val xj = x_(?, j).toInt // extract feature column j diff --git a/src/main/scala/scalation/modeling/classifying/DecisionTree_ID3wp.scala b/src/main/scala/scalation/modeling/classifying/DecisionTree_ID3wp.scala index 3630ccf80..84af8f4d2 100644 --- a/src/main/scala/scalation/modeling/classifying/DecisionTree_ID3wp.scala +++ b/src/main/scala/scalation/modeling/classifying/DecisionTree_ID3wp.scala @@ -33,7 +33,7 @@ class DecisionTree_ID3wp (x: MatrixD, y: VectorI, fname_ : Array [String] = null private val debug = debugf ("DecisionTree_ID3wp", true) // debug function - modelName = "DecisionTree_ID3wp" // name of the model + _modelName = "DecisionTree_ID3wp" // name of the model //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Prune nPrune nodes from the tree, the ones providing the least gain. 
@@ -41,7 +41,7 @@ class DecisionTree_ID3wp (x: MatrixD, y: VectorI, fname_ : Array [String] = null * @param threshold cut-off for pruning (IG < threshold, then prune) */ def prune (nPrune: Int = 1, threshold: Double = 0.98): Unit = - for i <- 0 until nPrune do + cfor (0, nPrune) { _ => val can = candidates debug ("prune", s"can = $can") val (best, gn) = bestCandidate (can) @@ -49,8 +49,7 @@ class DecisionTree_ID3wp (x: MatrixD, y: VectorI, fname_ : Array [String] = null if gn < threshold then println (s"prune: make node $best with gain $gn into a leaf") makeLeaf (best) - end if - end for + } // cfor end prune end DecisionTree_ID3wp @@ -165,7 +164,6 @@ end decisionTree_ID3wpTest2 else testData.set (testCount, xy(i)) testCount += 1 - end if end for val testFeature = testData(0 until testData.dim2) diff --git a/src/main/scala/scalation/modeling/classifying/FitC.scala b/src/main/scala/scalation/modeling/classifying/FitC.scala index fd9428771..c48eeff9d 100644 --- a/src/main/scala/scalation/modeling/classifying/FitC.scala +++ b/src/main/scala/scalation/modeling/classifying/FitC.scala @@ -125,7 +125,7 @@ help: Quality of Fit (QoF) measures: * @param k the number of class labels {0, 1, ... , k-1} */ def test (fc: FitC, y_ : VectorI, yp: VectorI, k: Int = 2): Unit = - banner ("Actual Class Values/Labels") + banner (s"Actual Class Values/Labels with k = $k") println (s"y_ = $y_") // actual class values banner ("Predicted Class Values/Labels") @@ -192,7 +192,7 @@ end FitC * accuracy, precision, recall, specificity and Cohen's kappa coefficient. * @see `modeling.Fit` * Must call the confusion method before calling the other methods. 
- * @param k the number distinct class values/labels (defaults to 2) + * @param k the number distinct class values/labels (defaults to 2) */ trait FitC (k: Int = 2) extends FitM: diff --git a/src/main/scala/scalation/modeling/classifying/HiddenMarkov.scala b/src/main/scala/scalation/modeling/classifying/HiddenMarkov.scala index f25cd5a2d..a21237a8d 100644 --- a/src/main/scala/scalation/modeling/classifying/HiddenMarkov.scala +++ b/src/main/scala/scalation/modeling/classifying/HiddenMarkov.scala @@ -69,19 +69,16 @@ class HiddenMarkov (y: VectorI, m: Int, n: Int, cname_ : Array [String] = null, private val rvalue = 0 until m // range over observation symbols/values private val rstate = 0 until n // range over states - modelName = "HiddenMarkov" // name of the model + _modelName = s"HiddenMarkov_$n" // name of the model if pi == null then pi = pvn.gen // initialize the state probability vector - end if if a == null then a = new MatrixD (n, n) for i <- rstate do a(i) = pvn.gen // initialize state transition probability matrix a - end if if b == null then b = new MatrixD (n, m) for i <- rstate do b(i) = pvm.gen // initialize observation probability matrix b - end if //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the size of the (hidden) state space. 
@@ -143,7 +140,6 @@ class HiddenMarkov (y: VectorI, m: Int, n: Int, cname_ : Array [String] = null, 1.0 / p // reciporcal of product else alp(tt-1).sum // sum of last row - end if end probY //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -153,14 +149,13 @@ class HiddenMarkov (y: VectorI, m: Int, n: Int, cname_ : Array [String] = null, * Requires: alp the unscaled alpha matrix or * c the vector of scaling factors */ - private def logProbY (scaled: Boolean = false): Double = + private def logProbY (scaled: Boolean): Double = if scaled then var lp = 0.0 // log-probability for t <- rtime do lp += log (c(t)) // sum of the log of scaling factors -lp // reciporcal via -log else -log (alp(tt-1).sum) // - log of sum of last row - end if end logProbY //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -299,7 +294,6 @@ class HiddenMarkov (y: VectorI, m: Int, n: Int, cname_ : Array [String] = null, else println (s"train: HMM model converged after $it iterations") break () - end if end for } // breakable println (s"train: HMM model did not converged after $MIT iterations") diff --git a/src/main/scala/scalation/modeling/classifying/KNN_Classifier.scala b/src/main/scala/scalation/modeling/classifying/KNN_Classifier.scala index 2e0f5725b..333709806 100644 --- a/src/main/scala/scalation/modeling/classifying/KNN_Classifier.scala +++ b/src/main/scala/scalation/modeling/classifying/KNN_Classifier.scala @@ -45,7 +45,7 @@ class KNN_Classifier (x: MatrixD, y: VectorI, fname_ : Array [String] = null, private val count = new VectorI (k) // how many nearest neighbors in each class. 
private var d = VectorD.nullv // vector to hold distances - modelName = s"KNN_Classifier_$kappa" // name of the model + _modelName = s"KNN_Classifier_$kappa" // name of the model if kappa < 3 then flaw ("init", s"number of neighbors kappa = $kappa < 3") @@ -255,7 +255,8 @@ end kNN_ClassifierTest3 banner ("original imbalanced yb") println (s"yb = $yb") - val idx = Classifier.downsample (yb, 100) // use these indices +// val idx = Classifier.downsample (yb, 100) // use these indices - 100 part not implemented yet + val idx = Classifier.downsample (yb) // use these indices val x_ = x(idx) // new x-matrix val y_ = yb(idx) // new y-vector diff --git a/src/main/scala/scalation/modeling/classifying/LinDiscAnalyis.scala b/src/main/scala/scalation/modeling/classifying/LinDiscAnalyis.scala index 469f5ffc0..47a87877f 100644 --- a/src/main/scala/scalation/modeling/classifying/LinDiscAnalyis.scala +++ b/src/main/scala/scalation/modeling/classifying/LinDiscAnalyis.scala @@ -46,7 +46,7 @@ class LinDiscAnalyis (x: MatrixD, y: VectorI, fname_ : Array [String] = null, k: if k != 2 then flaw ("init", "k must equal 2 in current implementation") - modelName = "LinDiscAnalyis" // name of the model + _modelName = "LinDiscAnalyis" // name of the model //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the corrected covariance matrix. 
diff --git a/src/main/scala/scalation/modeling/classifying/LogisticRegression.scala b/src/main/scala/scalation/modeling/classifying/LogisticRegression.scala index adda16bd2..56ee1e771 100644 --- a/src/main/scala/scalation/modeling/classifying/LogisticRegression.scala +++ b/src/main/scala/scalation/modeling/classifying/LogisticRegression.scala @@ -42,7 +42,7 @@ class LogisticRegression (x: MatrixD, y: VectorI, fname_ : Array [String] = null private val r_df = (n-1.0) / (n-k-1.0) // ratio of degrees of freedom - modelName = s"LogisticRegression_$cThresh" // name of the model + _modelName = s"LogisticRegression_$cThresh" // name of the model debug ("init", s"r_df = $r_df") diff --git a/src/main/scala/scalation/modeling/classifying/NaiveBayes.scala b/src/main/scala/scalation/modeling/classifying/NaiveBayes.scala index bc633ca43..586ad28ab 100644 --- a/src/main/scala/scalation/modeling/classifying/NaiveBayes.scala +++ b/src/main/scala/scalation/modeling/classifying/NaiveBayes.scala @@ -47,11 +47,10 @@ class NaiveBayes (x: MatrixD, y: VectorI, fname_ : Array [String] = null, k: Int private val debug = debugf ("NaiveBayes", true) // debug function - modelName = "NaiveBayes" // name of the model + _modelName = "NaiveBayes" // name of the model if vc == null then shift2zero (x); vc = vc_fromData (x) // set value counts from data - end if private val me = hparam("me").toDouble // m-estimates (me == 0 => regular MLE estimates) private val me_v = NaiveBayes.me_vc (me, vc) // for Laplace smoothing: me / vc_j for all j @@ -78,16 +77,15 @@ class NaiveBayes (x: MatrixD, y: VectorI, fname_ : Array [String] = null, k: Int override def train (x_ : MatrixD = x, y_ : VectorI = y): Unit = super.train (x_, y_) // set class frequencies nu_y and probabilities p_y val nu_Xy = RTensorD.freq (x_, vc, y, k) // Joint Frequency Tables (JFTs) - p_Xy = cProb_Xy (x_, y_, nu_Xy) // Conditional Probability Tables (CPTs) + p_Xy = cProb_Xy (x_, nu_Xy) // Conditional Probability Tables (CPTs) end train 
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the conditional probability of X given y for each feature xj. * @param x_ the integer-valued data vectors stored as rows of a matrix - * @param y_ the class vector, where y(i) = class for row i of the matrix x, x(i) * @param nu_Xy the joint frequency of X and y for each feature xj and class value */ - def cProb_Xy (x_ : MatrixD, y_ : VectorI, nu_Xy: RTensorD): RTensorD = + def cProb_Xy (x_ : MatrixD, nu_Xy: RTensorD): RTensorD = val pXy = new RTensorD (x_.dim2, vc, k) for j <- x_.indices2; xj <- 0 until vc(j) do pXy(j, xj) = (nu_Xy(j, xj) + me_v(j)) / (nu_y + me) // Conditional Probability Tables (CPTs) diff --git a/src/main/scala/scalation/modeling/classifying/NaiveBayesR.scala b/src/main/scala/scalation/modeling/classifying/NaiveBayesR.scala index 944bed8d5..ada838a40 100644 --- a/src/main/scala/scalation/modeling/classifying/NaiveBayesR.scala +++ b/src/main/scala/scalation/modeling/classifying/NaiveBayesR.scala @@ -52,7 +52,7 @@ class NaiveBayesR (x: MatrixD, y: VectorI, fname_ : Array [String] = null, k: In private val cd = Array.ofDim [Double => Double] (k, x.dim2) // conditional density functions - modelName = "NaiveBayesR" // name of the model + _modelName = "NaiveBayesR" // name of the model debug ("init", s"correlation matrix = $cor") diff --git a/src/main/scala/scalation/modeling/classifying/NullModel.scala b/src/main/scala/scalation/modeling/classifying/NullModel.scala index 086dda4cc..72b6c0521 100644 --- a/src/main/scala/scalation/modeling/classifying/NullModel.scala +++ b/src/main/scala/scalation/modeling/classifying/NullModel.scala @@ -5,7 +5,7 @@ * @date Fri Feb 16 16:14:34 EST 2018 * @see LICENSE (MIT style license file). 
* - * @note Model: Null Model Classifier + * @note Model: Null Model Classifier (Picks Most Frequent Class Label) */ package scalation @@ -17,7 +17,7 @@ import scalation.mathstat._ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `NullModel` class implements a Null Model Classifier, which is a simple * classifier for discrete input data. The classifier is trained just using a - * classification vector y. Picks the most frequent class. + * classification vector y. Picks the most frequent class label. * Each data instance is classified into one of k classes numbered 0, ..., k-1. * Note: the train method in the super class suffices. * @param y the response/output m-vector (class values where y(i) = class for instance i) @@ -30,7 +30,7 @@ class NullModel (y: VectorI, k: Int = 2, cname_ : Array [String] = Array ("No", private val debug = debugf ("NullModel", true) // debug function - modelName = "NullModel" // name of the model + _modelName = "NullModel" // name of the model //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Test the predictive model y_ = f(x_) + e and return its predictions and QoF vector. 
diff --git a/src/main/scala/scalation/modeling/classifying/RandomForest.scala b/src/main/scala/scalation/modeling/classifying/RandomForest.scala index fc2607c22..85bb5804b 100644 --- a/src/main/scala/scalation/modeling/classifying/RandomForest.scala +++ b/src/main/scala/scalation/modeling/classifying/RandomForest.scala @@ -39,7 +39,7 @@ class RandomForest (x: MatrixD, y: VectorI, fname_ : Array [String] = null, k: I private val debug = debugf ("RandomForest", true) // debug function private val flaw = flawf ("RandomForest") // flaw function - private val fbRatio = hparam ("fbRatio").toDouble // feature bagging ratio + private val fbRatio = hparam("fbRatio").toDouble // feature bagging ratio private val nFeats = (fbRatio * x.dim2).toInt // number of features/columns to select private val rvg = RandomVecI (nFeats, x.dim2-1, 0, -1, true) // random vector generator @@ -47,7 +47,7 @@ class RandomForest (x: MatrixD, y: VectorI, fname_ : Array [String] = null, k: I if nFeats < 0 || nFeats > x.dim2 then flaw ("init", "RF feature size restricted to 0 thru number of features") - modelName = s"RandomForest_${height}_$nTrees" // name of the model + _modelName = s"RandomForest_${height}_$nTrees" // name of the model //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Select subFeatures for input of building trees, return the subFeatures @@ -256,7 +256,6 @@ end randomForestTest3 else elseSample.set (elseCount, xy(i)) elseCount += 1 - end if end for val elseFeature = elseSample(?, 0 until elseSample.dim2-1) diff --git a/src/main/scala/scalation/modeling/classifying/SimpleLogisticRegression.scala b/src/main/scala/scalation/modeling/classifying/SimpleLogisticRegression.scala index 8fac1f178..1c4af441b 100644 --- a/src/main/scala/scalation/modeling/classifying/SimpleLogisticRegression.scala +++ b/src/main/scala/scalation/modeling/classifying/SimpleLogisticRegression.scala @@ -44,7 +44,7 @@ class SimpleLogisticRegression (x: MatrixD, y: VectorI, fname_ : 
Array [String] if y != null && x.dim != y.dim then flaw ("init", "dimensions of x and y are incompatible") - protected val cThresh = hparam ("cThresh") // classification/decision threshold + protected val cThresh = hparam("cThresh") // classification/decision threshold protected val n = x.dim2 // number of parameters protected val k = n - 1 // number of variables (assumes an intercept) @@ -53,7 +53,7 @@ class SimpleLogisticRegression (x: MatrixD, y: VectorI, fname_ : Array [String] protected var r_dev = -1.0 // residual dev: -2l, for full model protected var aic = -1.0 // Akaike’s Information Criterion - modelName = s"SimpleLogisticRegression_$cThresh" // name of the model + _modelName = s"SimpleLogisticRegression_$cThresh" // name of the model //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute McFaffen's pseudo R-squared. diff --git a/src/main/scala/scalation/modeling/classifying/SupportVectorMachine.scala b/src/main/scala/scalation/modeling/classifying/SupportVectorMachine.scala index a4c1fc794..5c54a2c26 100644 --- a/src/main/scala/scalation/modeling/classifying/SupportVectorMachine.scala +++ b/src/main/scala/scalation/modeling/classifying/SupportVectorMachine.scala @@ -68,7 +68,7 @@ class SupportVectorMachine (x: MatrixD, y: VectorI, fname_ : Array [String] = nu private var al2 = 0.0 // old Lagrange multiplier 2 private var a2 = 0.0 // new Lagrange multiplier 2 - modelName = "SupportVectorMachine" // the name of the model + _modelName = "SupportVectorMachine" // the name of the model //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the vector of model parameter values [ w | b ] @@ -98,7 +98,6 @@ class SupportVectorMachine (x: MatrixD, y: VectorI, fname_ : Array [String] = nu I_1 += i; i_Up = i else I_4 += i; i_Low = i - end if end for fCache(i_Low) = 1 @@ -225,14 +224,12 @@ class SupportVectorMachine (x: MatrixD, y: VectorI, fname_ : Array [String] = nu if abs (a2 - al2) < EPSILON * (a2 
+ al2 + EPSILON) then debug ("takeStep", s"skip if a2 = $a2 ~= al2 = $al2") // almost no change return false - end if a1 = al1 + s * (al2 - a2) if a1 < 0.0 then a2 += s * a1; a1 = 0 else if a1 > C then val t = a1 - C; a2 += s * t; a1 = C - end if update (i1, i2, y1, y2) // weights and fCache alp(i1) = a1; alp(i2) = a2 // store a1, a2 in alp array @@ -262,11 +259,9 @@ class SupportVectorMachine (x: MatrixD, y: VectorI, fname_ : Array [String] = nu if fCache (i) < b_Up then b_Up = fCache (i) i_Up = i - end if if fCache (i) > b_Low then b_Low = fCache (i) i_Low = i - end if end for true end takeStep @@ -283,7 +278,6 @@ class SupportVectorMachine (x: MatrixD, y: VectorI, fname_ : Array [String] = nu else val gamma = al1 - al2 return if gamma > 0.0 then (0.0, C - gamma) else (-gamma, C) - end if end computeLH //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -356,7 +350,6 @@ class SupportVectorMachine (x: MatrixD, y: VectorI, fname_ : Array [String] = nu if I_0 contains i2 then if b_Low - F2 > F2 - b_Up then i1 = i_Low else i1 = i_Up - end if takeStep (i1, i2) end checkExample diff --git a/src/main/scala/scalation/modeling/classifying/TANBayes.scala b/src/main/scala/scalation/modeling/classifying/TANBayes.scala index 20db4f835..b7efede9c 100644 --- a/src/main/scala/scalation/modeling/classifying/TANBayes.scala +++ b/src/main/scala/scalation/modeling/classifying/TANBayes.scala @@ -54,11 +54,10 @@ class TANBayes (x: MatrixD, y: VectorI, fname_ : Array [String] = null, private val debug = debugf ("TANBayes", true) // debug function - modelName = "TANBayes" // name of the model + _modelName = "TANBayes" // name of the model if vc == null then shift2zero (x); vc = vc_fromData (x) // set value counts from data - end if private val me = hparam("me").toDouble // m-estimates (me == 0 => regular MLE estimates) private val me_v = NaiveBayes.me_vc (me, vc) // for Laplace smoothing: me / vc_j for all j @@ -113,7 +112,7 @@ class TANBayes (x: MatrixD, y: 
VectorI, fname_ : Array [String] = null, nu_Xy = RTensorD.freq (x_, vc, y_, k) // Joint Frequency Tables (JFTs) nu_Xpy_ = RTensor4D.freq (x_, vc, parent, vc_p, y_, k) // extended Joint Frequency Tables (JFTs) val nu_Xpy = freq_Xpy (x_, y_) // extended Joint Frequency Tables (JFTs) - p_Xpy = cProb_Xpy (x_, y_, nu_y, nu_Xy, nu_Xpy) // extended Conditional Probability Tables (CPTs) + p_Xpy = cProb_Xpy (x_, nu_y, nu_Xy, nu_Xpy) // extended Conditional Probability Tables (CPTs) // println (s"nu_Xpy = ${stringOf (nu_Xpy)}") // println (s"nu_Xpy_ = $nu_Xpy_") end train @@ -159,12 +158,11 @@ class TANBayes (x: MatrixD, y: VectorI, fname_ : Array [String] = null, /** Compute the conditional probability of X given p and y for all xj in X, * where p is the the unique x-parent of feature xj. * @param x_ the training/full data/input matrix (defaults to full x) - * @param y_ the training/full response/output vector (defaults to full y) * @param nu_y the class frequency of y * @param nu_Xy the joint frequency of X and y for all xj in X * @param nu_Xpy the joint frequency of X, p and y for all xj in X */ - def cProb_Xpy (x_ : MatrixD, y_ : VectorI, nu_y: VectorD, nu_Xy: RTensorD, + def cProb_Xpy (x_ : MatrixD, nu_y: VectorD, nu_Xy: RTensorD, nu_Xpy: Array [Array [MatrixD]]): Array [Array [MatrixD]] = val p_Xpy = Array.ofDim [Array [MatrixD]] (x_.dim2) for j <- x_.indices2 do @@ -211,7 +209,6 @@ class TANBayes (x: MatrixD, y: VectorI, fname_ : Array [String] = null, p_yz *= ecpt (z(j))(z(p)) // multiply in its v = (z(j), z(p)) row else // xj does not have a parent p_yz *= ecpt (z(j))(0) // multiply in its v = z(j) row - end if end for // debug ("predictI", s"p_yz = $p_yz") p_yz.argmax () // return class with highest probability @@ -237,7 +234,6 @@ class TANBayes (x: MatrixD, y: VectorI, fname_ : Array [String] = null, p_yz += plog (ecpt (z(j))(z(p))) // multiply in its v = (z(j), z(p)) row else // xj does not have a parant p_yz += plog (ecpt (z(j))(0)) // multiply in its v = z(j) 
row - end if end for debug ("lpredictI", s"p_yz = $p_yz") p_yz.argmin () // return class with lowest positive log probability diff --git a/src/main/scala/scalation/modeling/clustering/Clusterer.scala b/src/main/scala/scalation/modeling/clustering/Clusterer.scala index ba2a92c0a..0cf485882 100644 --- a/src/main/scala/scalation/modeling/clustering/Clusterer.scala +++ b/src/main/scala/scalation/modeling/clustering/Clusterer.scala @@ -76,6 +76,8 @@ trait Clusterer: protected var stream = 0 // the stream to use for random numbers +// _taskType = TaskType.Cluster // the type of task performed -- FIX -- subtrait of Model + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Set the names for the clusters. * @param nm the array of names diff --git a/src/main/scala/scalation/modeling/clustering/ClusteringPredictor.scala b/src/main/scala/scalation/modeling/clustering/ClusteringPredictor.scala index 63fa67da2..0b1b49c21 100644 --- a/src/main/scala/scalation/modeling/clustering/ClusteringPredictor.scala +++ b/src/main/scala/scalation/modeling/clustering/ClusteringPredictor.scala @@ -33,11 +33,12 @@ import scalation.mathstat._ class ClusteringPredictor (x: MatrixD, y: VectorD, fname_ : Array [String] = null, hparam: HyperParameter = ClusteringPredictor.hp) extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2) + with NoSubModels: private val debug = debugf ("ClusteringPredictor", false) // debug flag private val MAX_DOUBLE = Double.PositiveInfinity // infinity - private val kappa = hparam ("kappa").toInt // the number of nearest neighbors to consider + private val kappa = hparam("kappa").toInt // the number of nearest neighbors to consider private val topK = Array.fill (kappa)(-1, MAX_DOUBLE) // top-kappa nearest points (in reserve order) // private val coin = Bernoulli () // use a fair coin for breaking ties @@ -48,6 +49,8 @@ class ClusteringPredictor (x: 
MatrixD, y: VectorD, fname_ : Array [String] = nul // FIX - currently only works for xx = x and yy = y + override def getBest: BestStep = super [NoSubModels].getBest + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Training involves resetting the data structures before each prediction. * It uses lazy training, so most of it is done during prediction. @@ -106,10 +109,6 @@ class ClusteringPredictor (x: MatrixD, y: VectorD, fname_ : Array [String] = nul for i <- 0 until kappa do topK(i) = (-1, MAX_DOUBLE) // initialize top-kappa end reset - override def buildModel (x_cols: MatrixD): Predictor & Fit = - throw new UnsupportedOperationException ("ClusteringPredictor does not have feature selection") - end buildModel - end ClusteringPredictor diff --git a/src/main/scala/scalation/modeling/clustering/GapStatistic.scala b/src/main/scala/scalation/modeling/clustering/GapStatistic.scala index 65d843d96..9603a1b11 100644 --- a/src/main/scala/scalation/modeling/clustering/GapStatistic.scala +++ b/src/main/scala/scalation/modeling/clustering/GapStatistic.scala @@ -12,6 +12,7 @@ package scalation package modeling package clustering +import scala.annotation.unused import scala.math.log import scalation.mathstat._ @@ -40,19 +41,17 @@ object GapStatistic: val mean = x.mean val xzero = x - mean val svd = new Fac_SVD (xzero) - val (u, s, vt) = svd.factor123 () + val vt = svd.factor123 ()._3 // factors into u, s, vt val xp = xzero * vt.transpose val zp = new MatrixD (x.dim, x.dim2) for i <- zp.indices2 do val ci = xp(?, i) zp(?, i) = RandomVecD (zp.dim, ci.max, ci.min, stream = (stream + i) % 1000).gen - end for ref = (zp * vt) + mean else for i <- ref.indices2 do val ci = x(?, i) ref(?, i) = RandomVecD (ref.dim, ci.max, ci.min, stream = (stream + i) % 1000).gen - end for end if ref end reference @@ -65,7 +64,7 @@ object GapStatistic: * @param clustr the cluster assignments * @param k the number of clusters */ - def cumDistance (x: MatrixD, cl: 
Clusterer, clustr: Array [Int], k: Int): VectorD = + def cumDistance (x: MatrixD, @unused cl: Clusterer, clustr: Array [Int], k: Int): VectorD = val sums = new VectorD (k) for i <- 0 until x.dim-1; j <- i+1 until x.dim if clustr(i) == clustr(j) do sums(clustr(j)) += dist (x(i), x(j)) @@ -95,7 +94,7 @@ object GapStatistic: * @param useSVD use SVD to account for the shape of the points (default = true) * @param plot whether or not to plot the logs of the within-SSEs (default = false) */ - def kMeansPP (x: MatrixD, kMax: Int, algo: Algorithm = HARTIGAN, b: Int = 1, useSVD: Boolean = true, + def kMeansPP (x: MatrixD, kMax: Int, algo: Algorithm = HARTIGAN, @unused b: Int = 1, useSVD: Boolean = true, plot: Boolean = false): (KMeansPPClusterer, Array [Int], Int) = val awk = new VectorD (kMax) val rwk = new VectorD (kMax) @@ -115,13 +114,11 @@ object GapStatistic: if k != 0 && opk == -1 && gap(k-1) >= gap(k) - gap(k)*0.1 then // TODO use stddev instead of 0.01*gap opk = k - end if end for if plot then new Plot (kv, awk, rwk, "Actual wSSE and Reference wSSE vs. k") // , true) new Plot (kv, gap, null, "Gap vs. 
k") // , true) - end if val cl = KMeansPPClusterer (x, opk, algo) // TODO used saved instead of reclustering (cl, cl.cluster, opk) diff --git a/src/main/scala/scalation/modeling/clustering/HierClusterer.scala b/src/main/scala/scalation/modeling/clustering/HierClusterer.scala index 8f012986b..2a1459401 100644 --- a/src/main/scala/scalation/modeling/clustering/HierClusterer.scala +++ b/src/main/scala/scalation/modeling/clustering/HierClusterer.scala @@ -68,7 +68,6 @@ class HierClusterer (x: MatrixD, k: Int = 2) if d_ij < minDist then minDist = d_ij // update minimum distance si = clust(i); sj = clust(j) // remember point sets i and j - end if end for (si, sj) end bestMerge diff --git a/src/main/scala/scalation/modeling/clustering/KMeansClusterer.scala b/src/main/scala/scalation/modeling/clustering/KMeansClusterer.scala index dc076dc07..12b5f981f 100644 --- a/src/main/scala/scalation/modeling/clustering/KMeansClusterer.scala +++ b/src/main/scala/scalation/modeling/clustering/KMeansClusterer.scala @@ -138,7 +138,6 @@ class KMeansClusterer (x: MatrixD, k: Int, val flags: Array [Boolean] = Array (f to_c(i) = c2 // reassign point x_i to cluster c2 done = false // changed clusters => not done if immediate then go = false // optionally return after first change - end if end if } // cfor done // return whether there were no changes diff --git a/src/main/scala/scalation/modeling/clustering/KMeansClustererHW.scala b/src/main/scala/scalation/modeling/clustering/KMeansClustererHW.scala index 02e7c218b..9bf290096 100644 --- a/src/main/scala/scalation/modeling/clustering/KMeansClustererHW.scala +++ b/src/main/scala/scalation/modeling/clustering/KMeansClustererHW.scala @@ -49,7 +49,6 @@ class KMeansClustererHW (x: MatrixD, k: Int, flags: Array [Boolean] = Array (fal to_c(i) = c2 // reassign point x_i to cluster c2 done = false // changed clusters => not done if immediate then break () // optionally return after first change - end if end if end for } // breakable diff --git 
a/src/main/scala/scalation/modeling/clustering/KMeansPPClusterer.scala b/src/main/scala/scalation/modeling/clustering/KMeansPPClusterer.scala index 199a975e7..fcade4620 100644 --- a/src/main/scala/scalation/modeling/clustering/KMeansPPClusterer.scala +++ b/src/main/scala/scalation/modeling/clustering/KMeansPPClusterer.scala @@ -121,7 +121,7 @@ class KMeansPPClusterer (x: MatrixD, k: Int, algo: Algorithm = HARTIGAN, * Indicate done, if no points changed clusters (for stopping rule). * @param first whether this is the first call to 'reassign' */ - private def reassign (first: Boolean = false): Boolean = + private def reassign (first: Boolean): Boolean = var done = true for i <- x.indices do if first then // first call => no c0 diff --git a/src/main/scala/scalation/modeling/clustering/MarkovClustering.scala b/src/main/scala/scalation/modeling/clustering/MarkovClustering.scala index 7269ddcb0..8a5e081ed 100644 --- a/src/main/scala/scalation/modeling/clustering/MarkovClustering.scala +++ b/src/main/scala/scalation/modeling/clustering/MarkovClustering.scala @@ -59,7 +59,6 @@ class MarkovClusterer (t: MatrixD, k: Int = 2, r: Double = 2.0) clustr(j) = group // assign node j to this group force(j) = t(i, j) // make t(i, j) the new force found = true // a group was found for this row - end if end for if found then group += 1 // increment the group number end for @@ -126,7 +125,6 @@ class MarkovClusterer (t: MatrixD, k: Int = 2, r: Double = 2.0) sum += t(i, j) // collect sum sumSq += t(i, j) * t(i, j) // collect sum of squares n += 1.0 - end if end for for i <- t.indices do t(i, j) /= sum // normalize diff --git a/src/main/scala/scalation/modeling/forecasting/AR.scala b/src/main/scala/scalation/modeling/forecasting/AR.scala index eeca94c01..6e44cb786 100644 --- a/src/main/scala/scalation/modeling/forecasting/AR.scala +++ b/src/main/scala/scalation/modeling/forecasting/AR.scala @@ -1,6 +1,6 @@ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: 
-/** @author John Miller +/** @author John Miller, Yousef Fekri Dabanloo * @version 2.0 * @date Sun Jun 30 13:27:00 EDT 2024 * @see LICENSE (MIT style license file). @@ -30,22 +30,26 @@ import scalation.mathstat._ * @param adjusted whether in `Correlogram` when calculating auto-covarainces/auto-correlations * to adjust to account for the number of elements in the sum Σ (or use dim-1) * @see `VectorD.acov` + * @oaram tForm the transformation applied */ class AR (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = AR.hp, - bakcast: Boolean = false, adjusted: Boolean = true) + bakcast: Boolean = false, adjusted: Boolean = true, + tForm: Transform = null) extends Forecaster (y, hh, tRng, hparam, bakcast) - with Correlogram (y, adjusted): + with Correlogram (y, adjusted) + with NoSubModels: private val flaw = flawf ("AR") // flaw function protected val p = hparam("p").toInt // use the last p values protected var δ = NO_DOUBLE // drift/intercept/constant term - modelName = s"AR($p)" + _modelName = s"AR_$p" + yForm = tForm // defined in `Fit` via hierarchy //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Train/fit an `AR` model to the times-series data in vector y_. - * Estimate the coefficient vector b for a p-th order Auto-Regressive AR(p) model. + * Estimate the coefficient vector b (φ) for a p-th order Auto-Regressive AR(p) model. * Uses Durbin-Levinson Algorithm (in `Correlogram`) to determine the coefficients. * The b (φ) vector is p-th row of psi matrix (ignoring the first (0th) column). 
* @param x_null the data/input matrix (ignored, pass null) @@ -152,11 +156,37 @@ object AR: * @param hh the maximum forecasting horizon (h = 1 to hh) * @param tRng the time range, if relevant (time index may suffice) * @param hparam the hyper-parameters + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param adjusted whether in `Correlogram` when calculating auto-covarainces/auto-correlations + * to adjust to account for the number of elements in the sum Σ (or use dim-1) + * @see `VectorD.acov` */ - def apply (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = hp): AR = - new AR (y, hh, tRng, hparam) + def apply (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = hp, + bakcast: Boolean = false, adjusted: Boolean = true): AR = + new AR (y, hh, tRng, hparam, bakcast, adjusted) end apply + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create an `AR` object by building an input matrix xy and then calling the + * `AR` constructor. Also rescale the input data. 
+ * @param y the endogenous/response vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param tForm the z-transform (rescale to standard normal) + */ + def rescale (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = AR.hp, + bakcast: Boolean = false, adjusted: Boolean = true): AR = + + val tr_size = Model.trSize (y.dim) + val tForm_y = NormForm (y(0 until tr_size)) // use (mean, std) of training set for both In-sample and TnT +// val tForm_y = tForm(y) // use full dataset + + val y_scl = tForm_y.f(y) + new AR (y_scl, hh, tRng, hparam, bakcast, adjusted, tForm_y) + end rescale + end AR import Example_Covid.loadData_y @@ -199,7 +229,8 @@ end aRTest banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") mod.trainNtest ()() // train and test on full dataset - mod.rollValidate () // TnT with Rolling Validation + mod.setSkip (0) // can use values from training set to not skip any in test + mod.rollValidate () // TnT with Rolling Validation println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") end aRTest2 diff --git a/src/main/scala/scalation/modeling/forecasting/ARIMA.scala b/src/main/scala/scalation/modeling/forecasting/ARIMA.scala deleted file mode 100644 index 6c3214850..000000000 --- a/src/main/scala/scalation/modeling/forecasting/ARIMA.scala +++ /dev/null @@ -1,250 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). 
- * - * @note Model: Auto-Regressive, Integrated, Moving Average (ARIMA) - * - * @see en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scalation.mathstat._ - -import ARIMA_diff._ -import Forecaster.differ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMA` class provides basic time series analysis capabilities for Auto- - * Regressive 'AR' Integrated 'I' Moving-Average 'MA' models. In an - * ARIMA(p, d, q) model, p and q refer to the order of the Auto-Regressive - * and Moving-Average components of the model; d refers to the order of - * differencing. Given time series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * - * y_t = δ + Σ(φ_i y_t-i) + Σ(θ_i e_t-i) + e_t - * - * where δ is a constant, φ is the auto-regressive coefficient vector, - * θ is the moving-average coefficient vector, and e is the noise vector. - *------------------------------------------------------------------------------ - * If d > 0, then the time series must be differenced first before applying - * the above model. 
- *------------------------------------------------------------------------------ - * @param y the response vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults to AR.hp) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ -class ARIMA (y: VectorD, hh: Int, tRng: Range = null, - hparam: HyperParameter = AR.hp, - bakcast: Boolean = false) - extends ARMA (diff (y, hparam("d").toInt), hh, tRng, hparam, bakcast): - - private val debug = debugf ("ARIMA", true) // debug function - private val flaw = flawf ("ARIMA") // flaw function - private val d = hparam("d").toInt // the number of differences to take - private val v = getY // get series passed to ARMA - - if d out (0, 2) then flaw ("init", s"difference d = $d must be in {0, 1, 2}") - - modelName = s"ARIMA($p, $d, $q)" // name of model - - debug ("init", s"$modelName") - - new Plot (null, y, null, s"Plot $modelName: y vs. t", lines = true) - if d > 0 then new Plot (null, v, null, s"Plot $modelName: v = diff (y, d) vs. t", lines = true) - - //////////////////////////////////////////////////////////////////////////////// - // Make predictions/forecasts on the original scale time-series (not differenced). - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict all values corresponding to the given vector v_. If differenced, - * tranform back to the original scale. - * @param v_ the actual values to use in making predictions (as passed to ARMA). - */ - def predictAll2 (v_ : VectorD = v): VectorD = - val vp = predictAll (v_) - if d > 0 then backform (vp, y, d) else vp - end predictAll2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). 
- * Assign to FORECAST MATRIX and return h-step ahead forecast. - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * - def forecastAt2 (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - val yfh = arma.forecastAt (yf, pick (y_), h) - if d > 0 then yfh + y_ else yfh - end forecastAt2 - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * Forecast recursively down diagonals in the yf forecast matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. - * @param y_ the actual values to use in making forecasts - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - * - def forecastAll2 (y_ : VectorD, h: Int): MatrixD = - debug ("forecastAll2", s"y_.dim = ${y_.dim}, e.dim = ${e.dim}") - yf = new MatrixD (y_.dim+h, h+2) // forecasts for all time points t & horizons to h - for t <- y_.indices do yf(t, 0) = y_(t) // first column is the timestep (e.g., logical day) - for k <- 1 to h do forecastAt2 (yf, y_, k) // forecast k-steps into the future - for t <- yf.indices do yf(t, h+1) = t // last column is time (logical day) - yf // return matrix of forecasted values - end forecastAll2 - */ - -end ARIMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest` main function tests the `ARIMA` class on simulated data. - * Test predictions (one step ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRIMATest - */ -@main def aRIMATest (): Unit = - - import AR.hp - println (s"hp = $hp") - - val y = makeTSeries () // create simulated time-series (see `Stationary`) - val hh = 2 - - banner (s"Test Predictions: ARIMA(1, 0, 1) on simulated time-series") - var mod = new ARIMA (y, hh) // create model for time-series data ARIMA(1, 1) - mod.trainNtest ()() // train and test on full dataset - - banner (s"Test Predictions: ARIMA(1, 0, 0) on simulated time-series") - hp("q") = 0 - mod = new ARIMA (y, hh) // create model for time-series data ARIMA(1, 0) - mod.trainNtest ()() - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRIMATest - -import Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest2` main function tests the `ARIMA` class on real data: - * Forecasting lake levels. 
- * Test predictions (one step ahead forecasts) with no differencing - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRIMATest2 - */ -@main def aRIMATest2 (): Unit = - - import AR.hp - hp("d") = 0 // (no differencing) => should give same results as ARMA (@see `aRMATest2`) - println (s"hp = $hp") - val hh = 2 - - for p <- 1 to 5; q <- 0 to 2 do - hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters - val mod = new ARIMA (y, hh) // create model for time-series data ARIMA(p, q) - banner (s"Test Predictions: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest ()() // trainb and test the model on full dataset - - val yp = mod.predictAll2 (y) // results on original scale - new Plot (null, y, yp, s"Plot: ${mod.modelName} predictAll2: y, yp vs t", lines = true) - end for - -end aRIMATest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest3` main function tests the `ARIMA` class on real data: - * Forecasting lake levels. - * Test predictions (one step ahead forecasts) taking one difference. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRIMATest3 - */ -@main def aRIMATest3 (): Unit = - - import AR.hp - val d = 1 - hp("d") = d // first differencing - val hh = 2 - - val v = diff (y, d) // take the first difference of time-series y - differ (y, backform (v, y)) // verify recovery of original times-series - - for p <- 1 to 4; q <- 0 to 1 do - hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters - val mod = new ARIMA (y, hh) // create model for time-series data ARIMA(p, q) - banner (s"Test Predictions: ${mod.modelName} on LakeLevels Dataset") - val (vp, qof) = mod.trainNtest ()() // test and test the model on full dataset - val yp = mod.predictAll2 (y) // results on original scale - - new Plot (null, y, yp, s"Plot: ${mod.modelName} predictAll2: y, yp vs t", lines = true) - end for - -end aRIMATest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest4` main function tests the `ARIMA` class on real data: - * Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts) for several values of p and q. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * @see `aRMATest5` - * > runMain scalation.modeling.forecasting.aRIMATest4 - * -@main def aRIMATest4 (): Unit = - - import AR.hp - - val hh = 2 // maximum forecasting horizon - - val v = Δ (y) // velocity series (first differences) - - for p <- 2 to 2; q <- 0 to 0 do - hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters - val mod = new ARIMA (y) // create model for time series data - banner (s"Test: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest ()() // train and test the model on full dataset - - val yf_ = mod.forecastAll (y, hh) // forecast using differenced values h-steps ahead for all y - val yf = transformBack (yf_, y, hh) // transform back to original scale - println (s"yf_ = $yf_") // forecast matrix on differenced values - println (s"yf = $yf") // forecast matrix on original scale - println (s"y = $y") // observed values on original scale - - val tf = new TestFit (y.dim) - val vh1 = yf_(?, 1) // test on differenced scale - val vh2 = yf_(?, 2) - println (tf.testDiagnose (v, vh1)) - println (tf.testDiagnose (v, vh2)) - new Plot (null, v, vh1, "v, vh1 vs. t", lines = true) - new Plot (null, v, vh2, "v, vh2 vs. t", lines = true) - - val yh1 = yf(?, 1) // test on original scale - val yh2 = yf(?, 2) - println (tf.testDiagnose (y, yh1)) - println (tf.testDiagnose (y, yh2)) - new Plot (null, y, yh1, "y, yh1 vs. t", lines = true) - new Plot (null, y, yh2, "y, yh2 vs. 
t", lines = true) - -// Forecaster.checkForecastMatrix (yf, y, yp) // FIX - differences & un-differenced - end for - -end aRIMATest4 - */ diff --git a/src/main/scala/scalation/modeling/forecasting/ARIMA.scalaa b/src/main/scala/scalation/modeling/forecasting/ARIMA.scalaa new file mode 100644 index 000000000..64bd57aac --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/ARIMA.scalaa @@ -0,0 +1,251 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Sat Jun 13 01:27:00 EST 2017 + * @see LICENSE (MIT style license file). + * + * @note Model: Auto-Regressive, Integrated, Moving Average (ARIMA) + * + * @see en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model + * @see www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm + * @see www.stat.berkeley.edu/~bartlett/courses/153-fall2010 + * @see www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf + */ + +package scalation +package modeling +package forecasting + +import scalation.mathstat._ + +import ARIMA_diff._ +import Forecaster.differ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ARIMA` class provides basic time series analysis capabilities for Auto- + * Regressive 'AR' Integrated 'I' Moving-Average 'MA' models. In an + * ARIMA(p, d, q) model, p and q refer to the order of the Auto-Regressive + * and Moving-Average components of the model; d refers to the order of + * differencing. Given time series data stored in vector y, its next value y_t = y(t) + * may be predicted based on prior values of y and its noise: + * + * y_t = δ + Σ(φ_i y_t-i) + Σ(θ_i e_t-i) + e_t + * + * where δ is a constant, φ is the auto-regressive coefficient vector, + * θ is the moving-average coefficient vector, and e is the noise vector. 
+ *------------------------------------------------------------------------------ + * If d > 0, then the time series must be differenced first before applying + * the above model. + *------------------------------------------------------------------------------ + * @param y the response vector (time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters (defaults to AR.hp) + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ +class ARIMA (y: VectorD, hh: Int, tRng: Range = null, + hparam: HyperParameter = AR.hp, + bakcast: Boolean = false) + extends ARMA (diff (y, hparam("d").toInt), hh, tRng, hparam, bakcast): + + private val debug = debugf ("ARIMA", true) // debug function + private val flaw = flawf ("ARIMA") // flaw function + private val d = hparam("d").toInt // the number of differences to take + private val v = getY // get series passed to ARMA + + if d out (0, 2) then flaw ("init", s"difference d = $d must be in {0, 1, 2}") + + _modelName = s"ARIMA_${p}_${d}_$q" // name of model + + debug ("init", s"$modelName") + + new Plot (null, y, null, s"Plot $modelName: y vs. t", lines = true) + if d > 0 then new Plot (null, v, null, s"Plot $modelName: v = diff (y, d) vs. t", lines = true) + + //////////////////////////////////////////////////////////////////////////////// + // Make predictions/forecasts on the original scale time-series (not differenced). + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict all values corresponding to the given vector v_. If differenced, + * tranform back to the original scale. + * @param v_ the actual values to use in making predictions (as passed to ARMA). 
+ */ + def predictAll2 (v_ : VectorD = v): VectorD = + val vp = predictAll (v_) + if d > 0 then backform (vp, y, d) else vp + end predictAll2 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). + * Assign to FORECAST MATRIX and return h-step ahead forecast. + * @param yf the forecast matrix (time x horizons) + * @param y_ the actual values to use in making forecasts + * @param h the forecasting horizon, number of steps ahead to produce forecasts + * + def forecastAt2 (yf: MatrixD, y_ : VectorD, h: Int): VectorD = + val yfh = arma.forecastAt (yf, pick (y_), h) + if d > 0 then yfh + y_ else yfh + end forecastAt2 + */ + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). + * Record these in the yf matrix, where + * yf(t, k) = k-steps ahead forecast for y_t + * Note, column 0, yf(?, 0), is set to y (the actual time-series values). + * Forecast recursively down diagonals in the yf forecast matrix. + * The top right and bottom left triangles in yf matrix are not forecastable. 
+ * @param y_ the actual values to use in making forecasts + * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts + * + def forecastAll2 (y_ : VectorD, h: Int): MatrixD = + debug ("forecastAll2", s"y_.dim = ${y_.dim}, e.dim = ${e.dim}") + yf = new MatrixD (y_.dim+h, h+2) // forecasts for all time points t & horizons to h + for t <- y_.indices do yf(t, 0) = y_(t) // first column is the timestep (e.g., logical day) + for k <- 1 to h do forecastAt2 (yf, y_, k) // forecast k-steps into the future + for t <- yf.indices do yf(t, h+1) = t // last column is time (logical day) + yf // return matrix of forecasted values + end forecastAll2 + */ + +end ARIMA + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRIMATest` main function tests the `ARIMA` class on simulated data. + * Test predictions (one step ahead forecasts). + * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.aRIMATest + */ +@main def aRIMATest (): Unit = + + import AR.hp + println (s"hp = $hp") + + val y = makeTSeries () // create simulated time-series (see `Stationary`) + val hh = 2 + + banner (s"Test Predictions: ARIMA(1, 0, 1) on simulated time-series") + var mod = new ARIMA (y, hh) // create model for time-series data ARIMA(1, 1) + mod.trainNtest ()() // train and test on full dataset + + banner (s"Test Predictions: ARIMA(1, 0, 0) on simulated time-series") + hp("q") = 0 + mod = new ARIMA (y, hh) // create model for time-series data ARIMA(1, 0) + mod.trainNtest ()() + + banner ("Select model based on ACF and PACF") + mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) + mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) + +end aRIMATest + +import Example_LakeLevels.y + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRIMATest2` main function tests the `ARIMA` class on real data: + * Forecasting 
lake levels. + * Test predictions (one step ahead forecasts) with no differencing + * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.aRIMATest2 + */ +@main def aRIMATest2 (): Unit = + + import AR.hp + hp("d") = 0 // (no differencing) => should give same results as ARMA (@see `aRMATest2`) + println (s"hp = $hp") + val hh = 2 + + for p <- 1 to 5; q <- 0 to 2 do + hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters + val mod = new ARIMA (y, hh) // create model for time-series data ARIMA(p, q) + banner (s"Test Predictions: ${mod.modelName} on LakeLevels Dataset") + mod.trainNtest ()() // train and test the model on full dataset + + val yp = mod.predictAll2 (y) // results on original scale + new Plot (null, y, yp, s"Plot: ${mod.modelName} predictAll2: y, yp vs t", lines = true) + end for + +end aRIMATest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRIMATest3` main function tests the `ARIMA` class on real data: + * Forecasting lake levels. + * Test predictions (one step ahead forecasts) taking one difference. 
+ * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.aRIMATest3 + */ +@main def aRIMATest3 (): Unit = + + import AR.hp + val d = 1 + hp("d") = d // first differencing + val hh = 2 + + val v = diff (y, d) // take the first difference of time-series y + differ (y, backform (v, y)) // verify recovery of original time-series + + for p <- 1 to 4; q <- 0 to 1 do + hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters + val mod = new ARIMA (y, hh) // create model for time-series data ARIMA(p, q) + banner (s"Test Predictions: ${mod.modelName} on LakeLevels Dataset") + val vp = mod.trainNtest ()()._1 // train and test the model on full dataset + val yp = mod.predictAll2 (y) // results on original scale + + new Plot (null, v, vp, s"Plot: ${mod.modelName} predictAll2: v, vp vs t", lines = true) + new Plot (null, y, yp, s"Plot: ${mod.modelName} predictAll2: y, yp vs t", lines = true) + end for + +end aRIMATest3 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRIMATest4` main function tests the `ARIMA` class on real data: + * Forecasting lake levels. + * Test forecasts (1 to h steps ahead forecasts) for several values of p and q. 
+ * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * @see `aRMATest5` + * > runMain scalation.modeling.forecasting.aRIMATest4 + * +@main def aRIMATest4 (): Unit = + + import AR.hp + + val hh = 2 // maximum forecasting horizon + + val v = Δ (y) // velocity series (first differences) + + for p <- 2 to 2; q <- 0 to 0 do + hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters + val mod = new ARIMA (y) // create model for time series data + banner (s"Test: ${mod.modelName} on LakeLevels Dataset") + mod.trainNtest ()() // train and test the model on full dataset + + val yf_ = mod.forecastAll (y, hh) // forecast using differenced values h-steps ahead for all y + val yf = transformBack (yf_, y, hh) // transform back to original scale + println (s"yf_ = $yf_") // forecast matrix on differenced values + println (s"yf = $yf") // forecast matrix on original scale + println (s"y = $y") // observed values on original scale + + val tf = new TestFit (y.dim) + val vh1 = yf_(?, 1) // test on differenced scale + val vh2 = yf_(?, 2) + println (tf.testDiagnose (v, vh1)) + println (tf.testDiagnose (v, vh2)) + new Plot (null, v, vh1, "v, vh1 vs. t", lines = true) + new Plot (null, v, vh2, "v, vh2 vs. t", lines = true) + + val yh1 = yf(?, 1) // test on original scale + val yh2 = yf(?, 2) + println (tf.testDiagnose (y, yh1)) + println (tf.testDiagnose (y, yh2)) + new Plot (null, y, yh1, "y, yh1 vs. t", lines = true) + new Plot (null, y, yh2, "y, yh2 vs. 
t", lines = true) + +// Forecaster.checkForecastMatrix (yf, y, yp) // FIX - differences & un-differenced + end for + +end aRIMATest4 + */ diff --git a/src/main/scala/scalation/modeling/forecasting/ARIMA_diff.scala b/src/main/scala/scalation/modeling/forecasting/ARIMA_diff.scala index c3043f8a4..ea9fc4176 100644 --- a/src/main/scala/scalation/modeling/forecasting/ARIMA_diff.scala +++ b/src/main/scala/scalation/modeling/forecasting/ARIMA_diff.scala @@ -142,13 +142,14 @@ import ARIMA_diff._ banner ("Test ARMA (2, 0) on Lake Level Dataset") hp("p") = 2; hp("q") = 0 - val (yp, qof) = new ARMA (y, hh).trainNtest ()() + val yp = new ARMA (y, hh).trainNtest ()()._1 + new Plot (null, y, yp, "ARMA y and yp vs. time", lines = true) banner ("Test Differenced ARMA (2, 0) on Lake Level Dataset") val v = diff (y) // first difference on y (size of v one less than y) val yy = undiff (v, y(0)) // reverse the diff Forecaster.differ (y, yy) // verify recovery of original time series - val (vp_, qofv) = new ARMA (v, hh).trainNtest ()() // predictions skip the first value (no past) + val vp_ = new ARMA (v, hh).trainNtest ()()._1 // predictions skip the first value (no past) // val vp = v(0) +: vp_ // prepend the first actual value (want same size as v) val vp = vp_ // prepend the first actual value (want same size as v) @@ -156,7 +157,6 @@ import ARIMA_diff._ println (s"predictAll: y.dim = ${y.dim}, vp.dim = ${vp.dim}") val yp1 = undiff (vp, y(0)) // transform vp back to original (y) scale using undiff val yp2 = backform (vp, y) // transform vp back to original (y) scale using backform - banner ("Test Transformed-Back using undiff") println (tf.testDiagnose (y, yp1)) // determine the quality of fit for yp1 @@ -178,3 +178,79 @@ import ARIMA_diff._ end aRIMA_diffTest + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRIMA_diffTest2` main function tests the `ARIMA_diff` object on real data: + * Forecasting Covid-19 new deaths comparing ARMA, 
AR1MA, Differenced ARMA, Transformed-Back + * Differenced ARMA. Observe that `backform` is better than `undiff` on predictions. + * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.aRIMA_diffTest2 + */ +@main def aRIMA_diffTest2 (): Unit = + + import Example_Covid.y + import AR.hp + val tf = new TestFit (y.dim) + val hh = 6 + + banner ("Test ARMA (2, 0) on Covid-19 Dataset") + hp("p") = 2; hp("q") = 0 + val yp = new ARMA (y, hh).trainNtest ()()._1 + new Plot (null, y, yp, "ARMA y and yp vs. time", lines = true) + + banner ("Test Differenced ARMA (2, 0) on Covid-19 Dataset") + val v = diff (y) // first difference on y (size of v one less than y) + val yy = undiff (v, y(0)) // reverse the diff + Forecaster.differ (y, yy) // verify recovery of original time series + val vp_ = new ARMA (v, hh).trainNtest ()()._1 // predictions skip the first value (no past) +// val vp = v(0) +: vp_ // prepend the first actual value (want same size as v) + val vp = vp_ // prepend the first actual value (want same size as v) + + banner ("Test Transformed-Back Differenced ARMA (2, 0) on Covid-19 Dataset") + println (s"predictAll: y.dim = ${y.dim}, vp.dim = ${vp.dim}") + val yp1 = undiff (vp, y(0)) // transform vp back to original (y) scale using undiff + val yp2 = backform (vp, y) // transform vp back to original (y) scale using backform + + banner ("Test Transformed-Back using undiff") + println (tf.testDiagnose (y, yp1)) // determine the quality of fit for yp1 + banner ("Test Transformed-Back using backform") + println (tf.testDiagnose (y, yp2)) // determine the quality of fit for yp2 + new Plot (null, y, yp1, "undiff: y and yp1 vs. time", lines = true) + new Plot (null, y, yp2, "backform: y and yp2 vs. time", lines = true) + new Plot (null, yp1, yp2, "yp1 and yp2 vs. 
time", lines = true) + +end aRIMA_diffTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRIMA_diffTest3` main function tests the `ARIMA_diff` object on real data: + * Forecasting Covid-19 new deaths examining phase space plots. + * > runMain scalation.modeling.forecasting.aRIMA_diffTest3 + */ +@main def aRIMA_diffTest3 (): Unit = + + import Example_Covid.y // position: level of the time series + + val v = diff (y) // velocity: first difference on y (size of v one less than y) + + new Plot (y, v, null, "Phase Space: Velocity vs. Position", lines = true) + new Plot (null, y, v, "Phase Space: Velocity, Position vs. Time", lines = true) + + // Standardize + + val ys = y.standardize + val vs = v.standardize + + new Plot (ys, vs, null, "Phase Space: Standardized Velocity vs. Position", lines = true) + new Plot (null, ys, vs, "Phase Space: Standardized Velocity, Position vs. Time", lines = true) + + // Log transformation gives growth rates + + val u = diff (y.log) + val us = u.standardize + + new Plot (ys, us, null, "Phase Space: Standardized Growth-Rate vs. Position", lines = true) + new Plot (null, ys, us, "Phase Space: Standardized Growth-Rate, Position vs. Time", lines = true) + +end aRIMA_diffTest3 + diff --git a/src/main/scala/scalation/modeling/forecasting/ARMA.scala b/src/main/scala/scalation/modeling/forecasting/ARMA.scala index 14ec09fa3..c3c2c51de 100644 --- a/src/main/scala/scalation/modeling/forecasting/ARMA.scala +++ b/src/main/scala/scalation/modeling/forecasting/ARMA.scala @@ -1,508 +1,425 @@ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller +/** @author John Miller, Lokesh Adusumilli, Nirupom Bose Roy * @version 2.0 * @date Sun Jun 30 13:27:00 EDT 2024 * @see LICENSE (MIT style license file). 
* - * @note Model: Auto-Regressive, Moving Average (ARMA) - * - * Parameter Estimation: Least Squares, Maximum Likelihood - * Conditional Sum-of-Squares (CSS), Negative Log-Likelihood (NLL) - * @see arxiv.org/pdf/1611.00965 - * @see arxiv.org/html/2310.01198v2 - * @see arxiv.org/pdf/2310.01198 - * @see people.stat.sc.edu/hitchcock/stat520ch7slides.pdf - * @see www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf + * @note Model: Auto-Regressive, Moving Average (ARMA) via Kalman Filter MLE */ package scalation package modeling package forecasting +import scala.math.{max, log, Pi} +import scala.util.boundary + import scalation.mathstat._ -//import scalation.optimization.quasi_newton.{BFGS => Optimizer} // change import to change optimizer -//import scalation.optimization.quasi_newton.{LBFGS => Optimizer} +import scalation.mathstat.MatrixD.outer import scalation.optimization.quasi_newton.{LBFGS_B => Optimizer} -import scalation.random.NormalVec_c -import Forecaster.rdot -import Example_Covid.loadData_y -import Example_LakeLevels.y +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ARMA` companion object provides factory methods and default + * hyperparameters for ARMA models. + */ +object ARMA: + + /** Hyper-parameters: + * - `p`: AR order + * - `q`: MA order + */ + val hp = new HyperParameter + hp += ("p", 1, 1) + hp += ("q", 1, 1) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create an `ARMA` model. 
+ * @param y the univariate response/time-series vector + * @param hh the maximum forecast horizon + * @param tRng the optional time range + * @param hparam the hyper-parameters + */ + def apply (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = hp): ARMA = + new ARMA (y, hh, tRng, hparam) + +end ARMA -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARMA` class provides basic time series analysis capabilities for Auto-Regressive, - * Moving Average (ARMA) models. ARMA models are often used for forecasting. - * Given time series data stored in vector y, its next value y_t = combination of last - * p values and q shocks. + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ARMA` class fits an Auto-Regressive Moving Average model using a + * state-space representation and Kalman-filter maximum likelihood. * - * y_t = δ + Σ[φ_j y_t-j] + Σ[θ_j e_t-j] + e_t + * Model: + * y_t = c + Σ_j φ_j y_{t-j} + Σ_k θ_k e_{t-k} + e_t * - * where y_t is the value of y at time t and e_t is the residual/error term. - * @param y the response vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults to AR.hp) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * The implementation estimates: + * - AR coefficients `φ` + * - MA coefficients `θ` + * - intercept `c` + * - process variance `σ²` + * Notes: + * - The likelihood is computed from one-step-ahead Kalman innovations. + * - A small burn-in (`max(p, q+1)`) is excluded from the log-likelihood + * sum to align with the external reference implementation. + * - Rolling forecast evaluation is performed externally in the test driver. 
+ * @param y the response/time-series vector + * @param hh the maximum forecast horizon + * @param tRng the optional time range + * @param hparam the hyper-parameters + * @param bakcast whether a backcast value is prepended */ class ARMA (y: VectorD, hh: Int, tRng: Range = null, - hparam: HyperParameter = AR.hp, - bakcast: Boolean = false) - extends AR (y, hh, tRng, hparam, bakcast): - - private val debug = debugf ("ARMA", true) // debug function - private val flaw = flawf ("ARMA") // flaw function - private val STEP = 0.02 // step size for optimizer - protected val q = hparam("q").toInt // use the last q shock/errors -// private var z = VectorD.nullv // var for centered time series (used by first train) - private val pnq = p + q // sum of the orders - private val notHR = true // don't use the HR algorithm - - modelName = s"ARMA($p, $q)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Initialize the model parameters b = φ ++ θ by use the inherited AR for φ and - * small random numbers for θ. - * @param y_ the training/full response vector (e.g., full y) - */ - def init_params (y_ : VectorD): VectorD = -// super.train (null, y_) // option: fit AR to initialize ARMA -// var bb = super.parameter(1 until p+1) // use AR parameters to initialize φ for ARMA - var bb = NormalVec_c (p, 0.1, 0.01).gen // randomly initialize φ with small values - if q > 0 then bb = bb ++ NormalVec_c (q, 0.0, 0.01).gen // randomly initialize θ with small values - bb - end init_params - -// Use one of the following two train methods: swap names train0 & train and add override - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARMA` model to the times-series data in vector y_. - * Estimate the coefficient vector b for a (p, q)-th order Auto-Regressive ARMA(p, q) model. - * Uses a nonlinear optimizer (e.g., LBFGS_B) to determine the coefficients. 
- * Residuals are re-estimated during optimization (may lead to instability) - * NOTE: Requires the error update in `predict` to be uncommented. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - def train0 (x_null: MatrixD, y_ : VectorD): Unit = - banner (s"T R A I N 0 -- for p = $p, q = $q") - val mu = y_.mean // sample mean of y_ - b = init_params (y_) // initialize parameter vector b = φ ++ θ -// e.clear () // set errors to zero (and uncomment) or try -// e.set (super.residual) // set errors to AR residuals - δ = mu * (1 - b(0 until p).sum) // determine intercept before optimization -// z = y_ - mu // optimization works better using zero-centered data - - def css (b_ : VectorD): Double = - b = b_.copy // copy parameters from b vector - δ = mu * (1 - b(0 until p).sum) // determine updated intercept - val yp = predictAll (y_) // predicted value for z - val yy = y_(1 until y_.dim) // skip first (backcasted) value - val loss = ssef (yy, yp) // compute loss function -// println (s"css loss = $loss, δ = $δ, b = $b") - loss - end css - - debug ("train0", s"before optimization: p = $p, q = $q, δ = $δ, b = $b") - val optimizer = Optimizer (css, b.dim) // apply Quasi-Newton optimizer - val (fb, bb) = optimizer.solve (b, STEP) // optimal solution for loss function and parameters - b = bb // assign optimized parameters to vector b - δ = mu * (1 - b(0 until p).sum) // determine intercept after optimization - debug ("train0", s"after optimization: p = $p, q = $q, δ = $δ, b = $b") -// println (s"train0: error e = $e") - - end train0 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARMA` model to the times-series data in vector y_. - * Estimate the coefficient vector b for a (p, q)-th order Auto-Regressive ARMA(p, q) model. - * Uses a nonlinear optimizer (e.g., LBFGS_B) to determine the coefficients. 
- * Residuals are estimated before optimization using the Hannan-Rissanen Algorithm. - * NOTE: Requires the error update in `predict` to be commented out. - * @see faculty.washington.edu/dbp/s519/PDFs/13-overheads-2020.pdf - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - override def train (x_null: MatrixD, y_ : VectorD): Unit = - if notHR then - train0 (x_null, y_) - else - e.clear () - δ = 0.0 // intercept for y_ - resid (y_) // set the residuals using high order AR - val optimizer = Optimizer (ss, b.dim) // apply Quasi-Newton optimizer - val (fb, bb) = optimizer.solve (b, STEP) // optimal solution for loss function and parameters - b = bb // recover parameters for z - δ = y.mean * (1 - b(0 until p).sum) // determine intercept after optimization - debug ("train", s"optimized: p = $p, q - $q, δ = $δ, b = $b") - println (s"train: error e = $e") - end train + hparam: HyperParameter = ARMA.hp, bakcast: Boolean = false) + extends Forecaster (y, hh, tRng, hparam, bakcast): - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use a higher order AR model to estimate the residuals (unobserved data). - * Set the residual/error vector e defined in `Forecaster`. - * @param y_ the training/full response vector (e.g., full y) - */ - def resid (y_ : VectorD): Unit = - val hp2 = new HyperParameter - hp2 += ("p", pnq + 3, pnq + 3) // Set the AR order to p + 1 + 3 - val ar = new AR (y, hh, tRng, hp2) // create an AR model - ar.train (null, y_) // train the AR model - e += ar.residual // use residuals from the AR model - end resid - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the sum of squared errors (loss function). - * @param b_ the combined parameters (δ, b) where b = (φ, θ). 
- */ - def ss (b_ : VectorD): Double = - b = b_.copy // copy parameters from b vector - val yy = yb(1 until yb.dim) // skip first (backcasted) value - δ = yy.mean * (1 - b(0 until p).sum) // determine updated intercept - val yyp = predictAll (yb) // predicted value for yb -// debug ("ss", s"yy.dim = ${yy.dim}, yyp.dim = ${yyp.dim}") - ssef (yy, yyp) // compute loss function - end ss - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t using the 1-step ahead forecast. + private val flaw = flawf ("ARMA") // flaw function + private val p = hparam("p").toInt // AR order + private val q = hparam("q").toInt // MA order + + _modelName = s"ARMA($p, $q)" + + // State dimension for the companion-form state-space model + private val r_dim = max (p, q + 1) + + // Trained filter used as the rolling forecast state anchor + private var kf_tracker: KalmanFilter = null + + // Stored mean-like quantity implied by the fitted parameterization + private var mu_est = 0.0 + + def getBest: BestStep = ??? // FIX -- implement or throw exception + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Train the ARMA model on the supplied response vector using Kalman-filter + * maximum likelihood and a bound-constrained L-BFGS optimizer. * - * y_t = δ + φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * θ_0 e_t-1 + θ_1 e_t-2 + ... + θ_q-1 e_t-q + * Parameter vector layout: [φ_1 ... φ_p, θ_1 ... θ_q, c, σ²] * - * where φ = b(0 until p) and θ = b(p until p_q). - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time), - * but do not assume errors repeat. Note, column 1 of yf (yf(?, 1) holds yp. - * Must be executed in time order, so errors are properly recorded in vector e - * @see `predictAll` method in `Forecaster` trait. - * @see `rdot` in Forecaster.scala for reverse dot product implementation. 
- * @param t the time point being predicted - * @param y_ the actual values to use in making predictions - */ - override def predict (t: Int, y_ : VectorD): Double = - if t == 0 then e(0) = 0 // from backcast: assume no error - if t == 1 then e(1) = y_(1) - yf(0, 1) // first real point - - var sum = δ + rdot (b(0 until p), y_, t-1) // intercept + AR terms (use y); b(0 until p) = φ - for j <- 0 until q do // add MA terms (shocks) - if t-1-j >= 0 then sum += b(p+j) * e(t-1-j) // e(t-j = -1) does not exists; b(p+j) = θ(j) - - if t < y_.dim-1 then e(t) = y_(t) - sum // update the error vector (uncomment for first train) - sum // prediction yp - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, - * i.e., forecast the following time points: t+1, ..., t+h. - * Intended to work with rolling validation (analog of predict method). - * Note, must include [ y_i, e_i ] before horizon and [ yp_i ] after horizon - * @param t the time point from which to make forecasts - * @param y_ the actual values to use in making predictions - */ - override def forecast (t: Int, y_ : VectorD = yb): VectorD = - val yh = new VectorD (hh) // hold forecasts for each horizon - for h <- 1 to hh do - var sum = δ + rdot (b(0 until p), yf, t, h-1) // intercept + AR terms (use y and yp); b(0 until p) = φ - for j <- h-1 until q do // add MA terms (shocks) from before horizon - if t-j >= 0 then sum += b(p+j) * e(t-j) // e(t-j = -1) does not exists; b(p+j) = θ(j) - yf(t, h) = sum // record in forecast matrix - yh(h-1) = sum // record forecasts for each horizon - yh // return forecasts for all horizons - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign into FORECAST MATRIX and return the h-steps ahead forecast. 
- * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. - * Note, must include [ y_i, e_i ] before horizon and [ yp_i ] after horizon - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts + * @param x_null ignored for univariate models + * @param y_ the training response vector */ - override def forecastAt (h: Int, y_ : VectorD = yb): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - - for t <- y_.indices do // make forecasts over all time points for horizon h - var sum = δ + rdot (b(0 until p), yf, t, h-1) // intercept + AR terms (use y and yp); b(0 until p) = φ - for j <- h-1 until q do // add MA terms (shocks) from before horizon - if t-j >= 0 then sum += b(p+j) * e(t-j) // e(t-j = -1) does not exists; b(p+j) = θ(j) - yf(t, h) = sum // record in forecast matrix - yf(?, h) // return the h-step ahead forecast vector - end forecastAt + override def train (x_null: MatrixD, y_ : VectorD): Unit = + banner (s"Train $modelName using Kalman Filter MLE") -end ARMA + // ------------------------------------------------------------------ + // 1. OLS-based initialization + // ------------------------------------------------------------------ + val mu_guess = y_.mean + val z_centered = y_ - mu_guess + val n_ols = z_centered.dim - p -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARMA` companion object provides factory methods for the `ARMA` class. - */ -object ARMA: + val X_ols = new MatrixD (n_ols, p) + val y_ols = new VectorD (n_ols) - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `ARMA` object. 
- * @param y the response vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = AR.hp): ARMA = - new ARMA (y, hh, tRng, hparam) - end apply + for t <- 0 until n_ols do + y_ols(t) = z_centered(t + p) + for j <- 0 until p do X_ols(t, j) = z_centered(t + p - 1 - j) + end for -end ARMA + val ols = new Regression (X_ols, y_ols) + ols.train () + val phi_guesses = ols.parameter + val e_ols = y_ols - ols.predict (X_ols) + val sigma2_guess = (e_ols dot e_ols) / n_ols -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest` main function tests the `ARMA` class on real data: - * Forecasting Lake Levels using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRMATest - */ -@main def aRMATest (): Unit = + // ------------------------------------------------------------------ + // 2. 
Negative log-likelihood for Kalman-filter + // ------------------------------------------------------------------ - val hh = 3 // maximum forecasting horizon + def negativeLogLikelihood (b_vec: VectorD): Double = boundary: + val (kf, mu_curr) = formKalmanFilter (b_vec) + if kf == null then boundary.break (Double.PositiveInfinity) - val mod = new ARMA (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest ()() // train and test on full dataset + var logLik = 0.0 + val burn = max (p, q + 1) - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (y, mod.getYf) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") + for t <- 0 until y_.dim do + kf.predict () + val z = VectorD (y_(t) - mu_curr) + val y_err = z - kf.h * kf.x + val s = kf.h * kf.p * kf.h.transpose + kf.r + if s(0, 0) <= 1e-12 then boundary.break (Double.PositiveInfinity) -end aRMATest + val err_sq = y_err(0) * (1.0 / s(0, 0)) * y_err(0) + val contrib = -0.5 * (log (2.0 * Pi) + log (s(0, 0)) + err_sq) + if t >= burn then logLik += contrib + kf.update (z) + end for + -logLik + end negativeLogLikelihood -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest2` main function tests the `ARMA` class on real data: - * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRMATest2 - */ -@main def aRMATest2 (): Unit = + // ------------------------------------------------------------------ + // 3. 
Initial parameter vector + // ------------------------------------------------------------------ - val hh = 3 // maximum forecasting horizon + val num_params = p + q + 2 + val b0 = new VectorD (num_params) + val offset = if phi_guesses.dim > p then 1 else 0 - val mod = new ARMA (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest ()() // train and test on full dataset + for i <- 0 until p do b0(i) = phi_guesses(i + offset) + for i <- 0 until q do b0(p + i) = 0.0 - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + val phi_init_sum = (0 until p).map (i => b0(i)).sum + val c_guess = mu_guess * (1.0 - phi_init_sum) -end aRMATest2 + b0(p + q) = c_guess + b0(p + q + 1) = sigma2_guess + // ------------------------------------------------------------------ + // 4. Bound-constrained optimization + // ------------------------------------------------------------------ -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest3` main function tests the `ARMA` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * Comparison of sMAPE for AR(p), ARY(p), ARY_D(p), ARMA(p, 0), and ARMA(p, 1). 
- * Note ARX (p, 1, 0), where 0 => no exo vars, duplicates results of ARY(p) - * - * 19.0371, 29.5797, 39.0740, 47.4638, 55.1785, 62.1818 RW - * - * 18.7298, 28.4908, 37.0997, 45.6487, 51.7248, 56.3708 AR(1) - * 18.5808, 28.3362, 37.2485, 45.7846, 52.0362, 56.9114 ARY(1) - * 18.5808, 28.8144, 37.7469, 44.8006, 49.8166, 52.3205 ARY_D(1) - * 18.5788, 28.3364, 37.2530, 45.7883, 52.0403, 56.9181 ARY_Quad(1) - * 18.7095, 28.4690, 37.1203, 45.6688, 51.7687, 56.4467 ARMA(1, 0) - * 17.0508, 26.4669, 35.4906, 43.5707, 49.4949, 54.2347 ARMA(1, 1) - * - * 16.3579, 24.7155, 33.0480, 40.0707, 46.0049, 50.8265 AR(2) - * 16.2270, 23.3708, 31.6615, 38.7385, 44.7630, 50.0814 ARY(2) - * 16.2270, 22.9698, 30.0933, 35.4960, 40.7977, 46.2700 ARY_D(2) - * 16.2663, 22.6643, 31.0768, 37.7388, 44.2476, 50.0283 ARY_Quad(2) - * 19.0826, 29.2723, 37.2914, 44.2636, 49.8307, 53.6992 ARMA(2, 0) - * 17.0445, 26.6538, 35.5239, 42.9937, 48.7679, 53.3489 ARMA(2, 1) - * - * 16.0114, 22.7408, 29.5631, 35.2773, 40.9870, 45.8408 AR(3) - * 15.7509, 21.9972, 28.8976, 34.6815, 40.7375, 46.1590 ARY(3) - * 15.7509, 21.8745, 28.2745, 32.9840, 39.1694, 43.9673 ARY_D(3) - * 15.7262, 21.2578, 28.4101, 34.1532, 40.6659 46.1492 ARY_Quad(3) - * 16.7027, 23.4111, 30.5995, 36.7396, 42.6680, 47.1189 ARMA(3, 0) - * 16.1750, 23.1243, 30.8535, 37.1636, 43.0417, 48.2946 ARMA(3, 1) - * - * 15.8988, 22.5738, 28.5298, 33.3360, 39.1586, 43.1606 AR(4) - * 15.6423, 21.7982, 27.9006, 33.1000, 39.0543, 43.9748 ARY(5) - * 15.6423, 21.8663, 28.0034, 32.9898, 38.9927, 43.6218 ARY_D(4) - * 15.5814, 21.2352, 28.5489, 34.4369, 40.3618, 45.2605 ARY_Quad(4) - * 16.6457, 22.9684, 29.0629, 34.6601, 40.1521, 44.0896 ARMA(4, 0) - * 15.3290, 21.9965, 27.8397, 34.3507, 40.0857, 45.8402 ARMA(4, 1) - * - * 15.9279, 22.5769, 28.5035, 33.3019, 39.1381, 43.0520 AR(5) - * 15.6349, 21.8003, 27.9084, 33.1127, 39.0628, 44.0175 ARY(5) - * 15.6349, 21.7885, 28.0114, 33.0117, 39.1418, 43.7715 ARY_D(5) - * 15.3209, 21.3541, 28.9325, 35.1359, 41.0300, 
45.8558 ARY_Quad(5) - * 16.3720, 22.8047, 28.7702, 33.9232, 39.5677, 43.2628 ARMA(5, 0) - * 15.3361, 21.9121, 27.6568, 34.0218, 39.6254, 45.2994 ARMA(5, 1) - * - * > runMain scalation.modeling.forecasting.aRMATest3 - */ -@main def aRMATest3 (): Unit = - - val yy = loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - for p <- 1 to 5; q <- 0 to 1 do - AR.hp("p") = p // number of AR terms - AR.hp("q") = q // number of MA terms - val mod = new ARMA (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (y, mod.getYf) // use showYf = false to not print forecast matrix Yf - end for + val lowerBounds = VectorD.fill (p + q)(-2.0) ++ VectorD (-100000.0, 1e-6) + val upperBounds = VectorD.fill (p + q)( 2.0) ++ VectorD ( 100000.0, Double.PositiveInfinity) -end aRMATest3 + val optimizer = new Optimizer (f = negativeLogLikelihood, l_u = (lowerBounds, upperBounds)) + val (est_loss, est_params) = optimizer.solve (b0) + // ------------------------------------------------------------------ + // 5. Persist fitted parameters and construct the tracker filter + // ------------------------------------------------------------------ -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest4` main function tests the `ARMA` class on real data: - * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). - * Comparison of sMAPE for AR(p), ARY(p), ARY_D(p), ARMA(p, 0), and ARMA(p, 1). 
- * - * 19.1334, 31.1906, 44.3787, 55.1576, 65.1810, 74.0524 AR(1) - * 19.0397, 30.4570, 43.9113, 54.9642, 65.3163, 74.2124 ARY(1) - * 19.1718, 30.7038, 44.5265, 55.7794, 66.3876, 75.6566 ARMA(1, 0) - * 18.3012, 29.3224, 43.0369, 54.5719, 64.9230, 74.2520 ARMA(1, 1) - * - * 16.6447, 26.9109, 39.8106, 50.8595, 60.2176, 68.6317 AR(2) - * 16.8833, 26.4824, 39.2329, 50.8677, 61.0624, 70.3218 ARY(2) - * 19.4256, 32.8815, 46.4279, 57.2199, 66.8651, 75.3077 ARMA(2, 0) - * 18.3009, 30.0443, 43.6634, 54.9669, 64.8541, 73.7911 ARMA(2, 1) - * - * 15.9232, 23.5929, 34.3577, 44.1784, 53.6513, 62.0129 AR(3) - * 15.7190, 21.7959, 32.1395, 42.0074, 52.6874, 62.7276 ARY(3) - * 16.4547, 24.4668, 36.8597, 46.7958, 58.3539, 67.6623 ARMA(3, 0) - * 17.0353, 24.0309, 36.6585, 46.1961, 57.6348, 67.2332 ARMA(3, 1) - * - * 15.3256, 22.6893, 30.7558, 39.6274, 48.6646, 56.7375 AR(4) - * 14.6791, 19.9940, 26.5644, 35.4590, 41.4955, 50.8660 ARY(4) - * 14.9687, 22.2599, 29.6359, 39.6018, 48.2853, 56.9797 ARMA(4, 0) - * 15.2243, 21.4976, 27.7929, 37.9923, 45.0999, 54.3417 ARMA(4, 1) - * - * 15.9166, 21.5246, 28.0675, 36.8669, 43.3785, 51.1786 AR(5) - * 15.0232, 19.4222, 27.1981, 35.4744, 40.3466, 48.4066 ARY(5) - * 15.5426, 21.0405, 29.1731, 37.8006, 43.3590, 52.6387 ARMA(5, 0) - * 15.7641, 21.0723, 28.7463, 37.7968, 42.8480, 52.8277 ARMA(5, 1) - * - * > runMain scalation.modeling.forecasting.aRMATest4 - */ -@main def aRMATest4 (): Unit = - - val yy = loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - for p <- 1 to 5; q <- 0 to 1 do - AR.hp("p") = p // number of AR terms - AR.hp("q") = q // number of MA terms - val mod = new ARMA (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest ()() - - mod.setSkip (0) // using data from training can forecast first in test - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, 
mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set -// println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - end for + b = est_params -end aRMATest4 + val (final_kf, final_mu) = formKalmanFilter (b) + kf_tracker = final_kf + mu_est = final_mu + for t <- 0 until y_.dim do + kf_tracker.predict () + kf_tracker.update (VectorD (y_(t) - mu_est)) + end for -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest5` main function tests the `ARMA` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * Comparison of sMAPE for ARMA(p, 1) (i.e., q = 1) for different p orders. - * > runMain scalation.modeling.forecasting.aRMATest5 - */ -@main def aRMATest5 (): Unit = - - val yy = loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - AR.hp("q") = 1 // number of MA terms - for p <- 1 to 5 do - AR.hp("p") = p // number of AR terms - val mod = new ARMA (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (y, mod.getYf) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - end for + val phisEst = est_params(0 until p) + val cEst = est_params(p + q) + val phiSum = phisEst.sum + val muImp = if math.abs (1.0 - phiSum) > 1e-8 then cEst / (1.0 - phiSum) else Double.NaN + + println (s"\nEstimated params for ARMA($p,$q)") + println (s"phis = $phisEst") + if q > 0 then println (s"thetas = ${est_params(p until p + q)}") + println (s"intercept c = $cEst") + println (s"implied mu = $muImp") + println (s"sigma2 = ${est_params(p + q + 1)}") + println (s"negLogLik = $est_loss") + end train -end aRMATest5 + 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Construct a Kalman filter from the parameter vector. + * + * Parameter vector layout: [φ_1 ... φ_p, θ_1 ... θ_q, c, σ²] + * + * @param b_vec the parameter vector + * @return `(KalmanFilter, mu)` or `(null, 0.0)` if invalid + */ + private def formKalmanFilter (b_vec: VectorD): (KalmanFilter, Double) = + val phis = b_vec(0 until p) + val thetas = b_vec(p until p + q) + val c = b_vec(p + q) + val phi_sum = phis.sum + val denom = 1.0 - phi_sum + if math.abs (denom) <= 1e-8 then return (null, 0.0) -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest6` main function tests the `ARMA` class on small dataset. - * Test forecasts (h = 1 step ahead forecasts). - * > runMain scalation.modeling.forecasting.aRMATest6 - */ -@main def aRMATest6 (): Unit = + val mu = c / denom + + val sig2_proc = b_vec(p + q + 1) + val sig2_obs = 1e-6 + + if sig2_proc <= 0.0 then return (null, 0.0) - val y = VectorD (1, 3, 4, 2, 5, 7, 9, 8, 6, 3) + // Companion-form state transition matrix. + val f = new MatrixD (r_dim, r_dim) + for j <- 0 until p do f(0, j) = phis(j) + for i <- 1 until r_dim do f(i, i - 1) = 1.0 - AR.hp ("q") = 0 - var mod = new ARMA (y, 1) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on a Small Dataset") - mod.trainNtest ()() // train and test on full dataset - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - new Baseline (y, "AR1") + // Process noise covariance Q = G G' σ². 
+ val g = new VectorD (r_dim) + g(0) = 1.0 + for i <- 0 until q if i + 1 < r_dim do g(i + 1) = thetas(i) + val q_mat = outer (g, g) * sig2_proc - AR.hp ("p") = 2 - mod = new ARMA (y, 1) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on a Small Dataset") - mod.trainNtest ()() // train and test on full dataset - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - new Baseline (y, "AR2") + // Observation model: observe the first state element. + val h_mat = new MatrixD (1, r_dim) + h_mat(0, 0) = 1.0 -end aRMATest6 + val r_mat = MatrixD ((1, 1), sig2_obs) + // Diffuse-style large-variance initialization. + val x0 = new VectorD (r_dim) + val p0 = MatrixD.eye (r_dim, r_dim) * 1e6 -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest7` main function tests the `ARMA` class on small dataset. - * Test the generation of ARMA sequences for various p and q values. - * > runMain scalation.modeling.forecasting.aRMATest7 + (new KalmanFilter (f, q_mat, h_mat, r_mat, x0, p0), mu) + end formKalmanFilter + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return a copy of the trained filter and the associated mean quantity. + * Useful for rolling-origin forecast evaluation without mutating the + * stored tracker state. + */ + def getTrainedFilter: (KalmanFilter, Double) = + if kf_tracker == null then + flaw ("getTrainedFilter", "model has not been trained") + (null, mu_est) + else + (kf_tracker.copyFilter (), mu_est) + end getTrainedFilter + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forecast all time points for a given horizon `h` using the stored + * tracker state. 
+ * @param h the forecast horizon + * @param y_ the observed series + */ + override def forecastAt (h: Int, y_ : VectorD = yb): VectorD = + if kf_tracker == null then flaw ("forecastAt", "model has not been trained") + val (temp_kf, mu) = getTrainedFilter + + for t <- 0 until y_.dim do + val kf_forc = new KalmanFilter (temp_kf.f, temp_kf.q, temp_kf.h, temp_kf.r, + temp_kf.x.copy, temp_kf.p.copy) + for step <- 0 until h do + kf_forc.predict () + if step == h - 1 then + val pred = (kf_forc.h * kf_forc.x)(0) + mu + yf(t, h) = pred + end for + + temp_kf.predict () + temp_kf.update (VectorD (y_(t) - mu)) + end for + + yf(?, h) + end forecastAt + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** One-step prediction placeholder required by `Forecaster`. + * Horizon-specific forecasting is handled by `forecastAt`. + * @param t the given time + * @param y_ the actual time series + */ + override def predict (t: Int, y_ : VectorD): Double = 0.0 + +end ARMA + +import Example_Covid.loadData_y + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Rolling-origin validation driver for the standalone Kalman-filter ARMA + * implementation. 
+ * The logic mirrors the external Python validation flow: + * - fit once on the training window + * - forecast horizons `1..hh` before each newly revealed test observation + * - update the filter state sequentially without refitting + * > runMain scalation.modeling.forecasting.aRMA_KalmanRollingValidation */ -@main def aRMATest7 (): Unit = +@main def aRMA_KalmanRollingValidation (): Unit = + + // -------------------------------------------------------------------------- + // Configuration + // -------------------------------------------------------------------------- + val p_max = 5 // maximum AR order to evaluate + val q_max = 0 // maximum MA order to evaluate + val hh = 6 // maximum forecast horizon + val train_size = 92 // length of training window + + // -------------------------------------------------------------------------- + // Load and split data + // -------------------------------------------------------------------------- + val yy = loadData_y () + val y = yy(0 until 116) + val y_train = y(0 until train_size) + + println (s"Data Split: Total=${y.dim}, Train=$train_size, Test=${y.dim - train_size}") + + // -------------------------------------------------------------------------- + // Evaluate all requested (p, q) configurations + // -------------------------------------------------------------------------- + for p <- 5 to p_max; q <- 0 to q_max do + + // ---------------------------------------------------------------------- + // 1. Fit model on training data only + // ---------------------------------------------------------------------- + ARMA.hp("p") = p + ARMA.hp("q") = q + + val model = new ARMA (y, hh) + model.train (null, y_train) + + // ---------------------------------------------------------------------- + // 2. 
Rolling-origin forecast generation + // + // At each origin t: + // - forecast horizons 1..hh before revealing y(t) + // - store each horizon-h forecast at the row corresponding to its + // target time + // - update the rolling filter with the newly observed value y(t) + // ---------------------------------------------------------------------- + val (kfRolling, muFinal) = model.getTrainedFilter + val yfMatrix = new MatrixD (y.dim, hh) + + for t <- train_size until y.dim do + // Clone the current rolling state so forecasting does not mutate it. + val kfForecast = new KalmanFilter (kfRolling.f, kfRolling.q, + kfRolling.h, kfRolling.r, + kfRolling.x.copy, kfRolling.p.copy) + + // Generate forecasts for horizons 1..hh from the current origin. + for h <- 0 until hh do + kfForecast.predict () + val yHat = (kfForecast.h * kfForecast.x)(0) + muFinal + + val targetTime = t + h + if targetTime < y.dim then yfMatrix(targetTime, h) = yHat + end for + + // Reveal the next actual observation and update the rolling state. + kfRolling.predict () + kfRolling.update (VectorD (y(t) - muFinal)) + end for + + // ---------------------------------------------------------------------- + // 3. Horizon-wise evaluation + // + // Alignment: + // Column h stores forecasts for horizon (h + 1), and the earliest valid + // row for that column is train_size + h. 
+ // ---------------------------------------------------------------------- + class RollingDiagnoser (dfm: Double, df: Double) extends Diagnoser (dfm, df): + val modName = s"Rolling-ARMA($p, $q)" + end RollingDiagnoser + + for h <- 0 until hh do + val hStep = h + 1 + val evalStart = train_size + h + val evalEnd = y.dim - val nrg = random.Normal (0.0, 1.0) + val yActual = y(evalStart until evalEnd) + val yPred = yfMatrix(evalStart until evalEnd, h) - val m = 100 - val y = new VectorD (m) - val e = new VectorD (m) - val φ = VectorD (0.8, 0.7) - val θ = VectorD (0.8, 0.7) + println (s"\n--- Test Set Metrics (Horizon h=$hStep) ---") - for p <- 0 to 2; q <- 0 to 2 if p + q > 0 do - val (rp, rq) = ((0 until p), (0 until q)) - for t <- y.indices do - e(t) = nrg.gen - y(t) = rdot (φ(rp), y, t) + rdot (θ(rq), e, t) + e(t) + val dfm = (p + q + 1).toDouble + val df = yActual.dim - dfm + + val diagnoser = new RollingDiagnoser (dfm, df) + diagnoser.setSkip (0) + + val stats = diagnoser.diagnose (yActual, yPred) + + for i <- stats.indices do + if qoF_names(i) != "NA" then println (f"${qoF_names(i)}%10s = ${stats(i)}%12.6f") + + val tAxis = VectorD.range (0, y.dim) + val plotPred = yfMatrix(?, h).copy + new Plot (tAxis, y, plotPred, s"Rolling Forecast ARMA($p,$q) h=$hStep", lines = true) end for - new Plot (null, y, null, s"Plot of y vs. 
t for p = $p, q = $q", lines = true) - object CG extends Correlogram (y) - CG.makeCorrelogram () - CG.plotCorrelogram () + end for -end aRMATest7 +end aRMA_KalmanRollingValidation diff --git a/src/main/scala/scalation/modeling/forecasting/ARMA.scala.bak3 b/src/main/scala/scalation/modeling/forecasting/ARMA.scala.bak3 new file mode 100644 index 000000000..5c00230a0 --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/ARMA.scala.bak3 @@ -0,0 +1,514 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Sun Jun 30 13:27:00 EDT 2024 + * @see LICENSE (MIT style license file). + * + * @note Model: Auto-Regressive, Moving Average (ARMA) + * + * Parameter Estimation: Least Squares, Maximum Likelihood + * Conditional Sum-of-Squares (CSS), Negative Log-Likelihood (NLL) + * @see arxiv.org/pdf/1611.00965 + * @see arxiv.org/html/2310.01198v2 + * @see arxiv.org/pdf/2310.01198 + * @see people.stat.sc.edu/hitchcock/stat520ch7slides.pdf + * @see www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm + * @see www.stat.berkeley.edu/~bartlett/courses/153-fall2010 + * @see www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf + */ + +package scalation +package modeling +package forecasting + +import scala.annotation.unused + +import scalation.mathstat._ +//import scalation.optimization.quasi_newton.{BFGS => Optimizer} // change import to change optimizer +//import scalation.optimization.quasi_newton.{LBFGS => Optimizer} +import scalation.optimization.quasi_newton.{LBFGS_B => Optimizer} +import scalation.random.NormalVec_c + +import Forecaster.rdot +import Example_Covid.loadData_y +import Example_LakeLevels.y + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ARMA` class provides basic time series analysis capabilities for Auto-Regressive, + * Moving Average (ARMA) models. 
ARMA models are often used for forecasting. + * Given time series data stored in vector y, its next value y_t = combination of last + * p values and q shocks. + * + * y_t = δ + Σ[φ_j y_t-j] + Σ[θ_j e_t-j] + e_t + * + * where y_t is the value of y at time t and e_t is the residual/error term. + * @param y the response vector (time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters (defaults to AR.hp) + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ +class ARMA (y: VectorD, hh: Int, tRng: Range = null, + hparam: HyperParameter = AR.hp, + bakcast: Boolean = false) + extends AR (y, hh, tRng, hparam, bakcast): + + private val debug = debugf ("ARMA", true) // debug function + private val flaw = flawf ("ARMA") // flaw function + private val STEP = 0.02 // step size for optimizer + protected val q = hparam("q").toInt // use the last q shock/errors +// private var z = VectorD.nullv // var for centered time series (used by first train) + private val pnq = p + q // sum of the orders + private val notHR = true // don't use the HR algorithm + private val useAR = false // use AR for initial guess for φ parameters + + _modelName = s"ARMA_${p}_$q" + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Initialize the model parameters b = φ ++ θ by use the inherited AR for φ and + * small random numbers for θ. 
+ * @param y_ the training/full response vector (e.g., full y) + */ + def init_params (y_ : VectorD): VectorD = + var bb = + if useAR then + super.train (null, y_) // option: fit AR to initialize ARMA + super.parameter(1 until p+1) // use AR parameters to initialize φ for ARMA + else + NormalVec_c (p, 0.1, 0.01).gen // randomly initialize φ with small values + if q > 0 then bb = bb ++ NormalVec_c (q, 0.0, 0.01).gen // randomly initialize θ with small values + bb + end init_params + +// Use one of the following two train methods: swap names train0 & train and add override + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Train/fit an `ARMA` model to the times-series data in vector y_. + * Estimate the coefficient vector b for a (p, q)-th order Auto-Regressive ARMA(p, q) model. + * Uses a nonlinear optimizer (e.g., LBFGS_B) to determine the coefficients. + * Residuals are re-estimated during optimization (may lead to instability) + * NOTE: Requires the error update in `predict` to be uncommented. 
+ * @param x_null the data/input matrix (ignored, pass null) + * @param y_ the training/full response vector (e.g., full y) + */ + def train0 (@unused x_null: MatrixD, y_ : VectorD): Unit = + banner (s"T R A I N 0 -- for p = $p, q = $q") + val mu = y_.mean // sample mean of y_ + b = init_params (y_) // initialize parameter vector b = φ ++ θ +// e.clear () // set errors to zero (and uncomment) or try +// e.set (super.residual) // set errors to AR residuals + δ = mu * (1 - b(0 until p).sum) // determine intercept before optimization +// z = y_ - mu // optimization works better using zero-centered data + + def css (b_ : VectorD): Double = + b = b_.copy // copy parameters from b vector + δ = mu * (1 - b(0 until p).sum) // determine updated intercept + val yp = predictAll (y_) // predicted value for z + val yy = y_(1 until y_.dim) // skip first (backcasted) value + val loss = ssef (yy, yp) // compute loss function +// println (s"css loss = $loss, δ = $δ, b = $b") + loss + end css + + debug ("train0", s"before optimization: p = $p, q = $q, δ = $δ, b = $b") + val optimizer = Optimizer (css, b.dim) // apply Quasi-Newton optimizer + val bb = optimizer.solve (b, STEP)._2 // optimal solution for loss function and parameters + b = bb // assign optimized parameters to vector b + δ = mu * (1 - b(0 until p).sum) // determine intercept after optimization + debug ("train0", s"after optimization: p = $p, q = $q, δ = $δ, b = $b") +// println (s"train0: error e = $e") + + end train0 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Train/fit an `ARMA` model to the times-series data in vector y_. + * Estimate the coefficient vector b for a (p, q)-th order Auto-Regressive ARMA(p, q) model. + * Uses a nonlinear optimizer (e.g., LBFGS_B) to determine the coefficients. + * Residuals are estimated before optimization using the Hannan-Rissanen Algorithm. + * NOTE: Requires the error update in `predict` to be commented out. 
+ * @see faculty.washington.edu/dbp/s519/PDFs/13-overheads-2020.pdf + * @param x_null the data/input matrix (ignored, pass null) + * @param y_ the training/full response vector (e.g., full y) + */ + override def train (x_null: MatrixD, y_ : VectorD): Unit = + if notHR then + train0 (x_null, y_) + else + e.clear () + δ = 0.0 // intercept for y_ + resid (y_) // set the residuals using high order AR + val optimizer = Optimizer (ss, b.dim) // apply Quasi-Newton optimizer + val bb = optimizer.solve (b, STEP)._2 // optimal solution for loss function and parameters + b = bb // recover parameters for z + δ = y.mean * (1 - b(0 until p).sum) // determine intercept after optimization + debug ("train", s"optimized: p = $p, q - $q, δ = $δ, b = $b") + println (s"train: error e = $e") + end train + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Use a higher order AR model to estimate the residuals (unobserved data). + * Set the residual/error vector e defined in `Forecaster`. + * @param y_ the training/full response vector (e.g., full y) + */ + def resid (y_ : VectorD): Unit = + val hp2 = new HyperParameter + hp2 += ("p", pnq + 3, pnq + 3) // Set the AR order to p + 1 + 3 + val ar = new AR (y, hh, tRng, hp2) // create an AR model + ar.train (null, y_) // train the AR model + e += ar.residual // use residuals from the AR model + end resid + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the sum of squared errors (loss function). + * @param b_ the combined parameters (δ, b) where b = (φ, θ). 
+ */ + def ss (b_ : VectorD): Double = + b = b_.copy // copy parameters from b vector + val yy = yb(1 until yb.dim) // skip first (backcasted) value + δ = yy.mean * (1 - b(0 until p).sum) // determine updated intercept + val yyp = predictAll (yb) // predicted value for yb +// debug ("ss", s"yy.dim = ${yy.dim}, yyp.dim = ${yyp.dim}") + ssef (yy, yyp) // compute loss function + end ss + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict a value for y_t using the 1-step ahead forecast. + * + * y_t = δ + φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p + * θ_0 e_t-1 + θ_1 e_t-2 + ... + θ_q-1 e_t-q + * + * where φ = b(0 until p) and θ = b(p until p_q). + * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time), + * but do not assume errors repeat. Note, column 1 of yf (yf(?, 1) holds yp. + * Must be executed in time order, so errors are properly recorded in vector e + * @see `predictAll` method in `Forecaster` trait. + * @see `rdot` in Forecaster.scala for reverse dot product implementation. + * @param t the time point being predicted + * @param y_ the actual values to use in making predictions + */ + override def predict (t: Int, y_ : VectorD): Double = + if t == 0 then e(0) = 0 // from backcast: assume no error + if t == 1 then e(1) = y_(1) - yf(0, 1) // first real point + + var sum = δ + rdot (b(0 until p), y_, t-1) // intercept + AR terms (use y); b(0 until p) = φ + for j <- 0 until q do // add MA terms (shocks) + if t-1-j >= 0 then sum += b(p+j) * e(t-1-j) // e(t-j = -1) does not exists; b(p+j) = θ(j) + + if t < y_.dim-1 then e(t) = y_(t) - sum // update the error vector (uncomment for first train) + sum // prediction yp + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, + * i.e., forecast the following time points: t+1, ..., t+h. 
+ * Intended to work with rolling validation (analog of predict method). + * Note, must include [ y_i, e_i ] before horizon and [ yp_i ] after horizon + * @param t the time point from which to make forecasts + * @param y_ the actual values to use in making predictions + */ + override def forecast (t: Int, y_ : VectorD = yb): VectorD = + val yh = new VectorD (hh) // hold forecasts for each horizon + for h <- 1 to hh do + var sum = δ + rdot (b(0 until p), yf, t, h-1) // intercept + AR terms (use y and yp); b(0 until p) = φ + for j <- h-1 until q do // add MA terms (shocks) from before horizon + if t-j >= 0 then sum += b(p+j) * e(t-j) // e(t-j = -1) does not exists; b(p+j) = θ(j) + yf(t, h) = sum // record in forecast matrix + yh(h-1) = sum // record forecasts for each horizon + yh // return forecasts for all horizons + end forecast + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). + * Assign into FORECAST MATRIX and return the h-steps ahead forecast. + * Note, `predictAll` provides predictions for h = 1. + * @see `forecastAll` method in `Forecaster` trait. 
+ * Note, must include [ y_i, e_i ] before horizon and [ yp_i ] after horizon + * @param h the forecasting horizon, number of steps ahead to produce forecasts + * @param y_ the actual values to use in making forecasts + */ + override def forecastAt (h: Int, y_ : VectorD = yb): VectorD = + if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") + + for t <- y_.indices do // make forecasts over all time points for horizon h + var sum = δ + rdot (b(0 until p), yf, t, h-1) // intercept + AR terms (use y and yp); b(0 until p) = φ + for j <- h-1 until q do // add MA terms (shocks) from before horizon + if t-j >= 0 then sum += b(p+j) * e(t-j) // e(t-j = -1) does not exists; b(p+j) = θ(j) + yf(t, h) = sum // record in forecast matrix + yf(?, h) // return the h-step ahead forecast vector + end forecastAt + +end ARMA + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ARMA` companion object provides factory methods for the `ARMA` class. + */ +object ARMA: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `ARMA` object. + * @param y the response vector (time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + */ + def apply (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = AR.hp): ARMA = + new ARMA (y, hh, tRng, hparam) + end apply + +end ARMA + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRMATest` main function tests the `ARMA` class on real data: + * Forecasting Lake Levels using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.aRMATest + */ +@main def aRMATest (): Unit = + + val hh = 3 // maximum forecasting horizon + + val mod = new ARMA (y, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") + mod.trainNtest ()() // train and test on full dataset + + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + mod.diagnoseAll (y, mod.getYf) + println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") + +end aRMATest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRMATest2` main function tests the `ARMA` class on real data: + * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. + * Test forecasts (h = 1 to hh steps ahead forecasts). + * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.aRMATest2 + */ +@main def aRMATest2 (): Unit = + + val hh = 3 // maximum forecasting horizon + + val mod = new ARMA (y, hh) // create model for time series data + banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") + mod.trainNtest ()() // train and test on full dataset + + mod.setSkip (0) + mod.rollValidate () // TnT with Rolling Validation + mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + +end aRMATest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRMATest3` main function tests the `ARMA` class on real data: + * Forecasting COVID-19 using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). + * Comparison of sMAPE for AR(p), ARY(p), ARY_D(p), ARMA(p, 0), and ARMA(p, 1). 
+ * Note ARX (p, 1, 0), where 0 => no exo vars, duplicates results of ARY(p) + * + * 19.0371, 29.5797, 39.0740, 47.4638, 55.1785, 62.1818 RW + * + * 18.7298, 28.4908, 37.0997, 45.6487, 51.7248, 56.3708 AR(1) + * 18.5808, 28.3362, 37.2485, 45.7846, 52.0362, 56.9114 ARY(1) + * 18.5808, 28.8144, 37.7469, 44.8006, 49.8166, 52.3205 ARY_D(1) + * 18.5788, 28.3364, 37.2530, 45.7883, 52.0403, 56.9181 ARY_Quad(1) + * 18.7095, 28.4690, 37.1203, 45.6688, 51.7687, 56.4467 ARMA(1, 0) + * 17.0508, 26.4669, 35.4906, 43.5707, 49.4949, 54.2347 ARMA(1, 1) + * + * 16.3579, 24.7155, 33.0480, 40.0707, 46.0049, 50.8265 AR(2) + * 16.2270, 23.3708, 31.6615, 38.7385, 44.7630, 50.0814 ARY(2) + * 16.2270, 22.9698, 30.0933, 35.4960, 40.7977, 46.2700 ARY_D(2) + * 16.2663, 22.6643, 31.0768, 37.7388, 44.2476, 50.0283 ARY_Quad(2) + * 19.0826, 29.2723, 37.2914, 44.2636, 49.8307, 53.6992 ARMA(2, 0) + * 17.0445, 26.6538, 35.5239, 42.9937, 48.7679, 53.3489 ARMA(2, 1) + * + * 16.0114, 22.7408, 29.5631, 35.2773, 40.9870, 45.8408 AR(3) + * 15.7509, 21.9972, 28.8976, 34.6815, 40.7375, 46.1590 ARY(3) + * 15.7509, 21.8745, 28.2745, 32.9840, 39.1694, 43.9673 ARY_D(3) + * 15.7262, 21.2578, 28.4101, 34.1532, 40.6659 46.1492 ARY_Quad(3) + * 16.7027, 23.4111, 30.5995, 36.7396, 42.6680, 47.1189 ARMA(3, 0) + * 16.1750, 23.1243, 30.8535, 37.1636, 43.0417, 48.2946 ARMA(3, 1) + * + * 15.8988, 22.5738, 28.5298, 33.3360, 39.1586, 43.1606 AR(4) + * 15.6423, 21.7982, 27.9006, 33.1000, 39.0543, 43.9748 ARY(5) + * 15.6423, 21.8663, 28.0034, 32.9898, 38.9927, 43.6218 ARY_D(4) + * 15.5814, 21.2352, 28.5489, 34.4369, 40.3618, 45.2605 ARY_Quad(4) + * 16.6457, 22.9684, 29.0629, 34.6601, 40.1521, 44.0896 ARMA(4, 0) + * 15.3290, 21.9965, 27.8397, 34.3507, 40.0857, 45.8402 ARMA(4, 1) + * + * 15.9279, 22.5769, 28.5035, 33.3019, 39.1381, 43.0520 AR(5) + * 15.6349, 21.8003, 27.9084, 33.1127, 39.0628, 44.0175 ARY(5) + * 15.6349, 21.7885, 28.0114, 33.0117, 39.1418, 43.7715 ARY_D(5) + * 15.3209, 21.3541, 28.9325, 35.1359, 41.0300, 
45.8558 ARY_Quad(5) + * 16.3720, 22.8047, 28.7702, 33.9232, 39.5677, 43.2628 ARMA(5, 0) + * 15.3361, 21.9121, 27.6568, 34.0218, 39.6254, 45.2994 ARMA(5, 1) + * + * > runMain scalation.modeling.forecasting.aRMATest3 + */ +@main def aRMATest3 (): Unit = + + val yy = loadData_y () +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + + for p <- 1 to 5; q <- 0 to 1 do + AR.hp("p") = p // number of AR terms + AR.hp("q") = q // number of MA terms + val mod = new ARMA (y, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest ()() // train and test on full dataset + + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + mod.diagnoseAll (y, mod.getYf) // use showYf = false to not print forecast matrix Yf + end for + +end aRMATest3 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRMATest4` main function tests the `ARMA` class on real data: + * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. + * Test forecasts (h = 1 to hh steps ahead forecasts). + * Comparison of sMAPE for AR(p), ARY(p), ARY_D(p), ARMA(p, 0), and ARMA(p, 1). 
+ * + * 19.1334, 31.1906, 44.3787, 55.1576, 65.1810, 74.0524 AR(1) + * 19.0397, 30.4570, 43.9113, 54.9642, 65.3163, 74.2124 ARY(1) + * 19.1718, 30.7038, 44.5265, 55.7794, 66.3876, 75.6566 ARMA(1, 0) + * 18.3012, 29.3224, 43.0369, 54.5719, 64.9230, 74.2520 ARMA(1, 1) + * + * 16.6447, 26.9109, 39.8106, 50.8595, 60.2176, 68.6317 AR(2) + * 16.8833, 26.4824, 39.2329, 50.8677, 61.0624, 70.3218 ARY(2) + * 19.4256, 32.8815, 46.4279, 57.2199, 66.8651, 75.3077 ARMA(2, 0) + * 18.3009, 30.0443, 43.6634, 54.9669, 64.8541, 73.7911 ARMA(2, 1) + * + * 15.9232, 23.5929, 34.3577, 44.1784, 53.6513, 62.0129 AR(3) + * 15.7190, 21.7959, 32.1395, 42.0074, 52.6874, 62.7276 ARY(3) + * 16.4547, 24.4668, 36.8597, 46.7958, 58.3539, 67.6623 ARMA(3, 0) + * 17.0353, 24.0309, 36.6585, 46.1961, 57.6348, 67.2332 ARMA(3, 1) + * + * 15.3256, 22.6893, 30.7558, 39.6274, 48.6646, 56.7375 AR(4) + * 14.6791, 19.9940, 26.5644, 35.4590, 41.4955, 50.8660 ARY(4) + * 14.9687, 22.2599, 29.6359, 39.6018, 48.2853, 56.9797 ARMA(4, 0) + * 15.2243, 21.4976, 27.7929, 37.9923, 45.0999, 54.3417 ARMA(4, 1) + * + * 15.9166, 21.5246, 28.0675, 36.8669, 43.3785, 51.1786 AR(5) + * 15.0232, 19.4222, 27.1981, 35.4744, 40.3466, 48.4066 ARY(5) + * 15.5426, 21.0405, 29.1731, 37.8006, 43.3590, 52.6387 ARMA(5, 0) + * 15.7641, 21.0723, 28.7463, 37.7968, 42.8480, 52.8277 ARMA(5, 1) + * + * > runMain scalation.modeling.forecasting.aRMATest4 + */ +@main def aRMATest4 (): Unit = + + val yy = loadData_y () +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + + for p <- 1 to 5; q <- 0 to 1 do + AR.hp("p") = p // number of AR terms + AR.hp("q") = q // number of MA terms + val mod = new ARMA (y, hh) // create model for time series data + banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest ()() + + mod.setSkip (0) // using data from training can forecast first in test + mod.rollValidate () // TnT with Rolling Validation + mod.diagnoseAll (y, 
mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set +// println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + end for + +end aRMATest4 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRMATest5` main function tests the `ARMA` class on real data: + * Forecasting COVID-19 using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). + * Comparison of sMAPE for ARMA(p, 1) (i.e., q = 1) for different p orders. + * > runMain scalation.modeling.forecasting.aRMATest5 + */ +@main def aRMATest5 (): Unit = + + val yy = loadData_y () +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + + AR.hp("q") = 1 // number of MA terms + for p <- 1 to 5 do + AR.hp("p") = p // number of AR terms + val mod = new ARMA (y, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest ()() // train and test on full dataset + + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + mod.diagnoseAll (y, mod.getYf) + println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") + end for + +end aRMATest5 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRMATest6` main function tests the `ARMA` class on small dataset. + * Test forecasts (h = 1 step ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.aRMATest6 + */ +@main def aRMATest6 (): Unit = + + val y = VectorD (1, 3, 4, 2, 5, 7, 9, 8, 6, 3) + + AR.hp ("q") = 0 + var mod = new ARMA (y, 1) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on a Small Dataset") + mod.trainNtest ()() // train and test on full dataset + println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") + new Baseline (y, "AR1") + + AR.hp ("p") = 2 + mod = new ARMA (y, 1) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on a Small Dataset") + mod.trainNtest ()() // train and test on full dataset + println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") + new Baseline (y, "AR2") + +end aRMATest6 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRMATest7` main function tests the `ARMA` class on small dataset. + * Test the generation of ARMA sequences for various p and q values. + * > runMain scalation.modeling.forecasting.aRMATest7 + */ +@main def aRMATest7 (): Unit = + + val nrg = random.Normal (0.0, 1.0) + + val m = 100 + val y = new VectorD (m) + val e = new VectorD (m) + val φ = VectorD (0.8, 0.7) + val θ = VectorD (0.8, 0.7) + + for p <- 0 to 2; q <- 0 to 2 if p + q > 0 do + val (rp, rq) = ((0 until p), (0 until q)) + for t <- y.indices do + e(t) = nrg.gen + y(t) = rdot (φ(rp), y, t) + rdot (θ(rq), e, t) + e(t) + end for + new Plot (null, y, null, s"Plot of y vs. 
t for p = $p, q = $q", lines = true) + object CG extends Correlogram (y) + CG.makeCorrelogram () + CG.plotCorrelogram () + end for + +end aRMATest7 + diff --git a/src/main/scala/scalation/modeling/forecasting/ARX.scala b/src/main/scala/scalation/modeling/forecasting/ARX.scala index 5cdb75b74..febd3965a 100644 --- a/src/main/scala/scalation/modeling/forecasting/ARX.scala +++ b/src/main/scala/scalation/modeling/forecasting/ARX.scala @@ -15,11 +15,13 @@ package scalation package modeling package forecasting -import scala.collection.mutable.ArrayBuffer +import scala.collection.mutable.{ArrayBuffer, LinkedHashSet => LSET} +import scala.runtime.ScalaRunTime.stringOf import scalation.mathstat._ import MakeMatrix4TS._ +import TransformT._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `ARX` class provides basic time series analysis capabilities for ARX models. @@ -46,15 +48,15 @@ class ARX (x: MatrixD, y: VectorD, hh: Int, n_exo: Int, fname: Array [String], tForms: TransformMap = Map ("tForm_y" -> null)) extends Forecaster_Reg (x, y, hh, fname, tRng, hparam, bakcast): - private val debug = debugf ("ARX", false) // debug function + private val debug = debugf ("ARX", false) // debug function protected val p = hparam("p").toInt // use the last p endogenous values (p lags) protected val q = hparam("q").toInt // use the last q exogenous values (q lags) protected val spec = hparam("spec").toInt // trend terms: 0 - none, 1 - constant, 2 - linear, 3 - quadratic // 4 - sine, 5 cosine - modelName = s"ARX($p, $q, $n_exo)" - yForm = tForms("tForm_y").asInstanceOf [Transform] + _modelName = s"ARX_${p}_${q}_$n_exo" + yForm = tForms("tForm_y").asInstanceOf [Transform] - debug ("init", s"$modelName with $n_exo exogenous variables and additional term spec = $spec") + debug ("init", s"$modelName with $n_exo exogenous variables and additional term spec = $spec, x.dims = ${x.dims}") // debug ("init", s"[ x | y ] = ${x :^+ y}") 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -73,8 +75,9 @@ class ARX (x: MatrixD, y: VectorD, hh: Int, n_exo: Int, fname: Array [String], val x_fcast = yy(h-nyy until h) // get forecasted y-values var xy = x_act ++ x_fcast - for j <- 0 until n_exo do // for the j-th exogenous variable - xy = xy ++ hide (xx(n_endo + j*q until n_endo + (j+1)*q), h) + if n_exo > 0 and q > 0 then + for j <- 0 until n_exo do // for the j-th exogenous variable + xy = xy ++ hide (xx(n_endo + j*q until n_endo + (j+1)*q), h) xx(0 until spec) ++ xy end forge @@ -87,13 +90,21 @@ class ARX (x: MatrixD, y: VectorD, hh: Int, n_exo: Int, fname: Array [String], * @param fill whether to backfill with the rightmost value (true) or with 0 (false) */ def hide (z: VectorD, h: Int, fill: Boolean = true): VectorD = - val zl = z(z.dim - 1) // last available z value per horizon + val zl = z(z.dim - 1) // last available z value per horizon val z_ = new VectorD (z.dim) for k <- z.indices do z_(k) = if k <= z.dim - h then z(k+h-1) else if fill then zl else 0.0 z_ end hide + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a `ARX` model using the cols with the selected features. + * @param cols the cols of the input matrix with selected features + */ + def convertReg2Forc (cols: LSET [Int] = mcols): ARX = + new ARX (getX(?, cols), getY, hh, n_exo, cols.toArray.map (fname (_)), tRng, hparam, bakcast, tForms) + end convertReg2Forc + end ARX @@ -105,78 +116,110 @@ object ARX extends MakeMatrix4TS: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create an `ARX` object by building an input matrix xy and then calling the * `ARX` constructor. 
- * @param xe the matrix of exogenous variable values - * @param y the response vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param fname_ the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param fEndo the array of functions used to transform endogenous variables - * @param fExo the array of functions used to transform exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param xe the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param fEndo_enab the set of transforms to be used for the endogenous + * @param fExo_enab the array containing the sets of transforms to be used for the exogenous + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) */ def apply (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array [Transform] = null, fExo: Array [Transform] = null, + fEndo_enab: LSET [TransformT] = null, + fExo_enab: Array [LSET [TransformT]] = null, bakcast: Boolean = false): ARX = - val xy = buildMatrix (xe, y, hparam, bakcast) + var xe_bfill: MatrixD = null + if xe.dim2 > 0 and hparam("q").toInt > 0 then + xe_bfill = new MatrixD (xe.dim, xe.dim2) + for j <- xe.indices2 do xe_bfill(?, j) = backfill (xe(?, j)) // backfill each exogenous variable + + val xy = buildMatrix (xe_bfill, y, hparam, bakcast) val fname = if fname_ == null then formNames (xe.dim2, hparam) else fname_ new ARX (xy, y, hh, xe.dim2, fname, tRng, hparam, bakcast) end apply 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create an `ARX` object by building an input matrix xy and then calling the - * `ARX` constructor. Also rescale the input data. - * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param fEndo the array of functions used to transform endogenous variables - * @param fExo the array of functions used to transform exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForm the z-transform (rescale to standard normal) + * `ARX` constructor, with rescaling of endogneous and exogenous variable values. + * @param xe the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param fEndo_enab the set of transforms to be used for the endogenous + * @param fExo_enab the array containing the sets of transforms to be used for the exogenous + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param tFormT the transform for rescaling endogenous and exogenous */ def rescale (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array [Transform] = null, fExo: Array [Transform] = null, + fEndo_enab: LSET [TransformT] = null, + fExo_enab: Array [LSET [TransformT]] = null, bakcast: Boolean = false, - tForm: VectorD | MatrixD => Transform = x => zForm(x)): ARX = + tFormT: TransformT = MinMax): ARX = + + if 
tFormT.name == "NormForm" then hparam("nneg") = 0 - val tForm_y = tForm(y) - if tForm_y.getClass.getSimpleName == "zForm" then hparam("nneg") = 0 + // rescale y + val tFormScale = tFormT.form + val tr_size = Model.trSize (y.dim) + val tForm_y = tFormScale (y(0 until tr_size)) // use (mean, std) of training set for both In-sample and TnT val y_scl = tForm_y.f(y) - val tForms = Map ("tForm_y" -> tForm_y) - val xy = buildMatrix (xe, y_scl, hparam, bakcast) - val fname = if fname_ == null then formNames (xe.dim2, hparam) else fname_ + var xe_bfill: MatrixD = null + if xe.dim2 > 0 and hparam("q").toInt > 0 then + xe_bfill = new MatrixD (xe.dim, xe.dim2) + for j <- xe.indices2 do xe_bfill(?, j) = backfill (xe(?, j)) // backfill each exogenous variable + if tFormScale != null then + val tForm_exo = tFormScale (xe_bfill(0 until tr_size)) + xe_bfill = tForm_exo.f (xe_bfill) // rescale the backfilled exogenous variable + + val tForms = Map ("tForm_y" -> tForm_y) + val xy = buildMatrix (xe_bfill, y_scl, hparam, bakcast) + val fname = if fname_ == null then formNames (xe.dim2, hparam) else fname_ new ARX (xy, y_scl, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms) end rescale //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build the input matrix by combining the p + spec columns for the trend and + /** Build the input matrix by combining the spec + p columns for the trend and * endogenous variable with the q * xe.dim2 columns for the exogenous variables. - * @param xe the matrix of exogenous variable values - * @param y the response vector (time series data) - * @param hp_ the hyper-parameters - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * When cross = true, additional cross terms will be added. Columns produced + * by transformations will be added as well. 
+ * @param xe_bfill the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hp_ the hyper-parameters + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) */ - def buildMatrix (xe: MatrixD, y: VectorD, hp_ : HyperParameter, bakcast: Boolean): MatrixD = + def buildMatrix (xe_bfill: MatrixD, y: VectorD, hp_ : HyperParameter, bakcast: Boolean): MatrixD = + val (p, q, spec, lwave) = (hp_("p").toInt, hp_("q").toInt, hp_("spec").toInt, hp_("lwave").toDouble) - makeMatrix4T (y, spec, lwave, bakcast) ++^ // trend terms - makeMatrix4L (y, p, bakcast) ++^ // regular lag terms - makeMatrix4EXO (xe, q, 1, bakcast) // add exogenous terms + + // make matrix xy for trend terms and lagged terms of the endogenous variable + var xy = makeMatrix4T (y, spec, lwave, bakcast) ++^ // trend terms + makeMatrix4L (y, p, bakcast) // lagged linear terms + + // apply transformations fExo to the exogenous variables and add there columns to x_exo + if xe_bfill != null and q > 0 then + xy = xy ++^ makeMatrix4L (xe_bfill, q, bakcast) // add lagged exogenous term to xy + + println (s"xy.dims = ${xy.dims}") + xy end buildMatrix //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Form an array of names for the features included in the model. 
- * @param n_exo the number of exogenous variable - * @param hp_ the hyper-parameters - * @param n_fEn the number of functions used to map endogenous variables - * @param n_fEx the number of functions used to map exogenous variables + * @param n_exo the number of exogenous variable + * @param hp_ the hyper-parameters + * @param n_fEn the number of functions used to map endogenous variables (none for `ARX`) + * @param n_fExArr the number of functions used to map exogenous variables (none for `ARX`) */ - def formNames (n_exo: Int, hp_ : HyperParameter, n_fEn: Int = 0, n_fEx: Int = 0): Array [String] = + def formNames (n_exo: Int, hp_ : HyperParameter, n_fEn: Int = 0, n_fExArr: Array [Int] = null): Array [String] = + val (p, q, spec) = (hp_("p").toInt, hp_("q").toInt, hp_("spec").toInt) val names = ArrayBuffer [String] () for j <- 0 until n_exo; k <- q to 1 by -1 do names += s"xe${j}l$k" @@ -262,10 +305,17 @@ end aRXTest2 hp("q") = q // number of exo lags hp("spec") = s // trend specification: 0, 1, 2, 3, 5 val mod = ARX (xe, y, hh) // create model for time series data - mod.inSampleTest () // In-sample Testing + mod.inSample_Test () // In-sample Testing println (mod.summary ()) // statistical summary of fit end for + banner ("Test NormForm Transform") + val yform = NormForm (y) + val y_ = yform.f (y) + val xeform = NormForm (xe(?, 0)) + val xe_ = xeform.f (xe(?, 0)) + new Plot (null, y_, xe_, s"y (new_deaths) vs. 
t", lines = true) + end aRXTest3 @@ -277,7 +327,7 @@ end aRXTest3 */ @main def aRXTest4 (): Unit = - val exo_vars = Array ("icu_patients") + val exo_vars = Array ("icu_patients", "positive_rate") // val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") val (xxe, yy) = loadData (exo_vars, response) println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") @@ -293,16 +343,16 @@ end aRXTest3 hp("p") = p // number of endo lags hp("q") = q // number of exo lags hp("spec") = s // trend specification: 0, 1, 2, 3, 5 -// val mod = ARX (xe, y, hh) // create model for time series data - val mod = ARX.rescale (xe, y, hh) +// val mod = ARX (xe, y, hh) // create model for time series data + val mod = ARX.rescale (xe, y, hh, tFormT = Log1p) banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") mod.trainNtest_x ()() // use customized trainNtest_x mod.setSkip (0) // mod.rollValidate (rc = 200) // TnT with Rolling Validation mod.rollValidate () // TnT with Rolling Validation default rc = 2 - mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set -// println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") end for end aRXTest4 @@ -317,7 +367,7 @@ end aRXTest4 */ @main def aRXTest5 (): Unit = - val exo_vars = Array ("icu_patients") + val exo_vars = Array ("icu_patients", "positive_rate") // val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") val (xxe, yy) = loadData (exo_vars, response) println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") @@ -327,10 +377,11 @@ end aRXTest4 // val y = yy // full val y = yy(0 until 116) // clip the flat end val hh = 6 // maximum forecasting horizon - hp("p") = 10 // endo lags - hp("q") = 10 // exo lags - hp("spec") = 5 // trend specification: 0, 1, 2, 3, 5 + hp("p") 
= 6 // endo lags + hp("q") = 4 // exo lags + hp("spec") = 1 // trend specification: 0, 1, 2, 3, 5 hp("lwave") = 20 // wavelength (distance between peaks) + RidgeRegression.hp("lambda") = 6.0 val mod = ARX (xe, y, hh) // create model for time series data banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") @@ -343,11 +394,15 @@ end aRXTest4 for tech <- SelectionTech.values do banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, false) // R^2, R^2 bar, sMAPE, R^2 cv + val (cols, rSq) = mod.selectFeatures (tech, "none") // R^2, R^2 bar, sMAPE, R^2 cv val k = cols.size println (s"k = $k") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "R^2 cv"), - s"R^2 vs k for ${mod.modelName} with $tech", lines = true) + + val modBest = mod.getBest.mod // regress on this x + println (stringOf (mod.getFname)) + println (stringOf (modBest.getFname)) + + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs k for ${mod.modelName} with $tech", lines = true) banner (s"Feature Importance with $tech") println (s"$tech: rSq = $rSq") val imp = mod.importance (cols.toArray, rSq) diff --git a/src/main/scala/scalation/modeling/forecasting/ARX_D.scala b/src/main/scala/scalation/modeling/forecasting/ARX_D.scala index 448d81943..7078e9c42 100644 --- a/src/main/scala/scalation/modeling/forecasting/ARX_D.scala +++ b/src/main/scala/scalation/modeling/forecasting/ARX_D.scala @@ -12,16 +12,21 @@ package scalation package modeling package forecasting +import scala.collection.mutable.{LinkedHashSet => LSET} + import scalation.mathstat._ -import scalation.modeling.neuralnet.{RegressionMV => REGRESSION} -import Example_Covid.{loadData, response} +//import scalation.modeling.neuralnet.{RegressionMV => REGRESSION} +import scalation.modeling.neuralnet.{RidgeRegressionMV => REGRESSION} + import MakeMatrix4TS._ +import TransformT._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `ARX_D` class 
provides basic time series analysis capabilities for * ARX_D models. ARX_D models are often used for forecasting. * `ARX_D` uses DIRECT (as opposed to RECURSIVE) multi-horizon forecasting. + * @note: `ARX_D` is dependent on [[ARX]] class for feature selection. * Given time series data stored in vector y, its next value y_t = combination of last p values. * * y_t = b dot x_t + e_t @@ -41,18 +46,18 @@ class ARX_D (x: MatrixD, y: MatrixD, hh: Int, n_exo: Int, fname: Array [String], tRng: Range = null, hparam: HyperParameter = hp, bakcast: Boolean = false, tForms: TransformMap = Map ("tForm_y" -> null)) - extends Forecaster_D (x, y, hh, tRng, hparam, bakcast): + extends Forecaster_D (x, y, hh, fname, tRng, hparam, bakcast): - private val debug = debugf ("ARX_D", true) // debug function - protected val p = hparam("p").toInt // use the last p endogenous values (p lags) - protected val q = hparam("q").toInt // use the last q exogenous values (q lags) - protected val spec = hparam("spec").toInt // trend terms: 0 - none, 1 - constant, 2 - linear, 3 - quadratic - // 4 - sine, 5 cosine - protected val nneg = hparam("nneg").toInt == 1 // 0 => unrestricted, 1 => predictions must be non-negative - protected val reg = new REGRESSION (x, y, fname, hparam) // delegate training to multi-variate regression + private val debug = debugf ("ARX_D", false) // debug function + protected val p = hparam("p").toInt // use the last p endogenous values (p lags) + protected val q = hparam("q").toInt // use the last q exogenous values (q lags) + protected val spec = hparam("spec").toInt // trend terms: 0 - none, 1 - constant, 2 - linear, 3 - quadratic + // 4 - sine, 5 cosine + protected val nneg = hparam("nneg").toInt == 1 // 0 => unrestricted, 1 => predictions must be non-negative + protected val reg = new REGRESSION (x, y, fname, hparam ++ REGRESSION.hp) // delegate training to multi-variate regression - modelName = s"ARX_D($p, $q, $n_exo)" - yForm = tForms("tForm_y").asInstanceOf [Transform] + 
_modelName = s"ARX_D_${p}_${q}_$n_exo" + yForm = tForms("tForm_y").asInstanceOf [Transform] debug ("init", s"$modelName with $n_exo exogenous variables and additional term spec = $spec") // debug ("init", s"[ x | y ] = ${x ++^ y}") @@ -66,8 +71,10 @@ class ARX_D (x: MatrixD, y: MatrixD, hh: Int, n_exo: Int, fname: Array [String], */ def train_x (x_ : MatrixD, y_ : MatrixD): Unit = debug ("train_x", s"$modelName, x_.dim = ${x_.dim}, y_.dim = ${y_.dim}") - reg.train (x_, y_) // train the multi-variate regression model - bb = reg.parameter // coefficients from regression + val idx = y_(?, y.dim2-1).indexOf (NO_DOUBLE) // index of first non-value in the last column + val (x_t, y_t) = if idx < 0 then (x_, y_) else (x_(0 until idx), y_(0 until idx)) + reg.train (x_t, y_t) // train the multi-variate regression model + bb = reg.parameter // coefficients from regression end train_x //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -78,9 +85,9 @@ class ARX_D (x: MatrixD, y: MatrixD, hh: Int, n_exo: Int, fname: Array [String], * @param b_ the parameters/coefficients for the model * @param vifs the Variance Inflation Factors (VIFs) */ - override def summary (x_ : MatrixD = getX, fname_ : Array [String] = reg.getFname, + override def summary (x_ : MatrixD = x, fname_ : Array [String] = reg.getFname, b_ : VectorD = b, vifs: VectorD = reg.vif ()): String = - super.summary (x_, fname_, b_, vifs) // summary from `Fit` + super.summary (x_, fname_, b_, vifs) // summary from `Fit` end summary //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -106,9 +113,9 @@ class ARX_D (x: MatrixD, y: MatrixD, hh: Int, n_exo: Int, fname: Array [String], * @param y_ the actual values to use in making predictions */ override def forecast (t: Int, y_ : VectorD): VectorD = - val pred = predict (t, MatrixD (y_).transpose) - for h <- 1 to hh do yf(t, h) = pred(h-1) - pred // yh is pred + val pred = predict (t, MatrixD (y_).ᵀ) + yf(t, 1 until 
hh+1) = pred + pred // yh is pred end forecast //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -123,6 +130,25 @@ class ARX_D (x: MatrixD, y: MatrixD, hh: Int, n_exo: Int, fname: Array [String], yf end forecastAll + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build an `ARX_D` model using the cols with the selected features. + * @param cols the cols of the input matrix with selected features + * @param h the number of the horizon + */ + def getModel (cols: LSET [Int] = mcols): ARX_D = + new ARX_D (x(?, cols), y, hh, n_exo, cols.toArray.map (fname(_)), tRng, hparam, bakcast, tForms) + end getModel + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a single-horizon `ARX` model using the cols with the selected features. + * Note: uses `ARX` as it is the base model for ARX*_D. + * @param cols the cols of the input matrix with selected features + * @param h the number of the horizon + */ + def getModel_h (cols: LSET [Int] = mcols, h: Int = 1): ARX = + new ARX (x(?, cols), y(?, h-1), 1, n_exo, cols.toArray.map (fname(_)), tRng, hparam, bakcast, tForms) + end getModel_h + end ARX_D @@ -132,67 +158,87 @@ end ARX_D object ARX_D extends MakeMatrix4TS: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX_D` object by building an input matrix x and then calling the constructor. 
- * @param xe the matrix of exogenous variable values - * @param y the response vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param fname_ the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults for `MakeMatrix4TS.hp`) - * @param fEndo the array of functions used to transform endogenous variables - * @param fExo the array of functions used to transform exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + /** Create an `ARX_D` object by building an input matrix xy and then calling the constructor. + * @param xe the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters (defaults for `MakeMatrix4TS.hp`) + * @param fEndo_enab the set of transforms to be used for the endogenous + * @param fExo_enab the array containing the sets of transforms to be used for the exogenous + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) */ def apply (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array [Transform] = null, fExo: Array [Transform] = null, + fEndo_enab: LSET [TransformT] = null, + fExo_enab: Array [LSET [TransformT]] = null, bakcast: Boolean = false): ARX_D = - val xy = ARX.buildMatrix (xe, y, hparam, bakcast) + var xe_bfill: MatrixD = null + if xe.dim2 > 0 and hparam("q").toInt > 0 then + xe_bfill = new MatrixD (xe.dim, xe.dim2) + for j <- xe.indices2 do xe_bfill(?, j) = backfill (xe(?, j)) // backfill each exogenous variable + + val xy = ARX.buildMatrix (xe_bfill, y, hparam, bakcast) + val 
fname = if fname_ == null then formNames (xe.dim2, hparam) else fname_ val yy = makeMatrix4Y (y, hh, bakcast) - val fname = if fname_ == null then ARX.formNames (xe.dim2, hparam) else fname_ new ARX_D (xy, yy, hh, xe.dim2, fname, tRng, hparam, bakcast) end apply //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create an `ARX_D` object by building an input matrix xy and then calling the - * `ARX_D` constructor. Also rescale the input data. - * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param fEndo the array of functions used to transform endogenous variables - * @param fExo the array of functions used to transform exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForm the z-transform (rescale to standard normal) + * `ARX_D` constructor, with rescaling of endogneous and exogenous variable values. 
+ * @param xe the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param fEndo_enab the set of transforms to be used for the endogenous + * @param fExo_enab the array containing the sets of transforms to be used for the exogenous + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param tFormT the transform for rescaling endogenous and exogenous */ def rescale (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array [Transform] = null, fExo: Array [Transform] = null, + fEndo_enab: LSET [TransformT] = null, + fExo_enab: Array [LSET [TransformT]] = null, bakcast: Boolean = false, - tForm: VectorD | MatrixD => Transform = x => zForm(x)): ARX_D = + tFormT: TransformT = MinMax): ARX_D = - val tForm_y = tForm(y) - if tForm_y.getClass.getSimpleName == "zForm" then hparam("nneg") = 0 + if tFormT.name == "NormForm" then hparam("nneg") = 0 + + // rescale y + val tFormScale = tFormT.form + val tr_size = Model.trSize (y.dim) + val tForm_y = tFormScale (y(0 until tr_size)) // use (mean, std) of training set for both In-sample and TnT val y_scl = tForm_y.f(y) - val tForms: TransformMap = Map ("tForm_y" -> tForm_y) - val xy = ARX.buildMatrix (xe, y_scl, hparam, bakcast) - val yy = makeMatrix4Y (y_scl, hh, bakcast) - val fname = if fname_ == null then formNames (xe.dim2, hparam) else fname_ + var xe_bfill: MatrixD = null + if xe.dim2 > 0 and hparam("q").toInt > 0 then + xe_bfill = new MatrixD (xe.dim, xe.dim2) + for j <- xe.indices2 do xe_bfill(?, j) = backfill (xe(?, j)) // backfill each exogenous variable + if tFormScale != null then + val tForm_exo = tFormScale (xe_bfill(0 until 
tr_size)) + xe_bfill = tForm_exo.f (xe_bfill) // rescale the backfilled exogenous variable + + val tForms = Map ("tForm_y" -> tForm_y) + val xy = ARX.buildMatrix (xe_bfill, y_scl, hparam, bakcast) + val fname = if fname_ == null then formNames (xe.dim2, hparam) else fname_ + val yy = makeMatrix4Y (y_scl, hh, bakcast) new ARX_D (xy, yy, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms) end rescale //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Form an array of names for the features included in the model. - * @param n_exo the number of exogenous variable - * @param hp_ the hyper-parameters - * @param n_fEn the number of functions used to map endogenous variables - * @param n_fEx the number of functions used to map exogenous variables + * @param n_exo the number of exogenous variable + * @param hp_ the hyper-parameters + * @param n_fEn the number of functions used to map endogenous variables ((none for `ARX_D`) + * @param n_fExArr the number of functions used to map exogenous variables ((none for `ARX_D`) */ - def formNames (n_exo: Int, hp_ : HyperParameter, n_fEn: Int, n_fEx: Int): Array [String] = - ARX.formNames (n_exo, hp_, n_fEn, n_fEx) + def formNames (n_exo: Int, hp_ : HyperParameter, n_fEn: Int = 0, n_fExArr: Array [Int] = null): Array [String] = + ARX.formNames (n_exo, hp_, n_fEn, n_fExArr) end formNames end ARX_D @@ -241,6 +287,7 @@ end aRX_DTest end aRX_DTest2 */ +import Example_Covid.{loadData, response} //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `aRX_DTest3` main function tests the `ARX_D` class on real data: @@ -267,9 +314,9 @@ end aRX_DTest2 hp("q") = q // mumber of exo lags hp("spec") = s // trend specification: 0, 1, 2, 3, 5 val mod = ARX_D (xe, y, hh) // create model for time series data -// val mod = ARX_D.rescale (xe, y, hh) - mod.inSampleTest () // In-sample Testing + mod.inSample_Test () // In-sample Testing println (mod.summary ()) // statistical summary of 
fit FIX - crashes + println (mod.getYf) // print the forecast matrix end for end aRX_DTest3 @@ -294,21 +341,21 @@ end aRX_DTest3 val y = yy(0 until 116) // clip the flat end val hh = 6 // maximum forecasting horizon hp("lwave") = 20 // wavelength (distance between peaks) + RidgeRegression.hp("lambda") = 1.0 for p <- 6 to 6; q <- 4 to 4; s <- 1 to 1 do // number of lags (endo, exo); trend hp("p") = p // number of endo lags hp("q") = q // number of exo lags hp("spec") = s // trend specification: 0, 1, 2, 3, 5 val mod = ARX_D (xe, y, hh) // create model for time series data -// val mod = ARX_D.rescale (xe, y, hh) +// val mod = ARX_D.rescale (xe, y, hh, tForm = Log1p.form) banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") mod.trainNtest_x ()() // use customized trainNtest_x mod.setSkip (0) mod.rollValidate () -// println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") - mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set -// println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") end for end aRX_DTest4 diff --git a/src/main/scala/scalation/modeling/forecasting/ARX_Quad.scala b/src/main/scala/scalation/modeling/forecasting/ARX_Quad.scala index 1cbaea299..5e50caefa 100644 --- a/src/main/scala/scalation/modeling/forecasting/ARX_Quad.scala +++ b/src/main/scala/scalation/modeling/forecasting/ARX_Quad.scala @@ -14,11 +14,12 @@ package scalation package modeling package forecasting -import scala.collection.mutable.ArrayBuffer +import scala.collection.mutable.{ArrayBuffer, LinkedHashSet => LSET} import scalation.mathstat._ import MakeMatrix4TS._ +import TransformT._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `ARX_Quad` class provides basic time series analysis capabilities for ARX quadratic 
models. @@ -48,10 +49,10 @@ class ARX_Quad (x: MatrixD, y: VectorD, hh: Int, n_exo: Int, fname: Array [Strin private val debug = debugf ("ARX_Quad", true) // debug function - modelName = s"ARX_Quad($p, $q, $n_exo)" + _modelName = s"ARX_Quad_${p}_${q}_$n_exo" debug ("init", s"$modelName with with $n_exo exogenous variables and additional term spec = $spec") - debug ("init", s"[ x | y ] = ${x :^+ y}") +// debug ("init", s"[ x | y ] = ${x :^+ y}") //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Forge a new vector from the first spec values of x, the last p-h+1 values @@ -67,10 +68,11 @@ class ARX_Quad (x: MatrixD, y: VectorD, hh: Int, n_exo: Int, fname: Array [Strin val nyy = p - x_act.dim // number of forecasted values needed val x_fcast = yy(h-nyy until h) // get forecasted y-values - val x_act_pp = xx(n_endo+p - (p+1-h) until n_endo+p) // get transformed lagged endogenous variable - val x_fcast_pp = scaleCorrection (x_fcast) + val x_act_pow = xx(n_endo+p - (p+1-h) until n_endo+p) // get transformed lagged endogenous variable +// val x_fcast_pow = scaleCorrection (x_fcast) + val x_fcast_pow = tForms("powForm").asInstanceOf [Transform].f(x_fcast) - var xy = x_act ++ x_fcast ++ x_act_pp ++ x_fcast_pp // add transformed lagged forecasted y-values + var xy = x_act ++ x_fcast ++ x_act_pow ++ x_fcast_pow // add transformed lagged forecasted y-values for j <- 0 until n_exo do // for the j-th exogenous variable xy = xy ++ hide (xx(n_endo+p + j*q until n_endo+p + (j+1)*q), h) xx(0 until spec) ++ xy @@ -79,16 +81,17 @@ class ARX_Quad (x: MatrixD, y: VectorD, hh: Int, n_exo: Int, fname: Array [Strin //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Apply scale correction to x_fcast. 
* @param x_fcast the vector to apply the scale correction to - */ + * def scaleCorrection (x_fcast: VectorD): VectorD = if tForms("tForm_y") != null then val f_pp = (tForms("tForm_endo").asInstanceOf [Transform].f(_: VectorD)) ⚬ - (tForms("ppForm").asInstanceOf [Transform].f(_: VectorD)) ⚬ + (tForms("powForm").asInstanceOf [Transform].f(_: VectorD)) ⚬ (tForms("tForm_y").asInstanceOf [Transform].fi(_: VectorD)) f_pp (x_fcast) else - tForms("ppForm").asInstanceOf [Transform].f(x_fcast) + tForms("powForm").asInstanceOf [Transform].f(x_fcast) end scaleCorrection + */ end ARX_Quad @@ -101,109 +104,114 @@ object ARX_Quad extends MakeMatrix4TS: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create an `ARX_Quad` object by building an input matrix xy and then calling the * `ARX_Quad` constructor. - * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param fname_ the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param fEndo the array of functions used to transform endogenous variables - * @param fExo the array of functions used to transform exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param xe the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param fEndo_enab the set of transforms to be used for the endogenous + * @param fExo_enab the array containing the sets of transforms to be used for the exogenous + * @param bakcast whether a backcasted value is prepended to the time 
series (defaults to false) */ def apply (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array [Transform] = null, fExo: Array [Transform] = null, + fEndo_enab: LSET [TransformT] = null, + fExo_enab: Array [LSET [TransformT]] = null, bakcast: Boolean = false): ARX_Quad = - val (xy, tForms) = buildMatrix (xe, y, hparam, bakcast) - val fname = if fname_ == null then formNames (xe.dim2, hparam) else fname_ - new ARX_Quad (xy, y, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms) + var xe_bfil: MatrixD = null + if xe.dim2 > 0 and hparam("q").toInt > 0 then + xe_bfil = new MatrixD (xe.dim, xe.dim2) + for j <- xe.indices2 do xe_bfil(?, j) = backfill (xe(?, j)) // backfill each exogenous variable + + val xy = buildMatrix (xe_bfil, y, hparam, bakcast) + val fname = formNames (xe.dim2, hparam) + new ARX_Quad (xy, y, hh, xe.dim2, fname, tRng, hparam, bakcast) end apply //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create an `ARX_Quad` object by building an input matrix xy and then calling the * `ARX_Quad` constructor. Also rescale the input data. 
- * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param fEndo the array of functions used to transform endogenous variables - * @param fExo the array of functions used to transform exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForm the z-transform (rescale to standard normal) + * @param xe the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param fEndo_enab the set of transforms to be used for the endogenous + * @param fExo_enab the array containing the sets of transforms to be used for the exogenous + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param tFormT the transform for rescaling endogenous and exogenous */ def rescale (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array [Transform] = null, fExo: Array [Transform] = null, + fEndo_enab: LSET [TransformT] = null, + fExo_enab: Array [LSET [TransformT]] = null, bakcast: Boolean = false, - tForm: VectorD | MatrixD => Transform = x => zForm(x)): ARX_Quad = - - val (xy, tForms) = buildMatrix (xe, y, hparam, bakcast, tForm) - if tForms("tForm_y").getClass.getSimpleName == "zForm" then hp("nneg") = 0 - val y_scl = tForms("tForm_y").f(y) - val fname = if fname_ == null then formNames (xe.dim2, hparam) else fname_ + tFormT: TransformT = MinMax): ARX_Quad = + + if tFormT.name == "NormForm" then 
hparam("nneg") = 0 + + // rescale y + val tFormScale = tFormT.form + val tr_size = Model.trSize (y.dim) + val tForm_y = tFormScale (y(0 until tr_size)) // use (mean, std) of training set for both In-sample and TnT + val y_scl = tForm_y.f(y) + + var xe_bfil: MatrixD = null + if xe.dim2 > 0 and hparam("q").toInt > 0 then + xe_bfil = new MatrixD (xe.dim, xe.dim2) + for j <- xe.indices2 do xe_bfil(?, j) = backfill (xe(?, j)) // backfill each exogenous variable + if tFormScale != null then + val tForm_exo = tFormScale (xe_bfil(0 until tr_size)) + xe_bfil = tForm_exo.f (xe_bfil) + + val powForm = PowForm (VectorD (0, Transform.hp("p").toDouble)) + val tForms = Map ("tForm_y" -> tForm_y, "powForm" -> powForm) + val xy = buildMatrix (xe_bfil, y_scl, hparam, bakcast, powForm) + val fname = if fname_ == null then formNames (xe.dim2, hparam) else fname_ new ARX_Quad (xy, y_scl, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms) end rescale //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build the input matrix by combining the p + spec columns for the trend and * endogenous variable with the q * xe.dim2 columns for the exogenous variables. 
- * @param xe the matrix of exogenous variable values - * @param y the response vector (time series data) + * @param xe_bfil the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) * @param hp_ the hyper-parameters * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForm the z-transform (rescale to standard normal) + * @param powForm the power transform */ - def buildMatrix (xe: MatrixD, y: VectorD, hp_ : HyperParameter, bakcast: Boolean, - tForm: VectorD | MatrixD => Transform = null): (MatrixD, TransformMap) = - - val (p, pp, q, spec, lwave) = (hp_("p").toInt, hp_("pp").toDouble, hp_("q").toInt, hp_("spec").toInt, hp_("lwave").toDouble) - val ppForm = powForm (VectorD (pp)) - var y_pp = ppForm.f(y) - var y_scl = y - - val tForms: TransformMap = - if tForm != null then - val tForm_y = tForm (y) - y_scl = tForm_y.f(y) - val tForm_endo = tForm (y_pp) - y_pp = tForm_endo.f(y_pp) - Map ("tForm_y" -> tForm_y, "tForm_endo" -> tForm_endo, "ppForm" -> ppForm) - else - Map ("tForm_y" -> null, "ppForm" -> ppForm) + def buildMatrix (xe_bfil: MatrixD, y: VectorD, hp_ : HyperParameter, bakcast: Boolean, + powForm: Transform = PowForm (VectorD (0, Transform.hp("p").toDouble))): MatrixD = - val x_endo = MatrixD (y_scl, y_pp).transpose + val (p, q, spec, lwave) = (hp_("p").toInt, hp_("q").toInt, hp_("spec").toInt, hp_("lwave").toDouble) + val y_pp = powForm.f(y) // apply power transformation + val x_endo = MatrixD (y, y_pp).ᵀ // add trend terms and terms for the endogenous variable - var xy = makeMatrix4T (y, spec, lwave, bakcast) ++^ // trend terms - makeMatrix4L (x_endo, p, bakcast) // lagged linear terms - - if xe.dim2 > 0 then - val xe_bfill = new MatrixD (xe.dim, xe.dim2) - for j <- xe.indices2 do xe_bfill(?, j) = backfill (xe(?, j)) - var x_exo = xe_bfill - if tForm != null then - val tForm_exo = tForm (x_exo) - x_exo = tForm_exo.f(x_exo) - xy = xy ++^ makeMatrix4L 
(x_exo, q, bakcast) - - (xy, tForms) + var xy = makeMatrix4T (y, spec, lwave, bakcast) ++^ // trend terms + makeMatrix4L (x_endo, p, bakcast) // lagged linear terms + + if xe_bfil != null and q > 0 then // rescale the exogenous variables + xy = xy ++^ makeMatrix4L (xe_bfil, q, bakcast) + xy end buildMatrix //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Form an array of names for the features included in the model. - * @param n_exo the number of exogenous variable - * @param hp_ the hyper-parameters - * @param n_fEn the number of functions used to map endogenous variables - * @param n_fEx the number of functions used to map exogenous variables + * @param n_exo the number of exogenous variable + * @param hp_ the hyper-parameters + * @param n_fEn the number of functions used to map endogenous variables (none for `ARX_Quad`) + * @param n_fExArr the number of functions used to map exogenous variables (none for `ARX_Quad`) */ - def formNames (n_exo: Int, hp_ : HyperParameter, n_fEn: Int = 0, n_fEx: Int = 0): Array [String] = + def formNames (n_exo: Int, hp_ : HyperParameter, n_fEn: Int = 0, n_fExArr: Array [Int] = null): Array [String] = + val (p, q, spec) = (hp_("p").toInt, hp_("q").toInt, hp_("spec").toInt) val names = ArrayBuffer [String] () for j <- 0 until n_exo; k <- q to 1 by -1 do names += s"xe${j}l$k" - MakeMatrix4TS.formNames (spec, p, true) ++ names.toArray + MakeMatrix4TS.formNames (spec, p, Transform.hp("p").toDouble) ++ names.toArray end formNames end ARX_Quad @@ -273,17 +281,16 @@ end aRX_QuadTest2 // val y = yy // full val y = yy(0 until 116) // clip the flat end val hh = 6 // maximum forecasting horizon - hp("pp") = 1.5 // use 1.9 for the power/exponent (default is 2) + Transform.hp("p") = 1.5 // use 1.5, 1.9 for the power/exponent (default is 2) hp("lwave") = 20 // wavelength (distance between peaks) for p <- 6 to 6; q <- 4 to 4; s <- 1 to 1 do // number of endo lags; exo lags; trend hp("p") = p // endo lags hp("q") 
= q // exo lags hp("spec") = s // trend specification: 0, 1, 2, 3, 5 - val mod = ARX_Quad (xe, y, hh) // create model for time series data -// val mod = ARX_Quad.rescale(xe, y, hh) // create model for time series data - - mod.inSampleTest () // In-sample Testing + val mod = ARX_Quad (xe, y, hh) // create model for time series data +// val mod = ARX_Quad.rescale (xe, y, hh) // create model for time series data + mod.inSample_Test () // In-sample Testing println (mod.summary ()) // statistical summary of fit end for @@ -308,22 +315,22 @@ end aRX_QuadTest3 // val y = yy // full val y = yy(0 until 116) // clip the flat end val hh = 6 // maximum forecasting horizon - hp("pp") = 1.5 // use 1.5 for the power/exponent (default is 2) + Transform.hp("p") = 1.5 // use 1.5 for the power/exponent (default is 2) hp("lwave") = 20 // wavelength (distance between peaks) for p <- 6 to 6; q <- 4 to 4; s <- 1 to 1 do // number of lags (endo, exo); trend hp("p") = p // endo lags hp("q") = q // exo lags hp("spec") = s // trend specification: 0, 1, 2, 3, 5 - val mod = ARX_Quad (xe, y, hh) -// val mod = ARX_Quad.rescale (xe, y, hh) // create model for time series data + val mod = ARX_Quad (xe, y, hh) // create model for time series data +// val mod = ARX_Quad.rescale (xe, y, hh) // create model for time series data banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") mod.trainNtest_x ()() // use customized trainNtest_x mod.setSkip (0) mod.rollValidate () // TnT with Rolling Validation println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") - mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set + mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set // println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") end for @@ -352,10 +359,10 @@ end aRX_QuadTest4 val p = 10 val q = 10 hp("p") = p // endo lags - hp("pp") = 1.5 // use 1.5 for the power/exponent (default is 2) hp("q") = q 
// exo lags hp("spec") = 5 // trend specification: 0, 1, 2, 3, 5 hp("lwave") = 20 // wavelength (distance between peaks) + Transform.hp("p") = 1.5 // use 1.5 for the power/exponent (default is 2) val mod = ARX_Quad (xe, y, hh) // create model for time series data banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") @@ -372,8 +379,7 @@ end aRX_QuadTest4 // val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, sMAPE, R^2 cv val k = cols.size println (s"k = $k") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "R^2 cv"), - s"R^2 vs n for ${mod.modelName}", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for ${mod.modelName}", lines = true) println (s"rSq = $rSq") end aRX_QuadTest5 diff --git a/src/main/scala/scalation/modeling/forecasting/ARX_Quad_D.scala b/src/main/scala/scalation/modeling/forecasting/ARX_Quad_D.scala index a5fbcc23f..def77ef1e 100644 --- a/src/main/scala/scalation/modeling/forecasting/ARX_Quad_D.scala +++ b/src/main/scala/scalation/modeling/forecasting/ARX_Quad_D.scala @@ -12,9 +12,12 @@ package scalation package modeling package forecasting +import scala.collection.mutable.{LinkedHashSet => LSET} + import scalation.mathstat._ import MakeMatrix4TS._ +import TransformT._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `ARX_Quad_D` class provides basic time series analysis capabilities for @@ -43,7 +46,7 @@ class ARX_Quad_D (x: MatrixD, y: MatrixD, hh: Int, n_exo: Int, fname: Array [Str private val debug = debugf ("ARX_Quad_D", true) // debug function - modelName = s"ARX_Quad_D($p, $q, $n_exo)" + _modelName = s"ARX_Quad_D_${p}_${q}_$n_exo" debug ("init", s"$modelName with $n_exo exogenous variables and additional term spec = $spec") // debug ("init", s"[ x | y ] = ${x ++^ y}") @@ -58,61 +61,87 @@ object ARX_Quad_D extends MakeMatrix4TS: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create an `ARX_Quad_D` object 
by building an input matrix x and then calling the constructor. - * @param xe the matrix of exogenous variable values - * @param y the response vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param fname_ the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults for `MakeMatrix4TS.hp`) - * @param fEndo the array of functions used to transform endogenous variables - * @param fExo the array of functions used to transform exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param xe the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters (defaults for `MakeMatrix4TS.hp`) + * @param fEndo_enab the set of transforms to be used for the endogenous + * @param fExo_enab the array containing the sets of transforms to be used for the exogenous + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) */ def apply (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array [Transform] = null, fExo: Array [Transform] = null, + fEndo_enab: LSET [TransformT] = null, + fExo_enab: Array [LSET [TransformT]] = null, bakcast: Boolean = false): ARX_Quad_D = - val (xy, tForms) = ARX_Quad.buildMatrix (xe, y, hparam, bakcast) + var xe_bfil: MatrixD = null + if xe.dim2 > 0 and hparam("q").toInt > 0 then + xe_bfil = new MatrixD (xe.dim, xe.dim2) + for j <- xe.indices2 do xe_bfil(?, j) = backfill (xe(?, j)) // backfill each exogenous variable + + val xy = ARX_Quad.buildMatrix (xe_bfil, y, hparam, bakcast) val yy = makeMatrix4Y 
(y, hh, bakcast) - val fname = if fname_ == null then ARX_Quad.formNames (xe.dim2, hparam) else fname_ - new ARX_Quad_D (xy, yy, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms) + val fname = formNames (xe.dim2, hparam) + new ARX_Quad_D (xy, yy, hh, xe.dim2, fname, tRng, hparam, bakcast) end apply //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create an `ARX_Quad_D` object by building an input matrix xy and then calling the * `ARX_Quad_D` constructor. Also rescale the input data. - * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForm the z-transform (rescale to standard normal) + * @param xe the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param fEndo_enab the set of transforms to be used for the endogenous + * @param fExo_enab the array containing the sets of transforms to be used for the exogenous + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param tFormT the transform for rescaling endogenous and exogenous */ def rescale (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array [Transform] = null, fExo: Array [Transform] = null, + fEndo_enab: LSET [TransformT] = null, + fExo_enab: Array [LSET [TransformT]] = null, bakcast: Boolean = false, - tForm: VectorD | MatrixD => Transform = 
x => zForm(x)): ARX_Quad_D = - - val (xy, tForms) = ARX_Quad.buildMatrix (xe, y, hparam, bakcast, tForm) - val y_scl = tForms ("tForm_y").f(y) - val yy = makeMatrix4Y (y_scl, hh, bakcast) - if tForms("tForm_y").getClass.getSimpleName == "zForm" then hp("nneg") = 0 - val fname = if fname_ == null then formNames (xe.dim2, hparam) else fname_ + tFormT: TransformT = MinMax): ARX_Quad_D = + + if tFormT.name == "NormForm" then hparam("nneg") = 0 + + // rescale y + val tFormScale = tFormT.form + val tr_size = Model.trSize (y.dim) + val tForm_y = tFormScale (y(0 until tr_size)) // use (mean, std) of training set for both In-sample and TnT + val y_scl = tForm_y.f(y) + + var xe_bfil: MatrixD = null + if xe.dim2 > 0 and hparam("q").toInt > 0 then + xe_bfil = new MatrixD (xe.dim, xe.dim2) + for j <- xe.indices2 do xe_bfil(?, j) = backfill (xe(?, j)) // backfill each exogenous variable + if tFormScale != null then + val tForm_exo = tFormScale (xe_bfil(0 until tr_size)) + xe_bfil = tForm_exo.f (xe_bfil) + + val powForm = PowForm (VectorD (0, Transform.hp("p").toDouble)) + val tForms = Map ("tForm_y" -> tForm_y, "powForm" -> powForm) + val xy = ARX_Quad.buildMatrix (xe_bfil, y_scl, hparam, bakcast, powForm) + val yy = makeMatrix4Y (y_scl, hh, bakcast) + val fname = formNames (xe.dim2, hparam) new ARX_Quad_D (xy, yy, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms) end rescale //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Form an array of names for the features included in the model. 
- * @param n_exo the number of exogenous variable - * @param hp_ the hyper-parameters - * @param n_fEn the number of functions used to map endogenous variables - * @param n_fEx the number of functions used to map exogenous variables + * @param n_exo the number of exogenous variable + * @param hp_ the hyper-parameters + * @param n_fEn the number of functions used to map endogenous variables (none for `ARX_Quad_D`) + * @param n_fExArr the number of functions used to map exogenous variables (none for `ARX_Quad_D`) */ - def formNames (n_exo: Int, hp_ : HyperParameter, n_fEn: Int, n_fEx: Int): Array [String] = - ARX_Quad.formNames (n_exo, hp_, n_fEn, n_fEx) + def formNames (n_exo: Int, hp_ : HyperParameter, n_fEn: Int = 0, n_fExArr: Array [Int] = null): Array [String] = + ARX.formNames (n_exo, hp_, n_fEn, n_fExArr) end formNames end ARX_Quad_D @@ -181,15 +210,16 @@ end aRX_Quad_DTest2 // val y = yy // full val y = yy(0 until 116) // clip the flat end val hh = 6 // maximum forecasting horizon - hp("pp") = 1.5 hp("lwave") = 20 // wavelength (distance between peaks) + Transform.hp("p") = 1.5 // power on Pow transform for p <- 6 to 6; q <- 4 to 4; s <- 1 to 1 do // number of lags (endo, exo); trend hp("p") = p // mumber of endo lags hp("q") = q // mumber of exo lags hp("spec") = s // trend specification: 0, 1, 2, 3, 5 +// val mod = ARX_Quad_D (xe, y, hh) // create model for time series data val mod = ARX_Quad_D.rescale (xe, y, hh) // create model for time series data - mod.inSampleTest () // In-sample Testing + mod.inSample_Test () // In-sample Testing println (mod.summary ()) // statistical summary of fit FIX - crashes end for @@ -214,14 +244,14 @@ end aRX_Quad_DTest3 // val y = yy // full val y = yy(0 until 116) // clip the flat end val hh = 6 // maximum forecasting horizon - hp("pp") = 1.5 hp("lwave") = 20 // wavelength (distance between peaks) + Transform.hp("p") = 1.5 // power on Pow transform for p <- 6 to 6; q <- 4 to 4; s <- 1 to 1 do // number of lags (endo, exo); 
trend hp("p") = p // number of endo lags hp("q") = q // try various rules hp("spec") = s // trend specification: 0, 1, 2, 3, 5 -// val mod = ARX_Quad_D (xe, y, hh) +// val mod = ARX_Quad_D (xe, y, hh) // create model for time series data val mod = ARX_Quad_D.rescale (xe, y, hh) // create model for time series data banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") mod.trainNtest_x ()() // use customized trainNtest_x @@ -235,36 +265,3 @@ end aRX_Quad_DTest3 end aRX_Quad_DTest4 - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_Quad_DTest5` main function tests the `ARX_Quad_D` object's ability to build input - * matrices. Build an input/predictor data matrix for the COVID-19 dataset. - * > runMain scalation.modeling.forecasting.aRX_Quad_DTest5 - * -@main def aRX_Quad_DTest5 (): Unit = - -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val exo_vars = Array ("icu_patients", "hosp_patients") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val p = 3 // the number of endo lags - val pp = 3 - val q = 2 // the number of exo lags - val spec = 1 // additional terms - val lwave = 20 // wavelength (distance between peaks) - val hh = 2 // maximum forecasting horizon - - println (s"y = $y") - - val (x, y_) = ARX_Quad_D.buildMatrix4TS (xe, y, p, pp, q, hh, spec, lwave) - - println (s"y.dim = ${y.dim}, x.dims = ${x.dims}, y_.dims = ${y_.dims}") - -end aRX_Quad_DTest5 - */ - diff --git a/src/main/scala/scalation/modeling/forecasting/ARX_SR.scala b/src/main/scala/scalation/modeling/forecasting/ARX_SR.scala new file mode 100644 index 000000000..1bf342140 --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/ARX_SR.scala @@ -0,0 +1,518 @@ + 
+//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller, Yousef Fekri Dabanloo + * @version 2.0 + * @date Tue Jan 14 15:47:45 EST 2025 + * @see LICENSE (MIT style license file). + * + * @note Model: Auto-Regressive on lagged y and xe with SR terms (ARX_SR) using OLS + * + * @see `scalation.modeling.Regression` + */ + +package scalation +package modeling +package forecasting + +import scala.collection.mutable.{ArrayBuffer, LinkedHashSet => LSET} + +import scalation.mathstat._ + +import MakeMatrix4TS._ +import TransformT._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ARX_SR` class provides time series analysis capabilities for ARX Symbolic + * Regression (SR) models. These models include trend, linear, power, root, and cross terms + * for the single endogenous (y) variable and zero or more exogenous (xe) variables. + * Given time series data stored in vector y and matrix xe, its next value y_t = combination + * of last p values of y, y^p, y^r and the last q values of each exogenous variable xe_j, + * again in linear, power and root forms (as well as ENDO-EXO cross terms). + * + * y_t = b dot x_t + e_t + * + * where y_t is the value of y at time t, x_t is a vector of inputs, and e_t is the + * residual/error term. + * @see `MakeMatrix4TS` for hyper-parameter specifications. 
+ * @param x the data/input matrix (lagged columns of y and xe) @see `ARX_SR.apply` + * @param y the response/output vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param n_exo the number of exogenous variables + * @param fname the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param tForms the map of transformations applied + * @param fExo_sz the array of the number of transformations for each exogenous + */ +class ARX_SR (x: MatrixD, y: VectorD, hh: Int, n_exo: Int, fname: Array [String], + tRng: Range = null, hparam: HyperParameter = hp, + bakcast: Boolean = false, + tForms: TransformMap = Map ("tForm_y" -> null), + fExo_sz: Array [Int] = null) + extends ARX (x, y, hh, n_exo, fname, tRng, hparam, bakcast, tForms): + + private val debug = debugf ("ARX_SR", true) // debug function + private val cross = hparam("cross").toInt == 1 // whether to include ENDO-EXO cross terms + + _modelName = s"ARX_SR_${p}_${q}_$n_exo" + + debug ("init", s"$modelName with with $n_exo exogenous variables and additional term spec = $spec") +// debug ("init", s"[ x | y ] = ${x :^+ y}") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forge a new vector from the first spec values of x, the last p-h+1 values + * of x (past values) and recent values 1 to h-1 from the forecasts. 
+ * @param xx the t-th row of the input matrix (lagged actual values) + * @param yy the t-th row of the forecast matrix (forecasted future values) + * @param h the forecasting horizon, number of steps ahead to produce forecasts + */ + override def forge (xx: VectorD, yy: VectorD, h: Int): VectorD = + // add terms for the endogenous variable + val n_endo = spec + p // number of trend + endogenous values + val x_act = xx(n_endo-(p+1-h) until n_endo) // get actual lagged y-values (endogenous) + val nyy = p - x_act.dim // number of forecasted values needed + val x_fcast = yy(h-nyy until h) // get forecasted y-values + + var xy = x_act ++ x_fcast // original values before any mapping +// val x_fEndo = scaleCorrection (x_fcast) // needed if transform first then rescaling + val tFormsEndo = tForms("fEndo") + val n_fEndo = tFormsEndo.length // number of functions used to map endogenous variables + + for j <- 0 until n_fEndo do + val x_act_f = xx((j+1)*p + n_endo-(p+1-h) until (j+1)*p + n_endo) // get transformed lagged endogenous variable + xy = xy ++ x_act_f ++ tFormsEndo(j).f(x_fcast) + + // add terms for the exogenous variables + val crs = if cross then 1 else 0 // whether to add endogenous-exogenous cross terms + val n_fExo = fExo_sz.sum + val count = n_exo * (1 + n_fExo + crs) + for j <- 0 until count do + xy = xy ++ hide (xx(n_endo+n_fEndo*p + j*q until n_endo+n_fEndo*p + (j+1)*q), h) // get actual and transformed lagged for exo variable j + xx(0 until spec) ++ xy + end forge + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Make re-scaling corrections to the forecasted y-values. 
+ * @param x_fcast the forecasted y-values + * + def scaleCorrection (x_fcast: VectorD): Array [VectorD] = + val x_fEndo = Array.ofDim [VectorD] (n_fEndo) + if tForms("tForm_y") != null then + val f_tForm = Array.ofDim [FunctionV2V] (n_fEndo) + + for i <- 0 until n_fEndo do f_tForm(i) = (tForms("fEndo")(i).f(_: VectorD)) ⚬ (tForms("tForm_y").fi(_: VectorD)) + + var x_fcast_fEndo = MatrixD (f_tForm(0)(x_fcast)).ᵀ + for i <- 1 until n_fEndo do x_fcast_fEndo = x_fcast_fEndo :^+ f_tForm(i)(x_fcast) + x_fcast_fEndo = tForms("tForm_endo").f(x_fcast_fEndo) + for i <- 0 until n_fEndo do x_fEndo(i) = x_fcast_fEndo(?, i) + else + for i <- 0 until n_fEndo do x_fEndo(i) = tForms("fEndo")(i).f(x_fcast) + x_fEndo + end scaleCorrection + */ + +end ARX_SR + +import Example_Covid._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ARX_SR` companion object provides factory methods for the `ARX_SR` class. + */ +object ARX_SR extends MakeMatrix4TS: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create an `ARX_SR` object by building an input matrix xy and then calling the + * `ARX_SR` constructor. 
+ * @param xe the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param fEndo_enab the set of transforms to be used for the endogenous + * @param fExo_enab the array containing the sets of transforms to be used for the exogenous + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ + def apply (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, + tRng: Range = null, hparam: HyperParameter = hp, + fEndo_enab: LSET [TransformT] = LSET (Pow), + fExo_enab: Array [LSET [TransformT]] = Array (LSET (Pow), LSET (Pow)), + bakcast: Boolean = false): ARX_SR = + + val (n_fExo, n_xe) = (fExo_enab.length, xe.dim2) + require (n_fExo == n_xe, s"Length of fExo_enab $n_fExo must = number of exogenous variables $n_xe") + + val (fEndo, fExo) = getTransforms (fEndo_enab, fExo_enab) + + var xe_bfil: MatrixD = null + if xe.dim2 > 0 and hparam("q").toInt > 0 then + xe_bfil = new MatrixD (xe.dim, xe.dim2) + for j <- xe.indices2 do xe_bfil(?, j) = backfill (xe(?, j)) // backfill each exogenous variable + + val fEndo_size = fEndo_enab.size + val fExo_sizeArr: Array [Int] = fExo_enab.map (_.size) + + val tForms = Map ("tForm_y" -> null, "fEndo" -> fEndo) + val xy = buildMatrix (xe_bfil, y, hparam, fEndo, fExo, bakcast) + val fname = if fname_ == null then formNames (xe.dim2, hparam, fEndo_size, fExo_sizeArr) else fname_ + new ARX_SR (xy, y, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms, fExo_sizeArr) + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create an `ARX_SR` object by building an input matrix xy and then calling the + * `ARX_SR` constructor, with rescaling of endogneous and exogenous variable 
values. + * @param xe the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param fEndo_enab the set of transforms to be used for the endogenous + * @param fExo_enab the array containing the sets of transforms to be used for the exogenous + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param tFormT the transform for rescaling endogenous and exogenous + */ + def rescale (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, + tRng: Range = null, hparam: HyperParameter = hp, + fEndo_enab: LSET [TransformT] = LSET (Pow), + fExo_enab: Array [LSET [TransformT]] = Array (LSET (Pow), LSET (Pow)), + bakcast: Boolean = false, + tFormT: TransformT = MinMax): ARX_SR = + + val (n_fExo, n_xe) = (fExo_enab.length, xe.dim2) + require (n_fExo == n_xe, s"Length of fExo_enab $n_fExo must = number of exogenous variables $n_xe") + + if tFormT.name == "NormForm" then hparam("nneg") = 0 + + // rescale y + val tFormScale = tFormT.form + val tr_size = Model.trSize (y.dim) + val tForm_y = tFormScale (y(0 until tr_size)) // use (mean, std) of training set for both In-sample and TnT + val y_scl = tForm_y.f(y) + + var xe_bfil: MatrixD = null + if xe.dim2 > 0 and hparam("q").toInt > 0 then + xe_bfil = new MatrixD (xe.dim, xe.dim2) + for j <- xe.indices2 do xe_bfil(?, j) = backfill (xe(?, j)) // backfill each exogenous variable + if tFormScale != null then + val tForm_exo = tFormScale (xe_bfil(0 until tr_size)) + xe_bfil = tForm_exo.f (xe_bfil) // rescale the backfilled exogenous variable + + val fEndo_size = fEndo_enab.size + val fExo_sizeArr: Array [Int] = fExo_enab.map (_.size) + + val (fEndo, fExo) = getTransforms (fEndo_enab, fExo_enab) + val tForms = Map 
("tForm_y" -> tForm_y, "fEndo" -> fEndo) + val xy = buildMatrix (xe_bfil, y_scl, hparam, fEndo, fExo, bakcast) + val fname = if fname_ == null then formNames (xe.dim2, hparam, fEndo_size, fExo_sizeArr) else fname_ + new ARX_SR (xy, y_scl, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms, fExo_sizeArr) + end rescale + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build the input matrix by combining the spec + p columns for the trend and + * endogenous variable with the q * xe.dim2 columns for the exogenous variables. + * When cross = true, additional cross terms will be added. Columns produced + * by transformations will be added as well. + * @param xe_bfil the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hp_ the hyper-parameters + * @param fEndo the transformation functions to apply on the endogenous variables + * @param fExo the transformation functions to apply on the exogenous variables + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ + def buildMatrix (xe_bfil: MatrixD, y: VectorD, hp_ : HyperParameter, + fEndo: Array [Transform], fExo: Array [Array [Transform]], bakcast: Boolean): MatrixD = + + val (p, q, spec, lwave, cross) = (hp_("p").toInt, hp_("q").toInt, hp_("spec").toInt, hp_("lwave").toDouble, hp_("cross").toInt == 1) + + // apply transformations fEndo to the endogenous variables and add these columns to x_endo + var x_endo = MatrixD (y).ᵀ // make a matrix out of vector y + for tr <- fEndo do x_endo = x_endo :^+ tr.f(y) // add each transformation of the endogenous variable + + // make matrix xy for trend terms and lagged terms of the endogenous variable + var xy = makeMatrix4T (y, spec, lwave, bakcast) ++^ // trend terms + makeMatrix4L (x_endo, p, bakcast) // lagged linear terms + + // apply transformations fExo to the exogenous variables and add their columns to x_exo + if xe_bfil != 
null and q > 0 then + var x_exo = new MatrixD (xe_bfil.dim, 0) + for j <- xe_bfil.indices2 do + val xe_j = xe_bfil(?, j) // extract the (j+1)th exogenous variable + x_exo = x_exo :^+ xe_j // add the exogenous variable + val fExo_j = fExo(j) // extract the transformations for the (j+1)th exogenous variable + if fExo_j.length > 0 then + for tr <- fExo_j do x_exo = x_exo :^+ tr.f(xe_j) // add each transformation of the exogenous variable + + // add cross terms of the endogenous and exogenous variables + if cross then x_exo = x_exo ++^ y *~: xe_bfil // element-wise multiplication of vector y and matrix xe + xy = xy ++^ makeMatrix4L (x_exo, q, bakcast) // add lagged exogenous term to xy + + println (s"xy.dims = ${xy.dims}") + xy + end buildMatrix + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Form vectors for the initial weights and their bounds for the transforms. + * @param fEndo_enab the set of transforms to be used for the endogenous + * @param fExo_enab the array containing the sets of transforms to be used for the exogenous + */ + private def initializeW (fEndo_enab: LSET [TransformT], fExo_enab: Array [LSET [TransformT]]): VectorD = + + var wInit = new VectorD (0) + // order: endo's transforms, exo1's transforms, exo2's transforms, ... + + if fEndo_enab != null then + for t <- fEndo_enab do wInit = wInit ++ t.wlu.w + + if fExo_enab.length > 0 then + for set <- fExo_enab if set != null do + for t <- set do wInit = wInit ++ t.wlu.w + wInit + end initializeW + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Form arrays of transforms object using the vector of transform parameters. 
+ * @param fEndo_enab the set of transforms to be used for the endogenous + * @param fExo_enab the array containing the sets of transforms to be used for the exogenous + */ + def getTransforms (fEndo_enab: LSET [TransformT], fExo_enab: Array [LSET [TransformT]]): + (Array [Transform], Array [Array [Transform]]) = + + val wInit = initializeW (fEndo_enab, fExo_enab) + val listEndo = new ArrayBuffer [Transform] () + var i = 0 + if fEndo_enab != null then + for t <- fEndo_enab do + listEndo += t.form (VectorD (wInit(i), wInit(i+1))) + i += 2 + + val listExo = new Array [Array [Transform]] (fExo_enab.length) + if fExo_enab.length > 0 then + var k = 0 + for set <- fExo_enab do + val listExo_k = new ArrayBuffer [Transform] () + if set != null then + for t <- set do + listExo_k += t.form (VectorD (wInit(i), wInit(i+1))) + i += 2 + listExo(k) = listExo_k.toArray + k += 1 + + (listEndo.toArray, listExo) + end getTransforms + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Form an array of names for the features included in the model. 
+ * @param n_exo the number of exogenous variable + * @param hp_ the hyper-parameters + * @param n_fEn the number of functions used to map endogenous variables + * @param n_fExArr the number of functions used to map each exogenous variables + */ + def formNames (n_exo: Int, hp_ : HyperParameter, n_fEn: Int, n_fExArr: Array [Int]): Array [String] = + + val (spec, p, q, cross) = (hp_("spec").toInt, hp_("p").toInt, hp_("q").toInt, hp_("cross").toInt) + val names = ArrayBuffer [String] () + for i <- 0 until n_fEn; j <- p to 1 by -1 do names += s"f$i(yl$j)" // function lags endo terms + + // exogenous (match build order): + for j <- 0 until n_exo do + for k <- q to 1 by -1 do names += s"xe${j}l$k" // raw exogenous lags + + val n_fEx_j = n_fExArr(j) // transformations for this exo j + for i <- 0 until n_fEx_j do + for k <- q to 1 by -1 do names += s"g$j,$i(xe${j}l$k)" + end for + + if cross == 1 then + for j <- 0 until n_exo; k <- q to 1 by -1 do names += s"xe${j}l$k*yl$k" // lagged cross terms + + MakeMatrix4TS.formNames (spec, p) ++ names.toArray + end formNames + +end ARX_SR + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRX_SRTest` main function tests the `ARX_SR` class on real data: + * Forecasting Lake Levels using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.aRX_SRTest + * +@main def aRX_SRTest (): Unit = + + val hh = 3 // maximum forecasting horizon + + val mod = ARX_SR (y, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") + mod.trainNtest_x ()() // train and test on full dataset + + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") + +end aRX_SRTest + */ + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRX_SRTest2` main function tests the `ARX_SR` class on real data: + * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. + * Test forecasts (h = 1 to hh steps ahead forecasts). + * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.aRX_SRTest2 + * +@main def aRX_SRTest2 (): Unit = + + val hh = 3 // maximum forecasting horizon + + val mod = ARX_SR (y, hh) // create model for time series data + banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") + mod.trainNtest_x ()() // train and test on full dataset + + mod.rollValidate () // TnT with Rolling Validation + println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + +end aRX_SRTest2 + */ + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRX_SRTest3` main function tests the `ARX_SR` class on real data: + * Forecasting COVID-19 using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.aRX_SRTest3 + */ +@main def aRX_SRTest3 (): Unit = + +// val exo_vars = NO_EXO + val exo_vars = Array ("icu_patients") +// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val (xxe, yy) = loadData (exo_vars, response) + println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + +// val xe = xxe // full + val xe = xxe(0 until 116) // clip the flat end +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + hp("lwave") = 20 // wavelength (distance between peaks) +// hp("cross") = 1 // 1 => add cross terms + Transform.hp("p") = 1.5 // the power to use in Pow + + val fEn = LSET (Pow) + val fEx = Array (LSET (Pow)) + + for p <- 6 to 6; s <- 1 to 1; q <- 6 to 6 do // number of lags; trend; number of exo lags + hp("p") = p // endo lags + hp("q") = q // exo lags + hp("spec") = s // trend specification: 0, 1, 2, 3, 5 + + val mod = ARX_SR (xe, y, hh, fEndo_enab = fEn, fExo_enab = fEx) // create model for time series data + mod.inSample_Test () // In-sample Testing + println (mod.summary ()) // statistical summary of fit + end for + +end aRX_SRTest3 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRX_SRTest4` main function tests the `ARX_SR` class on real data: + * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.aRX_SRTest4 + */ +@main def aRX_SRTest4 (): Unit = + + val exo_vars = Array ("icu_patients") +// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val (xxe, yy) = loadData (exo_vars, response) + println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + +// val xe = xxe // full + val xe = xxe(0 until 116) // clip the flat end +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + Transform.hp("p") = 1.5 // the power to use in Pow + hp("lwave") = 20 // wavelength (distance between peaks) +// hp("cross") = 1 // 1 => add cross terms + + val fEn = LSET (Pow) + val fEx = Array (LSET (Pow)) + + for p <- 6 to 6; q <- 4 to 4; s <- 1 to 1 do // number of lags (endo, exo); trend + hp("p") = p // endo lags + hp("q") = q // exo lags + hp("spec") = s // trend specification: 0, 1, 2, 3, 5 + + val mod = ARX_SR (xe, y, hh, fEndo_enab = fEn, fExo_enab = fEx) // create model for time series data + banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest_x ()() // use customized trainNtest_x + + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + mod.diagnoseAll (mod.getY, mod.getYf) + + banner ("rollValidate") + mod.setSkip (0) + mod.rollValidate () // TnT with Rolling Validation + println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") + mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set +// println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + end for + +end aRX_SRTest4 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRX_SRTest5` main function tests the `ARX_SR` class on real data: + * Forecasting COVID-19 using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). + * This version performs feature selection. 
+ * > runMain scalation.modeling.forecasting.aRX_SRTest5 + */ +@main def aRX_SRTest5 (): Unit = + + val exo_vars = Array ("icu_patients") +// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val (xxe, yy) = loadData (exo_vars, response) + println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + +// val xe = xxe // full + val xe = xxe(0 until 116) // clip the flat end +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + val p = 6 + val q = 6 + Transform.hp("p") = 1.5 // the power to use in Pow + RidgeRegression.hp("lambda") = 1.0 // regularization/shrinkage parameter + hp("p") = p // endo lags + hp("q") = q // exo lags + hp("spec") = 5 // trend specification: 0, 1, 2, 3, 5 + hp("lwave") = 20 // wavelength (distance between peaks) +// hp("cross") = 1 // 1 => add cross terms + + val fEn = LSET (Pow) // functions to apply to endo lags + val fEx = Array (LSET (Pow)) // functions to apply to exo lags + + val mod = ARX_SR (xe, y, hh, fEndo_enab = fEn, fExo_enab = fEx) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest_x ()() // train and test on full dataset + println (mod.summary ()) // statistical summary of fit + + mod.setSkip(0) + mod.rollValidate () // TnT with Rolling Validation + mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng(y.dim)) + + banner ("Feature Selection Technique: Stepwise") + val (cols, rSq) = mod.stepwiseSelAll () // R^2, R^2 bar, sMAPE, R^2 cv +// val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, sMAPE, R^2 cv + val k = cols.size + println (s"k = $k") + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for ${mod.modelName}", lines = true) + println (s"rSq = $rSq") + +end aRX_SRTest5 + diff --git a/src/main/scala/scalation/modeling/forecasting/ARX_SR_D.scala b/src/main/scala/scalation/modeling/forecasting/ARX_SR_D.scala new file mode 100644 index 
000000000..053feb901 --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/ARX_SR_D.scala @@ -0,0 +1,305 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Yousef Fekri Dabanloo + * @version 2.0 + * @date Thu Jan 30 21:15:45 EST 2025 + * @see LICENSE (MIT style license file). + * + * @note Model: Auto-Regressive on lagged y and xe with SR terms (ARX_SR_D) using OLS - Direct Forecasting + * + * @see `scalation.modeling.Regression` + */ + +package scalation +package modeling +package forecasting + +import scala.collection.mutable.{LinkedHashSet => LSET} + +import scalation.mathstat._ + +import MakeMatrix4TS._ +import TransformT._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ARX_SR_D` class provides time series analysis capabilities for ARX_D Symbolic + * Regression (SR) models. These models include trend, linear, power, root, and cross terms + * for the single endogenous (y) variable and zero or more exogenous (xe) variables. + * Given time series data stored in vector y and matrix xe, its next value y_t = combination + * of last p values of y, y^p, y^r and the last q values of each exogenous variable xe_j, + * again in linear, power and root forms (as well as ENDO-EXO cross terms). + * + * y_t = b dot x_t + e_t + * + * where y_t is the value of y at time t, x_t is a vector of inputs, and e_t is the + * residual/error term. + * @see `MakeMatrix4TS` for hyper-parameter specifications. 
+ * @param x the data/input matrix (lagged columns of y and xe) @see `ARX_SR_D.apply` + * @param y the response/output vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param n_exo the number of exogenous variables + * @param fname the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param tForms the map of transformations applied + */ +class ARX_SR_D (x: MatrixD, y: MatrixD, hh: Int, n_exo: Int, fname: Array [String], + tRng: Range = null, hparam: HyperParameter = hp, + bakcast: Boolean = false, + tForms: TransformMap = Map ("tForm_y" -> null)) + extends ARX_D (x, y, hh, n_exo, fname, tRng, hparam, bakcast, tForms): + + private val debug = debugf ("ARX_SR_D", true) // debug function + + _modelName = s"ARX_SR_D_${p}_${q}_$n_exo" + + debug ("init", s"$modelName with with $n_exo exogenous variables and additional term spec = $spec") + debug ("init", s"[ x | y ] = ${x ++^ y}") + +end ARX_SR_D + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ARX_SR_D` companion object provides factory methods for the `ARX_SR_D` class. + */ +object ARX_SR_D extends MakeMatrix4TS: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create an `ARX_SR_D` object by building an input matrix xy and then calling the + * `ARX_SR_D` constructor. 
+ * Caveat: only the first set of transformations is applied for `fExo_enab` + * @param xe the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param fEndo_enab the set of transforms to be used for the endogenous + * @param fExo_enab the array containing the sets of transforms to be used for the exogenous + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ + def apply (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, + tRng: Range = null, hparam: HyperParameter = hp, + fEndo_enab: LSET [TransformT] = LSET (Pow), + fExo_enab: Array [LSET [TransformT]] = Array (LSET (Pow), LSET (Pow)), + bakcast: Boolean = false): ARX_SR_D = + + val (n_fExo, n_xe) = (fExo_enab.length, xe.dim2) + require (n_fExo == n_xe, s"Length of fExo_enab $n_fExo must = number of exogenous variables $n_xe") + + val (fEndo, fExo) = ARX_SR.getTransforms (fEndo_enab, fExo_enab) + + var xe_bfil: MatrixD = null + if xe.dim2 > 0 and hparam("q").toInt > 0 then + xe_bfil = new MatrixD (xe.dim, xe.dim2) + for j <- xe.indices2 do xe_bfil(?, j) = backfill (xe(?, j)) // backfill each exogenous variable + + val fEndo_size = fEndo_enab.size + val fExo_sizeArr: Array [Int] = fExo_enab.map (_.size) + + val tForms = Map ("tForm_y" -> null, "fEndo" -> fEndo) + val xy = ARX_SR.buildMatrix (xe_bfil, y, hparam, fEndo, fExo, bakcast) + val fname = if fname_ == null then formNames (xe.dim2, hparam, fEndo_size, fExo_sizeArr) else fname_ + val yy = makeMatrix4Y (y, hh, bakcast) + new ARX_SR_D (xy, yy, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms) + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create an `ARX_SR_D` object by 
building an input matrix xy and then calling the + * `ARX_SR_D` constructor, with rescaling of endogneous and exogenous variable values. + * Caveat: only the first set of transformations is applied for `fExo_enab` + * @param xe the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param fEndo_enab the set of transforms to be used for the endogenous + * @param fExo_enab the array containing the sets of transforms to be used for the exogenous + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param tFormT the transform for rescaling endogenous and exogenous + */ + def rescale (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, + tRng: Range = null, hparam: HyperParameter = hp, + fEndo_enab: LSET [TransformT] = LSET (Pow), + fExo_enab: Array [LSET [TransformT]] = Array (LSET (Pow), LSET (Pow)), + bakcast: Boolean = false, + tFormT: TransformT = MinMax): ARX_SR_D = + + require (fExo_enab.length == xe.dim2, s"Length of fExo_enab must be the same as the number of exogenous variables") + + if tFormT.name == "NormForm" then hparam("nneg") = 0 + + // rescale y + val tFormScale = tFormT.form + val tr_size = Model.trSize (y.dim) + val tForm_y = tFormScale (y(0 until tr_size)) // use (mean, std) of training set for both In-sample and TnT + val y_scl = tForm_y.f(y) + + var xe_bfil: MatrixD = null + if xe.dim2 > 0 and hparam("q").toInt > 0 then + xe_bfil = new MatrixD (xe.dim, xe.dim2) + for j <- xe.indices2 do xe_bfil(?, j) = backfill (xe(?, j)) // backfill each exogenous variable + if tFormScale != null then + val tForm_exo = tFormScale (xe_bfil(0 until tr_size)) + xe_bfil = tForm_exo.f (xe_bfil) // rescale the backfilled exogenous variable + + 
val fEndo_size = fEndo_enab.size + val fExo_sizeArr: Array [Int] = fExo_enab.map (_.size) + + val (fEndo, fExo) = ARX_SR.getTransforms (fEndo_enab, fExo_enab) + val tForms = Map ("tForm_y" -> tForm_y, "fEndo" -> fEndo) + val xy = ARX_SR.buildMatrix (xe_bfil, y_scl, hparam, fEndo, fExo, bakcast) + val fname = if fname_ == null then formNames (xe.dim2, hparam, fEndo_size, fExo_sizeArr) else fname_ + val yy = makeMatrix4Y (y_scl, hh, bakcast) + new ARX_SR_D (xy, yy, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms) + end rescale + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Form an array of names for the features included in the model. + * @param n_exo the number of exogenous variable + * @param hp_ the hyper-parameters + * @param n_fEn the number of functions used to map endogenous variables + * @param n_fExArr the number of functions used to map exogenous variables + */ + def formNames (n_exo: Int, hp_ : HyperParameter, n_fEn: Int, n_fExArr: Array [Int]): Array [String] = + ARX_SR.formNames (n_exo, hp_, n_fEn, n_fExArr) + end formNames + +end ARX_SR_D + +import Example_Covid._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRX_SR_DTest3` main function tests the `ARX_SR_D` class on real data: + * Forecasting COVID-19 using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.aRX_SR_DTest3 + */ +@main def aRX_SR_DTest3 (): Unit = + +// val exo_vars = NO_EXO + val exo_vars = Array ("icu_patients") +// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val (xxe, yy) = loadData (exo_vars, response) + println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + +// val xe = xxe // full + val xe = xxe(0 until 116) // clip the flat end +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + hp("lwave") = 20 // wavelength (distance between peaks) +// hp("cross") = 1 // 1 => add cross terms + Transform.hp("p") = 1.5 // the power to use in Pow + + val fEn = LSET (Pow) + val fEx = Array (LSET (Pow), LSET (Pow)) + + for p <- 6 to 6; q <- 4 to 4; s <- 1 to 1 do // number of lags (endo, exo); trend + hp("p") = p // endo lags + hp("q") = q // exo lags + hp("spec") = s // trend specification: 0, 1, 2, 3, 5 + val mod = ARX_SR_D (xe, y, hh, fEndo_enab = fEn, fExo_enab = fEx) // create model for time series data + mod.inSample_Test () // In-sample Testing + println (mod.summary ()) // statistical summary of fit + end for + +end aRX_SR_DTest3 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRX_SR_DTest4` main function tests the `ARX_SR_D` class on real data: + * Forecasting COVID-19 using Train and Test (TnT). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.aRX_SR_DTest4 + */ +@main def aRX_SR_DTest4 (): Unit = + + val exo_vars = Array ("icu_patients") +// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val (xxe, yy) = loadData (exo_vars, response) + println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + +// val xe = xxe // full + val xe = xxe(0 until 116) // clip the flat end +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + hp("lwave") = 20 // wavelength (distance between peaks) +// hp("cross") = 1 // 1 => add cross terms + Transform.hp("p") = 1.5 // the power to use in Pow + + val fEn = LSET (Pow) + val fEx = Array (LSET (Pow)) + + for p <- 6 to 6; q <- 4 to 4; s <- 1 to 1 do // number of lags (endo, exo); trend + hp("p") = p // endo lags + hp("q") = q // exo lags + hp("spec") = s // trend specification: 0, 1, 2, 3, 5 + val mod = ARX_SR_D (xe, y, hh, fEndo_enab = fEn, fExo_enab = fEx) // create model for time series data + banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest_x ()() // use customized trainNtest_x + println (mod.summary ()) // statistical summary of fit + + mod.setSkip (0) + mod.rollValidate () // TnT with Rolling Validation + println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") + mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set +// println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + end for + +end aRX_SR_DTest4 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `aRX_SR_DTest5` main function tests the `ARX_SR_D` class on real data: + * Forecasting COVID-19 using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). + * This version performs feature selection. 
+ * > runMain scalation.modeling.forecasting.aRX_SR_DTest5 + */ +@main def aRX_SR_DTest5 (): Unit = + + val exo_vars = Array ("icu_patients") +// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val (xxe, yy) = loadData (exo_vars, response) + println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + +// val xe = xxe // full + val xe = xxe(0 until 116) // clip the flat end +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + val p = 6 + val q = 6 + Transform.hp("p") = 1.5 // the power to use in Pow + RidgeRegression.hp("lambda") = 1.0 // regularization/shrinkage parameter + hp("p") = p // endo lags + hp("q") = q // exo lags + hp("spec") = 5 // trend specification: 0, 1, 2, 3, 5 + hp("lwave") = 20 // wavelength (distance between peaks) +// hp("cross") = 1 // 1 => add cross terms + + val fEn = LSET (Pow) // functions to apply to endo lags + val fEx = Array (LSET (Pow)) // functions to apply to exo lags + + val mod = ARX_SR_D (xe, y, hh, fEndo_enab = fEn, fExo_enab = fEx) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest_x ()() // train and test on full dataset + println (mod.summary ()) // statistical summary of fit + + mod.setSkip(0) + mod.rollValidate () // TnT with Rolling Validation + mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng(y.dim)) + + banner ("Feature Selection Technique: Stepwise") + val (cols, rSq, modForc) = mod.featureSelectAtHorizon (h = 1, fsType = SelectionTech.Backward) //, cross = "many") + val k = cols.size + println (s"k = $k") + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for ${modForc.modelName}", lines = true) + println (s"rSq = $rSq") + +end aRX_SR_DTest5 + diff --git a/src/main/scala/scalation/modeling/forecasting/ARX_Symb.scala b/src/main/scala/scalation/modeling/forecasting/ARX_Symb.scala deleted file mode 100644 index 2b4b8f9f5..000000000 --- 
a/src/main/scala/scalation/modeling/forecasting/ARX_Symb.scala +++ /dev/null @@ -1,433 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Yousef Fekri Dabanloo - * @version 2.0 - * @date Tue Jan 14 15:47:45 EST 2025 - * @see LICENSE (MIT style license file). - * - * @note Model: Auto-Regressive on lagged y and xe with SR terms (ARX_Symb) using OLS - * - * @see `scalation.modeling.Regression` - */ - -package scalation -package modeling -package forecasting - -import scala.collection.mutable.ArrayBuffer - -import scalation.mathstat._ - -import MakeMatrix4TS._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Symb` class provides time series analysis capabilities for ARX Symbolic - * Regression (SR) models. These models include trend, linear, power, root, and cross terms - * for the single endogenous (y) variable and zero or more exogenous (xe) variables. - * Given time series data stored in vector y and matrix xe, its next value y_t = combination - * of last p values of y, y^p, y^r and the last q values of each exogenous variable xe_j, - * again in linear, power and root forms (as well as ENDO-EXO cross terms). - * - * y_t = b dot x_t + e_t - * - * where y_t is the value of y at time t, x_t is a vector of inputs, and e_t is the - * residual/error term. - * @see `MakeMatrix4TS` for hyper-parameter specifications. 
- * @param x the data/input matrix (lagged columns of y and xe) @see `ARX_Symb.apply` - * @param y the response/output vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param n_exo the number of exogenous variables - * @param fname the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForms the map of transformations applied - */ -class ARX_Symb (x: MatrixD, y: VectorD, hh: Int, n_exo: Int, fname: Array [String], - tRng: Range = null, hparam: HyperParameter = hp, - bakcast: Boolean = false, - tForms: TransformMap = Map ("tForm_y" -> null)) - extends ARX (x, y, hh, n_exo, fname, tRng, hparam, bakcast, tForms): - - private val debug = debugf ("ARX_Symb", true) // debug function - private val n_fEndo = tForms("fEndo").length // number of functions used to map endogenous variables - private val n_fExo = tForms("fExo").length // number of functions used to map exogenous variables - private val cross = hparam("cross").toInt == 1 // whether to include ENDO-EXO cross terms - - modelName = s"ARX_Symb($p, $q, $n_exo)" - - debug ("init", s"$modelName with with $n_exo exogenous variables and additional term spec = $spec") - debug ("init", s"[ x | y ] = ${x :^+ y}") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forge a new vector from the first spec values of x, the last p-h+1 values - * of x (past values) and recent values 1 to h-1 from the forecasts. 
- * @param xx the t-th row of the input matrix (lagged actual values) - * @param yy the t-th row of the forecast matrix (forecasted future values) - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - override def forge (xx: VectorD, yy: VectorD, h: Int): VectorD = - // add terms for the endogenous variable - val n_endo = spec + p // number of trend + endogenous values - val x_act = xx(n_endo-(p+1-h) until n_endo) // get actual lagged y-values (endogenous) - val nyy = p - x_act.dim // number of forecasted values needed - val x_fcast = yy(h-nyy until h) // get forecasted y-values - - var xy = x_act ++ x_fcast // original values before any mapping - val x_fEndo = scaleCorrection (x_fcast) - for i <- 0 until n_fEndo do - val x_act_f = xx((i+1)*p + n_endo-(p+1-h) until (i+1)*p + n_endo) // get transformed lagged endogenous variable - xy = xy ++ x_act_f ++ x_fEndo(i) // add transformed lagged forecasted y-values - - // add terms for the exogenous variables - val crs = if cross then 1 else 0 // whether to add endogenous-exogenous cross terms - val count = n_exo * (1 + n_fExo + crs) - for j <- 0 until count do - xy = xy ++ hide (xx(n_endo+n_fEndo*p + j*q until n_endo+n_fEndo*p + (j+1)*q), h) // get actual and transformed lagged for exo variable j - xx(0 until spec) ++ xy - end forge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Make re-scaling corrections to the forecasted y-values. 
- * @param x_fcast the forecasted y-values - */ - def scaleCorrection (x_fcast: VectorD): Array [VectorD] = - val x_fEndo = Array.ofDim [VectorD] (n_fEndo) - if tForms("tForm_y") != null then - val f_tForm = Array.ofDim [FunctionV2V] (n_fEndo) - - for i <- 0 until n_fEndo do f_tForm(i) = (tForms("fEndo")(i).f(_: VectorD)) ⚬ (tForms("tForm_y").fi(_: VectorD)) - - var x_fcast_fEndo = MatrixD (f_tForm(0)(x_fcast)).transpose - for i <- 1 until n_fEndo do x_fcast_fEndo = x_fcast_fEndo :^+ f_tForm(i)(x_fcast) - x_fcast_fEndo = tForms("tForm_endo").f(x_fcast_fEndo) - for i <- 0 until n_fEndo do x_fEndo(i) = x_fcast_fEndo(?, i) - else - for i <- 0 until n_fEndo do x_fEndo(i) = tForms("fEndo")(i).f(x_fcast) - x_fEndo - end scaleCorrection - -end ARX_Symb - -import Example_Covid._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Symb` companion object provides factory methods for the `ARX_Symb` class. - */ -object ARX_Symb extends MakeMatrix4TS: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX_Symb` object by building an input matrix xy and then calling the - * `ARX_Symb` constructor. 
- * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param fname_ the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param fEndo the array of functions used to transform endogenous variables - * @param fExo the array of functions used to transform exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ - def apply (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, - tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array [Transform] = Array (log1pForm), - fExo: Array [Transform] = Array (log1pForm), - bakcast: Boolean = false): ARX_Symb = - - val (n_fEndo, n_fExo) = (fEndo.length, fExo.length) - val (xy, tForms) = buildMatrix (xe, y, hparam, fEndo, fExo, bakcast) - val fname = if fname_ == null then formNames (xe.dim2, hparam, n_fEndo, n_fExo) else fname_ - new ARX_Symb (xy, y, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX_Symb` object by building an input matrix xy and then calling the - * `ARX_Symb` constructor, with rescaling of endogneous and exogenous variable values. 
- * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param fname_ the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param fEndo the array of functions used to transform endogenous variables - * @param fExo the array of functions used to transform exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForm the transform for y - */ - def rescale (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, - tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array [Transform] = Array (log1pForm), - fExo: Array [Transform] = Array (log1pForm), - bakcast: Boolean = false, - tForm: VectorD | MatrixD => Transform = x => zForm(x)): ARX_Symb = - - val (n_fEndo, n_fExo) = (fEndo.length, fExo.length) - val (xy, tForms) = buildMatrix (xe, y, hparam, fEndo, fExo, bakcast, tForm) - if tForms("tForm_y").getClass.getSimpleName == "zForm" then hp("nneg") = 0 - val y_scl = tForms("tForm_y").f(y) - val fname = if fname_ == null then formNames (xe.dim2, hparam, n_fEndo, n_fExo) else fname_ - new ARX_Symb (xy, y_scl, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms) - end rescale - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build the input matrix by combining the p + spec columns for the trend and - * endogenous variable with the q * xe.dim2 columns for the exogenous variables. 
- * @param xe the matrix of exogenous variable values - * @param y_ypp the response vector (time series data) and raised to power pp - * @param hp_ the hyper-parameters - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ - def buildMatrix (xe: MatrixD, y: VectorD, hp_ : HyperParameter, fEndo: Array [Transform], - fExo: Array [Transform], bakcast: Boolean, - tForm: VectorD | MatrixD => Transform = null): (MatrixD, TransformMap) = - - val (p, q, spec, lwave, cross) = (hp_("p").toInt, hp_("q").toInt, hp_("spec").toInt, hp_("lwave").toDouble, hp_("cross").toInt == 1) - - // apply transformations to the endogenous and exogenous variables - var y_fEndo = MatrixD(fEndo(0).f(y)).transpose - for i <- 1 until fEndo.length do y_fEndo = y_fEndo :^+ fEndo(i).f(y) // add each transformation of the endogenous variable - - var y_scl = y - - val tForms: TransformMap = - if tForm != null then - val tForm_y = tForm(y) - y_scl = tForm_y.f(y) - val tForm_endo = tForm(y_fEndo) - y_fEndo = tForm_endo.f(y_fEndo) - Map("tForm_y" -> tForm_y, "tForm_endo" -> tForm_endo, "fEndo" -> fEndo, "fExo" -> fExo) - else - Map("tForm_y" -> null, "fEndo" -> fEndo, "fExo" -> fExo) - - val x_endo = y_scl +^: y_fEndo - - // add trend terms and terms for the endogenous variable - var xy = makeMatrix4T (y, spec, lwave, bakcast) ++^ - makeMatrix4L (x_endo, p, bakcast) // lagged linear terms - - if xe.dim2 > 0 then - val xe_bfill = new MatrixD(xe.dim, xe.dim2) - for j <- xe.indices2 do xe_bfill(?, j) = backfill(xe(?, j)) - var x_exo = xe_bfill - for i <- fExo.indices do x_exo = x_exo ++^ fExo(i).f(xe_bfill) // add each transformation of the exogenous variable - // add cross terms of the endogenous and exogenous variables - if cross then x_exo = x_exo ++^ y *~: xe_bfill // element-wise multiplication of vector y and matrix xe - - if tForm != null then - val tForm_exo = tForm(x_exo) - x_exo = tForm_exo.f(x_exo) - xy = xy ++^ makeMatrix4L (x_exo, q, bakcast) - - 
(xy, tForms) - end buildMatrix - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Form an array of names for the features included in the model. - * @param n_exo the number of exogenous variable - * @param hp_ the hyper-parameters - * @param n_fEn the number of functions used to map endogenous variables - * @param n_fEx the number of functions used to map exogenous variables - */ - def formNames (n_exo: Int, hp_ : HyperParameter, n_fEn: Int, n_fEx: Int): Array [String] = - val (spec, p, q, cross) = (hp_("cross").toInt, hp_("p").toInt, hp_("q").toInt, hp_("cross").toInt) - val names = ArrayBuffer [String] () - for i <- 0 until n_fEn; j <- p to 1 by -1 do names += s"f$i(yl$j)" // function lags endo terms - - for j <- 0 until n_exo; k <- q to 1 by -1 do names += s"xe${j}l$k" // exo lag terms - for i <- 0 until n_fEx do - for j <- 0 until n_exo; k <- q to 1 by -1 do names += s"g$i(xe${j}l$k)" // function lags exo terms - - if cross == 1 then - for j <- 0 until n_exo; k <- q to 1 by -1 do names += s"xe${j}l$k*yl$k" // lagged cross terms - - MakeMatrix4TS.formNames (spec, p) ++ names.toArray - end formNames - -end ARX_Symb - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest` main function tests the `ARX_Symb` class on real data: - * Forecasting Lake Levels using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRX_SymbTest - * -@main def aRX_SymbTest (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = ARX_Symb (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - -end aRX_SymbTest - */ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest2` main function tests the `ARX_Symb` class on real data: - * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRX_SymbTest2 - * -@main def aRX_SymbTest2 (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = ARX_Symb (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.rollValidate () // TnT with Rolling Validation - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - -end aRX_SymbTest2 - */ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest3` main function tests the `ARX_Symb` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * > runMain scalation.modeling.forecasting.aRX_SymbTest3 - */ -@main def aRX_SymbTest3 (): Unit = - -// val exo_vars = NO_EXO - val exo_vars = Array ("icu_patients") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - val pp = 1.5 - hp("lwave") = 20 // wavelength (distance between peaks) -// hp("cross") = 1 // 1 => add cross terms - - val ff = Array [Transform] (powForm (VectorD (pp))) - val gg = Array [Transform] () - - for p <- 6 to 6; s <- 1 to 1; q <- 6 to 6 do // number of lags; trend; number of exo lags - hp("p") = p // endo lags - hp("q") = q // exo lags - hp("spec") = s // trend specification: 0, 1, 2, 3, 5 - - val mod = ARX_Symb (xe, y, hh, fEndo = ff, fExo = gg) // create model for time series data - mod.inSampleTest () // In-sample Testing - println (mod.summary ()) // statistical summary of fit - end for - -end aRX_SymbTest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest4` main function tests the `ARX_Symb` class on real data: - * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * > runMain scalation.modeling.forecasting.aRX_SymbTest4 - */ -@main def aRX_SymbTest4 (): Unit = - - val exo_vars = Array ("icu_patients") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - val pp = 1.5 - hp("lwave") = 20 // wavelength (distance between peaks) -// hp("cross") = 1 // 1 => add cross terms - - val ff = Array [Transform] (powForm (VectorD (pp))) - val gg = Array [Transform] () - - for p <- 6 to 6; q <- 4 to 4; s <- 1 to 1 do // number of lags (endo, exo); trend - hp("p") = p // endo lags - hp("q") = q // exo lags - hp("spec") = s // trend specification: 0, 1, 2, 3, 5 - -// val mod = ARX_Symb (xe, y, hh, fEndo = ff, fExo = gg) - val mod = ARX_Symb.rescale (xe, y, hh, fEndo = ff, fExo = gg) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // use customized trainNtest_x - -// mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y -// mod.diagnoseAll (mod.getY, mod.getYf) - - banner ("rollValidate") - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") - mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set -// println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - end for - -end aRX_SymbTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest5` main function tests the `ARX_Symb` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * This version performs feature selection. - * > runMain scalation.modeling.forecasting.aRX_SymbTest5 - * -@main def aRX_SymbTest5 (): Unit = - - val exo_vars = Array ("icu_patients") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - val p = 6 - val q = 6 - hp("p") = p // endo lags - hp("q") = q // exo lags - hp("spec") = 5 // trend specification: 0, 1, 2, 3, 5 - hp("lwave") = 20 // wavelength (distance between peaks) - hp("cross") = 1 // 1 => add cross terms - hp("lambda") = 1.0 // regularization/shrinkage parameter - - val ff = Array (powTo (1.5), powTo (0.5), log1p, sin, cos) // functions to apply to endo lags - val gg = Array (powTo (1.5), powTo (0.5), log1p, sin, cos) // functions to apply to exo lags - - val mod = ARX_Symb (xe, y, hh, fEndo = ff, fExo = gg) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // train and test on full dataset - println (mod.summary ()) // statistical summary of fit - - mod.setSkip(0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng(y.dim), 0) - - banner ("Feature Selection Technique: Stepwise") - val (cols, rSq) = mod.stepwiseSelAll () // R^2, R^2 bar, sMAPE, R^2 cv -// val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, sMAPE, R^2 cv - val k = cols.size - println (s"k = $k") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "R^2 cv"), - s"R^2 vs n for ${mod.modelName}", lines = true) - println (s"rSq = $rSq") - -end aRX_SymbTest5 - */ - diff --git a/src/main/scala/scalation/modeling/forecasting/ARX_Symb_D.scala 
b/src/main/scala/scalation/modeling/forecasting/ARX_Symb_D.scala deleted file mode 100644 index 104319e84..000000000 --- a/src/main/scala/scalation/modeling/forecasting/ARX_Symb_D.scala +++ /dev/null @@ -1,214 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Yousef Fekri Dabanloo - * @version 2.0 - * @date Thu Jan 30 21:15:45 EST 2025 - * @see LICENSE (MIT style license file). - * - * @note Model: Auto-Regressive on lagged y and xe with SR terms (ARX_Symb_D) using OLS - Direct Forecasting - * - * @see `scalation.modeling.Regression` - */ - - -package scalation -package modeling -package forecasting - -import scalation.mathstat._ - -import MakeMatrix4TS._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Symb_D` class provides time series analysis capabilities for ARX_D Symbolic - * Regression (SR) models. These models include trend, linear, power, root, and cross terms - * for the single endogenous (y) variable and zero or more exogenous (xe) variables. - * Given time series data stored in vector y and matrix xe, its next value y_t = combination - * of last p values of y, y^p, y^r and the last q values of each exogenous variable xe_j, - * again in linear, power and root forms (as well as ENDO-EXO cross terms). - * - * y_t = b dot x_t + e_t - * - * where y_t is the value of y at time t, x_t is a vector of inputs, and e_t is the - * residual/error term. - * @see `MakeMatrix4TS` for hyper-parameter specifications. 
- * @param x the data/input matrix (lagged columns of y and xe) @see `ARX_Symb_D.apply` - * @param y the response/output vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param n_exo the number of exogenous variables - * @param fname the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForms the map of transformations applied - */ -class ARX_Symb_D (x: MatrixD, y: MatrixD, hh: Int, n_exo: Int, fname: Array [String], - tRng: Range = null, hparam: HyperParameter = hp, - bakcast: Boolean = false, - tForms: TransformMap = Map ("tForm_y" -> null)) - extends ARX_D (x, y, hh, n_exo, fname, tRng, hparam, bakcast, tForms): - - private val debug = debugf ("ARX_Symb_D", true) // debug function - - modelName = s"ARX_Symb_D($p, $q, $n_exo)" - - debug ("init", s"$modelName with with $n_exo exogenous variables and additional term spec = $spec") - debug ("init", s"[ x | y ] = ${x ++^ y}") - -end ARX_Symb_D - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Symb_D` companion object provides factory methods for the `ARX_Symb_D` class. - */ -object ARX_Symb_D extends MakeMatrix4TS: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX_Symb_D` object by building an input matrix xy and then calling the - * `ARX_Symb_D` constructor. 
- * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param fname_ the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param fEndo the array of transforms used to transform endogenous variables - * @param fExo the array of transforms used to transform exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ - def apply (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, - tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array [Transform] = Array (log1pForm), - fExo: Array [Transform] = Array (log1pForm), - bakcast: Boolean = false): ARX_Symb_D = - - val (n_fEndo, n_fExo) = (fEndo.length, fExo.length) - val (xy, tForms) = ARX_Symb.buildMatrix (xe, y, hparam, fEndo, fExo, bakcast) - val yy = makeMatrix4Y (y, hh, bakcast) - val fname = if fname_ == null then formNames (xe.dim2, hparam, n_fEndo, n_fExo) else fname_ - new ARX_Symb_D (xy, yy, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX_Symb_D` object by building an input matrix xy and then calling the - * `ARX_Symb_D` constructor, with rescaling of endogneous and exogenous variable values. 
- * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param fname_ the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param fEndo the array of transforms used to transform endogenous variables - * @param fExo the array of transforms used to transform exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForm the transform for y - */ - def rescale (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, - tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array [Transform] = Array (log1pForm), - fExo: Array [Transform] = Array (log1pForm), - bakcast: Boolean = false, - tForm: VectorD | MatrixD => Transform = x => zForm(x)): ARX_Symb_D = - - val (n_fEndo, n_fExo) = (fEndo.length, fExo.length) - val (xy, tForms) = ARX_Symb.buildMatrix (xe, y, hparam, fEndo, fExo, bakcast, tForm) - val fname = if fname_ == null then formNames (xe.dim2, hparam, n_fEndo, n_fExo) else fname_ - val y_scl = tForms("tForm_y").f(y) - if tForms("tForm_y").getClass.getSimpleName == "zForm" then hp("nneg") = 0 - val yy = makeMatrix4Y (y_scl, hh, bakcast) - new ARX_Symb_D (xy, yy, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms) - end rescale - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Form an array of names for the features included in the model. 
- * @param n_exo the number of exogenous variable - * @param hp_ the hyper-parameters - * @param n_fEn the number of functions used to map endogenous variables - * @param n_fEx the number of functions used to map exogenous variables - */ - def formNames (n_exo: Int, hp_ : HyperParameter, n_fEn: Int, n_fEx: Int): Array [String] = - ARX_Symb.formNames (n_exo, hp_, n_fEn, n_fEx) - end formNames - -end ARX_Symb_D - -import Example_Covid._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Symb_DTest3` main function tests the `ARX_Symb_D` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * > runMain scalation.modeling.forecasting.aRX_Symb_DTest3 - */ -@main def aRX_Symb_DTest3 (): Unit = - -// val exo_vars = NO_EXO - val exo_vars = Array ("icu_patients") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - hp("lwave") = 20 // wavelength (distance between peaks) - hp("cross") = 1 // 1 => add cross terms - - for p <- 6 to 6; q <- 4 to 4; s <- 1 to 1 do // number of lags (endo, exo); trend - hp("p") = p // endo lags - hp("q") = 2 // exo lags - hp("spec") = s // trend specification: 0, 1, 2, 3, 5 - val mod = ARX_Symb_D (xe, y, hh) // create model for time series data - mod.inSampleTest () // In-sample Testing - println (mod.summary ()) // statistical summary of fit - end for - -end aRX_Symb_DTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Symb_DTest4` main function tests the `ARX_Symb_D` class on real data: - * Forecasting COVID-19 using Train and 
Test (TnT). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * > runMain scalation.modeling.forecasting.aRX_Symb_DTest4 - */ -@main def aRX_Symb_DTest4 (): Unit = - - val exo_vars = Array ("icu_patients") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - val pp = 1.5 - hp("lwave") = 20 // wavelength (distance between peaks) -// hp("cross") = 1 // 1 => add cross terms - - val ff = Array [Transform] (powForm (VectorD (pp))) - val gg = Array [Transform] () - - for p <- 6 to 6; q <- 4 to 4; s <- 1 to 1 do // number of lags (endo, exo); trend - hp("p") = p // endo lags - hp("q") = q // exo lags - hp("spec") = s // trend specification: 0, 1, 2, 3, 5 -// val mod = ARX_Symb_D (xe, y, hh, fEndo = ff, fExo = gg) // create model for time series data - val mod = ARX_Symb_D.rescale (xe, y, hh, fEndo = ff, fExo = gg) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // use customized trainNtest_x -// println (mod.summary ()) // statistical summary of fit - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") - mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set -// println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - end for - -end aRX_Symb_DTest4 - diff --git a/src/main/scala/scalation/modeling/forecasting/ARY.scala b/src/main/scala/scalation/modeling/forecasting/ARY.scala index 4b4c30008..728ae2669 100644 --- a/src/main/scala/scalation/modeling/forecasting/ARY.scala +++ 
b/src/main/scala/scalation/modeling/forecasting/ARY.scala @@ -15,9 +15,12 @@ package scalation package modeling package forecasting +import scala.collection.mutable.{LinkedHashSet => LSET} + import scalation.mathstat._ import MakeMatrix4TS._ +import TransformT._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `ARY` class provides basic time series analysis capabilities for ARY models. @@ -47,8 +50,8 @@ class ARY (x: MatrixD, y: VectorD, hh: Int, fname: Array [String], protected val p = hparam("p").toInt // use the last p values (p lags) protected val spec = hparam("spec").toInt // trend terms: 0 - none, 1 - constant, 2 - linear, 3 - quadratic // 4 - sine, 5 cosine - modelName = s"ARY($p)" - yForm = tForms("tForm_y").asInstanceOf [Transform] + _modelName = s"ARY_$p" + yForm = tForms("tForm_y").asInstanceOf [Transform] // yForm defined in `Fit` via hierarchy debug ("init", s"$modelName with additional term spec = $spec") // debug ("init", s"[ x | y ] = ${x :^+ y}") @@ -70,6 +73,14 @@ class ARY (x: MatrixD, y: VectorD, hh: Int, fname: Array [String], x_trend ++ x_act ++ x_fcast end forge + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build an `ARY` model using the cols with the selected features. 
+ * @param cols the cols of the input matrix with selected features + */ + def convertReg2Forc (cols: LSET [Int] = mcols): ARY = + new ARY (getX(?, cols), getY, hh, cols.toArray.map (fname(_)), tRng, hparam, bakcast, tForms) + end convertReg2Forc + end ARY @@ -107,17 +118,22 @@ object ARY extends MakeMatrix4TSY: * @param tRng the time range, if relevant (time index may suffice) * @param hparam the hyper-parameters * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForm the z-transform (rescale to standard normal) + * @param tFormT the transform for rescaling endogenous and exogenous */ def rescale (y: VectorD, hh: Int, fname_ : Array [String] = null, tRng: Range = null, hparam: HyperParameter = hp, bakcast: Boolean = false, - tForm: VectorD | MatrixD => Transform = x => zForm(x)): ARY = + tFormT: TransformT = MinMax): ARY = + + if tFormT.name == "NormForm" then hparam("nneg") = 0 val p = hparam("p").toInt // use the last p values val spec = hparam("spec").toInt // 0 - none, 1 - constant, 2 - linear, 3 -quadratic, 4 - sin, 5 = cos - val tForm_y = tForm(y) - if tForm_y.getClass.getSimpleName == "zForm" then hparam("nneg") = 0 + + val tr_size = Model.trSize (y.dim) + val tForm_y = tFormT.form(y(0 until tr_size)) // use (mean, std) of training set for both In-sample and TnT +// val tForm_y = tForm(y) // use full dataset + val y_scl = tForm_y.f(y) val tForms = Map ("tForm_y" -> tForm_y) @@ -145,7 +161,7 @@ object ARY extends MakeMatrix4TSY: */ def ary1 (y: VectorD): SimpleRegression = val x = WeightedMovingAverage.backcast (y) +: y(0 until y.dim-1) - println (MatrixD (x, y).transpose) + println (MatrixD (x, y).ᵀ) SimpleRegression (x, y, null) end ary1 @@ -168,7 +184,7 @@ import Example_LakeLevels.y hp("spec") = 2 // trend specification: 0, 1, 2, 3, 5 val mod = ARY (y, hh) // create model for time series data - mod.inSampleTest () // In-Sample Testing + mod.inSample_Test () // In-Sample Testing println (mod.summary 
()) // statistical summary end aRYTest @@ -216,7 +232,7 @@ end aRYTest2 hp("spec") = s // trend specification: 0, 1, 2, 3, 5 val mod = ARY (y, hh) // create model for time series data - mod.inSampleTest () // In-Sample Testing + mod.inSample_Test () // In-Sample Testing println (mod.summary ()) // statistical summary of fit end for @@ -240,16 +256,14 @@ end aRYTest3 for p <- 6 to 6; s <- 1 to 1 do // number of lags; trend hp("p") = p // endo lags hp("spec") = s // trend specification: 0, 1, 2, 3, 5 -// val mod = ARY (y, hh) // create model for time series data - val mod = ARY.rescale (y, hh) // create model for time series data - + val mod = ARY (y, hh) // create model for time series data banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") mod.trainNtest_x ()() // use customized trainNtest_x mod.setSkip (0) mod.rollValidate (rc = 2) // TnT with Rolling Validation println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") - mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set + mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set // println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") end for @@ -274,7 +288,7 @@ end aRYTest4 hp("lwave") = 20 // wavelength (distance between peaks) val mod = ARY (y, hh) // create model for time series data - mod.inSampleTest () // In-Sample Testing + mod.inSample_Test () // In-Sample Testing println (mod.summary ()) // statistical summary of fit banner ("Feature Selection Technique: Forward") @@ -282,8 +296,7 @@ end aRYTest4 // val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, sMAPE, R^2 cv val k = cols.size println (s"k = $k") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "R^2 cv"), - s"R^2 vs n for ${mod.modelName}", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for ${mod.modelName}", lines = true) println (s"rSq = $rSq") end aRYTest5 diff --git 
a/src/main/scala/scalation/modeling/forecasting/ARY_D.scala b/src/main/scala/scalation/modeling/forecasting/ARY_D.scala index 4a43f42f9..44b66a60b 100644 --- a/src/main/scala/scalation/modeling/forecasting/ARY_D.scala +++ b/src/main/scala/scalation/modeling/forecasting/ARY_D.scala @@ -12,10 +12,13 @@ package scalation package modeling package forecasting +import scala.collection.mutable.{LinkedHashSet => LSET} + import scalation.mathstat._ import scalation.modeling.neuralnet.{RegressionMV => REGRESSION} import MakeMatrix4TS._ +import TransformT._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `ARY_D` class provides basic time series analysis capabilities for @@ -39,17 +42,17 @@ class ARY_D (x: MatrixD, y: MatrixD, hh: Int, fname: Array [String], tRng: Range = null, hparam: HyperParameter = hp, bakcast: Boolean = false, tForms: TransformMap = Map ("tForm_y" -> null)) - extends Forecaster_D (x, y, hh, tRng, hparam, bakcast): + extends Forecaster_D (x, y, hh, fname, tRng, hparam, bakcast): - private val debug = debugf ("ARY_D", true) // debug function - private val p = hparam("p").toInt // use the last p values (p lags) - private val spec = hparam("spec").toInt // trend terms: 0 - none, 1 - constant, 2 - linear, 3 - quadratic - // 4 - sine, 5 cosine - private val nneg = hparam("nneg").toInt == 1 // 0 => unrestricted, 1 => predictions must be non-negative - private val reg = new REGRESSION (x, y, fname, hparam) // delegate training to multi-variate regression + private val debug = debugf ("ARY_D", true) // debug function + private val p = hparam("p").toInt // use the last p values (p lags) + private val spec = hparam("spec").toInt // trend terms: 0 - none, 1 - constant, 2 - linear, 3 - quadratic + // 4 - sine, 5 cosine + private val nneg = hparam("nneg").toInt == 1 // 0 => unrestricted, 1 => predictions must be non-negative + private val reg = new REGRESSION (x, y, fname, hparam) // delegate training to multi-variate 
regression - modelName = s"ARY_D($p)" - yForm = tForms("tForm_y").asInstanceOf [Transform] + _modelName = s"ARY_D_$p" + yForm = tForms("tForm_y").asInstanceOf [Transform] debug ("init", s"$modelName with additional term spec = $spec") // debug ("init", s"[ x | y ] = ${x ++^ y}") @@ -62,9 +65,11 @@ class ARY_D (x: MatrixD, y: MatrixD, hh: Int, fname: Array [String], * @param y_ the training/full response vector (e.g., full y) */ def train_x (x_ : MatrixD, y_ : MatrixD): Unit = - debug ("train", s"$modelName, x_.dim = ${x_.dim}, y_.dim = ${y_.dim}") - reg.train (x_, y_) // train the multi-variate regression model - bb = reg.parameter // coefficients from regression + debug ("train_x", s"$modelName, x_.dim = ${x_.dim}, y_.dim = ${y_.dim}") + val idx = y_(?, y.dim2-1).indexOf (NO_DOUBLE) // index of first non-value in the last column + val (x_t, y_t) = if idx < 0 then (x_, y_) else (x_(0 until idx), y_(0 until idx)) + reg.train (x_t, y_t) // train the multi-variate regression model + bb = reg.parameter // coefficients from regression end train_x //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -75,9 +80,9 @@ class ARY_D (x: MatrixD, y: MatrixD, hh: Int, fname: Array [String], * @param b_ the parameters/coefficients for the model * @param vifs the Variance Inflation Factors (VIFs) */ - override def summary (x_ : MatrixD = getX, fname_ : Array [String] = reg.getFname, + override def summary (x_ : MatrixD = x, fname_ : Array [String] = reg.getFname, b_ : VectorD = b, vifs: VectorD = reg.vif ()): String = - super.summary (x_, fname_, b_, vifs) // summary from `Fit` + super.summary (x_, fname_, b_, vifs) // summary from `Fit` end summary //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -103,9 +108,9 @@ class ARY_D (x: MatrixD, y: MatrixD, hh: Int, fname: Array [String], * @param y_ the actual values to use in making predictions */ override def forecast (t: Int, y_ : VectorD): VectorD = - val pred = predict (t, 
MatrixD (y_).transpose) - for h <- 1 to hh do yf(t, h) = pred(h-1) - pred // yh is pred + val pred = predict (t, MatrixD (y_).ᵀ) + yf(t, 1 until hh+1) = pred + pred // yh is pred end forecast //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -120,6 +125,25 @@ class ARY_D (x: MatrixD, y: MatrixD, hh: Int, fname: Array [String], yf end forecastAll + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build an `ARY_D` model using the cols with the selected features. + * @param cols the cols of the input matrix with selected features + * @param h the number of the horizon + */ + def getModel (cols: LSET [Int] = mcols): ARY_D = + new ARY_D (x(?, cols), y, hh, cols.toArray.map (fname (_)), tRng, hparam, bakcast, tForms) + end getModel + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a single-horizon `ARY` model using the cols with the selected features. + * Note: uses `ARY` as it is the base model for ARY*_D. 
+ * @param cols the cols of the input matrix with selected features + * @param h the number of the horizon + */ + def getModel_h (cols: LSET [Int] = mcols, h: Int = 1): ARY = + new ARY (x(?, cols), y(?, h-1), 1, cols.toArray.map (fname (_)), tRng, hparam, bakcast, tForms) + end getModel_h + end ARY_D @@ -140,8 +164,9 @@ object ARY_D extends MakeMatrix4TSY: def apply (y: VectorD, hh: Int, fname_ : Array [String] = null, tRng: Range = null, hparam: HyperParameter = hp, bakcast: Boolean = false): ARY_D = - val p = hparam("p").toInt // use the last p values - val spec = hparam("spec").toInt // 0 - none, 1 - constant, 2 - linear, 3 -quadratic, 4 - sin, 5 = cos + + val p = hparam("p").toInt // use the last p values + val spec = hparam("spec").toInt // 0 - none, 1 - constant, 2 - linear, 3 -quadratic, 4 - sin, 5 = cos val xy = ARY.buildMatrix (y, hparam, bakcast) val yy = makeMatrix4Y (y, hh, bakcast) val fname = if fname_ == null then formNames (spec, p) else fname_ @@ -157,23 +182,24 @@ object ARY_D extends MakeMatrix4TSY: * @param tRng the time range, if relevant (time index may suffice) * @param hparam the hyper-parameters * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForm the z-transform (rescale to standard normal) + * @param tFormT the transform for rescaling endogenous and exogenous */ def rescale (y: VectorD, hh: Int, fname_ : Array [String] = null, tRng: Range = null, hparam: HyperParameter = hp, bakcast: Boolean = false, - tForm: VectorD | MatrixD => Transform = x => zForm(x)): ARY_D = + tFormT: TransformT = MinMax): ARY_D = + + if tFormT.name == "NormForm" then hparam("nneg") = 0 val p = hparam("p").toInt // use the last p values val spec = hparam("spec").toInt // 0 - none, 1 - constant, 2 - linear, 3 -quadratic, 4 - sin, 5 = cos - val tForm_y = tForm(y) - if tForm_y.getClass.getSimpleName == "zForm" then hparam("nneg") = 0 + val tForm_y = tFormT.form(y) val y_scl = tForm_y.f(y) - val tForms: 
TransformMap = Map ("tForm_y" -> tForm_y) - val xy = ARY.buildMatrix (y_scl, hparam, bakcast) - val yy = makeMatrix4Y (y_scl, hh, bakcast) - val fname = if fname_ == null then formNames (spec, p) else fname_ + val tForms = Map ("tForm_y" -> tForm_y) + val xy = ARY.buildMatrix (y_scl, hparam, bakcast) + val fname = if fname_ == null then formNames (spec, p) else fname_ + val yy = makeMatrix4Y (y_scl, hh, bakcast) new ARY_D (xy, yy, hh, fname, tRng, hparam, bakcast, tForms) end rescale @@ -193,11 +219,11 @@ import Example_LakeLevels.y val hh = 3 // maximum forecasting horizon hp("p") = 3 // endo lags - hp("spec") = 2 // trend specification: 0, 1, 2, 3, 5 + hp("spec") = 1 // trend specification: 0, 1, 2, 3, 5 val mod = ARY_D (y, hh) // create model for time series data - mod.inSampleTest () // In-Sample Testing - println (mod.summary ()) // statistical summary + mod.inSample_Test () // In-Sample Testing +// println (mod.summary ()) // statistical summary FIX -- crashes end aRY_DTest @@ -220,7 +246,7 @@ end aRY_DTest mod.trainNtest_x ()() // train and test on full dataset mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set + mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") end aRY_DTest2 @@ -242,8 +268,8 @@ end aRY_DTest2 for p <- 1 to 6 do // number of lags hp("p") = p val mod = ARY_D (y, hh) // create model for time series data - mod.inSampleTest () // In-Sample Testing - println (mod.summary ()) // statictival summary + mod.inSample_Test () // In-Sample Testing +// println (mod.summary ()) // statistical summary -- FIX crashes end for end aRY_DTest3 @@ -262,12 +288,10 @@ end aRY_DTest3 val y = yy(0 until 116) // clip the flat end val hh = 6 // maximum forecasting horizon - for p <- 6 to 6; s <- 1 to 1 do // number of lags; trend + for p <- 5 to 5; s <- 1 to 
1 do // number of lags; trend hp("p") = p hp("spec") = s -// val mod = ARY_D (y, hh) // create model for time series data - val mod = ARY_D.rescale(y, hh) // create model for time series data - + val mod = ARY_D (y, hh) // create model for time series data banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") mod.trainNtest_x ()() // use customized trainNtest_x diff --git a/src/main/scala/scalation/modeling/forecasting/ARY_Quad.scala b/src/main/scala/scalation/modeling/forecasting/ARY_Quad.scala index ba2acc656..02a285c18 100644 --- a/src/main/scala/scalation/modeling/forecasting/ARY_Quad.scala +++ b/src/main/scala/scalation/modeling/forecasting/ARY_Quad.scala @@ -18,6 +18,7 @@ package forecasting import scalation.mathstat._ import MakeMatrix4TS._ +import TransformT._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `ARY_Quad` class provides basic time series analysis capabilities for ARY quadratic models. @@ -44,12 +45,12 @@ class ARY_Quad (x: MatrixD, y: VectorD, hh: Int, fname: Array [String], extends ARY (x, y, hh, fname, tRng, hparam, bakcast, tForms): private val debug = debugf ("ARY_Quad", true) // debug function -// private val pp = hparam("pp").toDouble // power to raise the endogenous lags to (defaults to quadratic) + private val pow = Transform.hp("p").toDouble // power to raise the endogenous lags to (defaults to quadratic) - modelName = s"ARY_Quad($p)" + _modelName = s"ARY_Quad_$p" debug ("init", s"$modelName with additional term spec = $spec") - debug ("init", s"[ x | y ] = ${x :^+ y}") +// debug ("init", s"[ x | y ] = ${x :^+ y}") //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Forge a new vector from the first spec values of x, the last p-h+1 values @@ -65,28 +66,11 @@ class ARY_Quad (x: MatrixD, y: VectorD, hh: Int, fname: Array [String], val nyy = p - x_act.dim // number of forecasted values needed val x_fcast = yy(h-nyy until h) // get forecasted y-values 
-// val x2_act = x_act ~^ pp // get actual y^2-values -// val x2_fcast = x_fcast ~^ pp // get forecasted y^2-values - val x2_act = xx(n_endo + p - (p + 1 - h) until n_endo + p) // get transformed lagged endogenous variable - val x2_fcast = scaleCorrection(x_fcast) + val x2_act = x_act ~^ pow // get actual y^2-values + val x2_fcast = x_fcast ~^ pow // get forecasted y^2-values x_trend ++ x_act ++ x_fcast ++ x2_act ++ x2_fcast end forge - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Apply scale correction to x_fcast. - * - * @param x_fcast the vector to apply the scale correction to - */ - def scaleCorrection(x_fcast: VectorD): VectorD = - if tForms("tForm_y") != null then - val f_pp = (tForms("tForm_endo").asInstanceOf[Transform].f(_: VectorD)) ⚬ - (tForms("ppForm").asInstanceOf[Transform].f(_: VectorD)) ⚬ - (tForms("tForm_y").asInstanceOf[Transform].fi(_: VectorD)) - f_pp(x_fcast) - else - tForms("ppForm").asInstanceOf[Transform].f(x_fcast) - end scaleCorrection - end ARY_Quad @@ -96,7 +80,7 @@ end ARY_Quad object ARY_Quad extends MakeMatrix4TSY: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARY_Quad` object by building an input matrix x and then calling the + /** Create an `ARY_Quad` object by building an input matrix xy and then calling the * `ARY_Quad` constructor. 
* @param y the response vector (time series data) * @param hh the maximum forecasting horizon (h = 1 to hh) @@ -111,9 +95,9 @@ object ARY_Quad extends MakeMatrix4TSY: val p = hparam("p").toInt // use the last p values val spec = hparam("spec").toInt // 0 - none, 1 - constant, 2 - linear, 3 -quadratic, 4 - sin, 5 = cos - val (xy, tForms) = buildMatrix (y, hparam, bakcast) + val xy = buildMatrix (y, hparam, bakcast) val fname = if fname_ == null then formNames (spec, p) else fname_ - new ARY_Quad (xy, y, hh, fname, tRng, hparam, bakcast, tForms) + new ARY_Quad (xy, y, hh, fname, tRng, hparam, bakcast) end apply //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -124,19 +108,27 @@ object ARY_Quad extends MakeMatrix4TSY: * @param tRng the time range, if relevant (time index may suffice) * @param hparam the hyper-parameters * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForm the z-transform (rescale to standard normal) + * @param tFormT the transform for rescaling endogenous and exogenous */ def rescale (y: VectorD, hh: Int, fname_ : Array [String] = null, tRng: Range = null, hparam: HyperParameter = hp, bakcast: Boolean = false, - tForm: VectorD | MatrixD => Transform = x => zForm(x)): ARY_Quad = + tFormT: TransformT = MinMax): ARY_Quad = + + if tFormT.name == "NormForm" then hparam("nneg") = 0 + + // rescale y + val tFormScale = tFormT.form + val tr_size = Model.trSize (y.dim) + val tForm_y = tFormScale (y(0 until tr_size)) // use (mean, std) of training set for both In-sample and TnT + val y_scl = tForm_y.f(y) val p = hparam("p").toInt // use the last p values val spec = hparam("spec").toInt // 0 - none, 1 - constant, 2 - linear, 3 -quadratic, 4 - sin, 5 = cos - val (xy, tForms) = buildMatrix (y, hparam, bakcast, tForm) - if tForms("tForm_y").getClass.getSimpleName == "zForm" then hp("nneg") = 0 - val y_scl = tForms("tForm_y").f(y) - val fname = if fname_ == null then 
formNames (spec, p) else fname_ + val powForm = PowForm (VectorD (0, Transform.hp("p").toDouble)) + val tForms = Map ("tForm_y" -> tForm_y, "powForm" -> powForm) + val xy = buildMatrix (y_scl, hparam, bakcast, powForm) + val fname = if fname_ == null then formNames (spec, p) else fname_ new ARY_Quad (xy, y_scl, hh, fname, tRng, hparam, bakcast, tForms) end rescale @@ -146,33 +138,19 @@ object ARY_Quad extends MakeMatrix4TSY: * @param y the response vector (time series data) * @param hp_ the hyper-parameters * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForm the z-transform (rescale to standard normal) + * @param powForm the power transform */ def buildMatrix (y: VectorD, hp_ : HyperParameter, bakcast: Boolean, - tForm: VectorD | MatrixD => Transform = null): (MatrixD, TransformMap) = - - val (p, pp, spec, lwave) = (hp_("p").toInt, hp_("pp").toDouble, hp_("spec").toInt, hp_("lwave").toDouble) - val ppForm = powForm (VectorD (pp)) - var y_pp = ppForm.f(y) - var y_scl = y + powForm: Transform = PowForm (VectorD (0, Transform.hp("p").toDouble))): MatrixD = - val tForms: TransformMap = - if tForm != null then - val tForm_y = tForm (y) - y_scl = tForm_y.f(y) - val tForm_endo = tForm (y_pp) - y_pp = tForm_endo.f(y_pp) - Map ("tForm_y" -> tForm_y, "tForm_endo" -> tForm_endo, "ppForm" -> ppForm) - else - Map ("tForm_y" -> null, "ppForm" -> ppForm) + val (p, spec, lwave) = (hp_("p").toInt, hp_("spec").toInt, hp_("lwave").toDouble) - val x_endo = MatrixD (y_scl, y_pp).transpose + val y_pow = powForm.f(y) + val x_endo = MatrixD (y, y_pow).ᵀ // add trend terms and terms for the endogenous variable - val xy = makeMatrix4T (y, spec, lwave, bakcast) ++^ // trend terms - makeMatrix4L (x_endo, p, bakcast) // lagged linear terms - - (xy, tForms) + makeMatrix4T (y, spec, lwave, bakcast) ++^ // trend terms + makeMatrix4L (x_endo, p, bakcast) // lagged linear terms end buildMatrix end ARY_Quad @@ -194,7 +172,7 @@ import 
Example_LakeLevels.y hp("spec") = 2 // trend specification: 0, 1, 2, 3, 5 val mod = ARY_Quad (y, hh) // create model for time series data - mod.inSampleTest () // In-Sample Testing + mod.inSample_Test () // In-Sample Testing println (mod.summary ()) // statistical summary end aRY_QuadTest @@ -214,13 +192,11 @@ end aRY_QuadTest hp("spec") = 2 // trend specification: 0, 1, 2, 3, 5 val mod = ARY_Quad (y, hh) // create model for time series data -// val mod = ARY_Quad.rescale(y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") mod.trainNtest_x ()() // train and test on full dataset mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set + mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") end aRY_QuadTest2 @@ -238,15 +214,15 @@ end aRY_QuadTest2 // val y = yy // full val y = yy(0 until 116) // clip the flat end val hh = 6 // maximum forecasting horizon - hp("pp") = 1.5 // use 1.5 for the power/exponent (default is 2) hp("lwave") = 20 // wavelength (distance between peaks) + Transform.hp("p") = 1.5 // use 1.5 for the power/exponent (default is 2) for p <- 1 to 5; s <- 1 to 2 do // number of lags; trend hp("p") = p // endo lags hp("spec") = s // trend specification: 0, 1, 2, 3, 5 val mod = ARY_Quad (y, hh) // create model for time series data - mod.inSampleTest () // In-Sample Testing + mod.inSample_Test () // In-Sample Testing println (mod.summary ()) // statistical summary of fit end for @@ -265,22 +241,18 @@ end aRY_QuadTest3 // val y = yy // full val y = yy(0 until 116) // clip the flat end val hh = 6 // maximum forecasting horizon - hp("pp") = 1.5 // use 1.5 for the power/exponent (default is 2) - - hp("lwave") = 20 // wavelength (distance between peaks) + hp("lwave") = 20 // wavelength (distance between 
peaks) - for p <- 6 to 6; s <- 1 to 1 do // number of lags; trend + for p <- 1 to 10; s <- 1 to 5 do // number of lags; trend hp("p") = p // endo lags hp("spec") = s // trend specification: 0, 1, 2, 3, 5 -// val mod = ARY_Quad (y, hh) // create model for time series data - val mod = ARY_Quad.rescale(y, hh) // create model for time series data - + val mod = ARY_Quad (y, hh) // create model for time series data banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") mod.trainNtest_x ()() // use customized trainNtest_x mod.setSkip (0) mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set + mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set // println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") end for @@ -305,7 +277,7 @@ end aRY_QuadTest4 hp("lwave") = 20 // wavelength (distance between peaks) val mod = ARY_Quad (y, hh) // create model for time series data - mod.inSampleTest () // In-Sample Testing + mod.inSample_Test () // In-Sample Testing println (mod.summary ()) // statistical summary of fit banner ("Feature Selection Technique: Forward") @@ -313,8 +285,7 @@ end aRY_QuadTest4 // val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, sMAPE, R^2 cv val k = cols.size println (s"k = $k") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "R^2 cv"), - s"R^2 vs n for ${mod.modelName}", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for ${mod.modelName}", lines = true) println (s"rSq = $rSq") end aRY_QuadTest5 diff --git a/src/main/scala/scalation/modeling/forecasting/Diagnoser.scala b/src/main/scala/scalation/modeling/forecasting/Diagnoser.scala index 6602064dd..a25d7f9e9 100644 --- a/src/main/scala/scalation/modeling/forecasting/Diagnoser.scala +++ b/src/main/scala/scalation/modeling/forecasting/Diagnoser.scala @@ -15,13 +15,13 @@ package forecasting import 
scalation.mathstat._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Diagnoser` trait provides methods to determine basic Quality of Fit QoF measures. - * @param y_ the response vector (time series) - * @param dfm the degrees of freedom for model/regression (0 or more) +/** The `Diagnoser` trait provides methods to determine basic Quality of Fit QoF measures + * for time series where forecasting at early time points may not be feasible. + * @param dfr the degrees of freedom for regression/model (0 or more) * @param df the degrees of freedom for error */ -abstract class Diagnoser (dfm: Double, df: Double) - extends Fit (dfm, df): +abstract class Diagnoser (dfr: Double, df: Double) + extends Fit (dfr, df): // For In-Sample Testing (In-ST), can't forecast for t = 0 (no past data, unless backcasting) // first value in time series may be atypical (but not an necessarily an outlier) => skip first 2 @@ -29,6 +29,13 @@ abstract class Diagnoser (dfm: Double, df: Double) // Call `setSkip` to change from the DEFAULT value of 2 // When comparing different models, should use the same skip value for all models + // Setting skip to 'max (p, q+1)' is common for ARMA (p, q) models, e.g., the state space in + // Kalman Filters need to hold p past values as well as the q past errors and the current error + // to be fully populated from data (e.g., not just zeroed out). + // For ARIMA using a Kalman Filter, it would be 'd + max(p, q+1)'. + // Some software avoids this burn-in issue by using a diffuse Kalman filter + + private val debug = debugf ("Diagnoser", false) // debug function protected var skip: Int = 2 // number of beginning elements to skip //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -44,7 +51,7 @@ abstract class Diagnoser (dfm: Double, df: Double) * Note: Degrees of Freedom are mainly relevant for full and train, not test. 
* @param size the size of dataset (full, train, or test sets) */ - def mod_resetDF (size: Int): Unit = resetDF (dfm, size - dfm) + def mod_resetDF (size: Int): Unit = resetDF (dfr, size - dfr) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Diagnose the health of the model by computing the Quality of Fit (QoF) measures, @@ -56,7 +63,7 @@ abstract class Diagnoser (dfm: Double, df: Double) * @param w the weights on the instances (defaults to null) */ override def diagnose (y: VectorD, yp: VectorD, w: VectorD = null): VectorD = - println (s"diagnose: skip = $skip") + debug ("diagnose", s"skip = $skip") if skip > 0 then super.diagnose (y.drop (skip), yp.drop (skip), if w != null then w.drop (skip) else null) diff --git a/src/main/scala/scalation/modeling/forecasting/Example_Covid.scala b/src/main/scala/scalation/modeling/forecasting/Example_Covid.scala index aad57265e..4ae2af0f8 100644 --- a/src/main/scala/scalation/modeling/forecasting/Example_Covid.scala +++ b/src/main/scala/scalation/modeling/forecasting/Example_Covid.scala @@ -19,22 +19,23 @@ import scalation.mathstat._ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `Example_Covid` object provides a convenient way to load Covid-19 weekly data. - * See test cases (odd In-ST, even TnT Split) below for + * See test cases (odd In-Sample, even TnT Split) below for * Loss/Equations Optimizer * (a: 1, 2) Plot and EDA - - * Univariate: * (b: 3, 4) Baseline Models none or CSSE none or various * (c: 5, 6) AR(p) Models Yule-Walker Durbin-Levinson - * (d: 7, 8) ARMA(p, q=0) Models CSSE BFGS - * (e: 9, 10) ARY(p) Models CSSE QR Factorization - * (f: 11, 12) ARY_D(p) Models CSSE + Direct QR Factorization - * (g: 13, 14) ARMA(p, q=1) Models CSSE BFGS + * (d: 7, 8) ARMA(p, q=0) Models CSSE BFGS? 
+ * (e: 9, 10) ARY(p) Models CSSE Cholesky Factorization + * (f: 11, 12) ARY_D(p) Models CSSE + Direct QR, Cholesky Factorization + * (g: 13, 14) ARMA(p, q=1) Models CSSE BFGS? * Multivariate: - * (h: 15, 16) ARX(p, 2, 2) Models CSSE QR Factorization - * (i: 17, 18) ARX_D Models CSSE + Direct QR Factorization - * (j: 19, 20) ARX_Quad_D Models CSSE QR Factorization + * (h: 15, 16) ARX(p, 2, 2) Models CSSE Cholesky Factorization + * (i: 17, 18) ARX_D Models CSSE + Direct QR, Cholesky Factorization + * (j: 19, 20) ARX_Quad(p, 2, 2) Models CSSE Cholesky Factorization + * (k: 21, 22) ARX_Quad_D Models CSSE + Direct QR, Cholesky Factorization * - * Known Bugs: 13, 14 + * Known Bugs: SMA, WMA, SES, ARMA, ARY_D, ARX_D, ARX_Quad_D */ object Example_Covid: @@ -138,7 +139,7 @@ end example_CovidTest */ @main def example_CovidTest2 (): Unit = - import scala.collection.mutable.Set + import scala.collection.mutable.{LinkedHashSet => LSET} val (xx, yy) = loadData (header, response) // val (x, y) = (xx, yy) // full @@ -152,8 +153,8 @@ end example_CovidTest xj = scaleV (extreme (xj), (0.0, 2.0))(xj) // rescale vector xj to [0, 2] val xxj = MatrixD.fromVector (xj) // val mod = SymbolicRegression.quadratic (xxj, y) -// val mod = SymbolicRegression.rescale (xxj, y, null, Set (1.0, 2.0, 3.0), cross = false) - val mod = SymbolicRegression (xxj, y, null, Set (0.5, 1.0, 2.0, 3.0), cross = false) +// val mod = SymbolicRegression.rescale (xxj, y, null, LSET (1.0, 2.0, 3.0), cross = false) + val mod = SymbolicRegression (xxj, y, null, LSET (0.5, 1.0, 2.0, 3.0), cross = false) mod.trainNtest ()() val yp = mod.predict (mod.getX) println (mod.summary ()) @@ -165,17 +166,17 @@ end example_CovidTest2 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `example_CovidTest3` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. + * Uses In-Sample Testing, i.e., train and test on the same data. 
* Runs several baseline models for horizons 1 to 6, see sMAPE metrics below: * - * 55.1927, 53.9282, 52.7133, 51.8648, 51.9621, 52.0771 Null - * 54.6045, 53.3254, 52.1120, 51.2903, 51.4475, 51.4937 Trend - * 24.2641, 31.8588, 42.4430, 50.1029, 57.4933, 63.5406 SMA - * 26.4055, 31.5936, 43.7356, 50.1744, 58.3506, 63.7234 WMA - * 18.6934, 29.1811, 38.6542, 47.1281, 54.8713, 61.9944 SES - * 19.0371, 29.5797, 39.0740, 47.4638, 55.1785, 62.1818 RW - * 18.3265, 28.7734, 38.2039, 46.7814, 54.5563, 61.7930 RWS - * 18.7298, 28.4908, 37.0997, 45.6487, 51.7248, 56.3708 AR(1) +55.1927, 53.9282, 52.7133, 51.8648, 51.9621, 52.0771 Null +54.6045, 53.3361, 52.1227, 51.3005, 51.4569, 51.5041 Trend +24.2641, 25.5725, 39.2995, 45.9060, 54.4583, 60.3090 SMA -- FIX Bug h=2 too low +26.4055, 23.3947, 40.9707, 44.6394, 55.1448, 59.5280 WMA -- FIX Bug h=2 too low +18.6934, 29.1811, 38.6542, 47.1281, 54.8713, 61.9944 SES +19.0371, 29.5797, 39.0740, 47.4638, 55.1785, 62.1818 RW +18.3265, 28.7734, 38.2039, 46.7814, 54.5563, 61.7930 RWS +18.7298, 28.4908, 37.4800, 46.3173, 53.3245, 59.5733 AR(1) * * > runMain scalation.modeling.forecasting.example_CovidTest3 */ @@ -185,14 +186,14 @@ end example_CovidTest2 new Plot (null, y, null, s"y ($response)", lines = true) - new NullModel (y, hh).inSampleTest () - new TrendModel (y, hh).inSampleTest () - new SimpleMovingAverage (y, hh).inSampleTest () - new WeightedMovingAverage (y, hh).inSampleTest () - new SimpleExpSmoothing (y, hh).inSampleTest () - new RandomWalk (y, hh).inSampleTest () - new RandomWalkS (y, hh).inSampleTest () - new AR (y, hh).inSampleTest () + new NullModel (y, hh).inSample_Test () // create a Null Model and do In-Sample Testing + new TrendModel (y, hh).inSample_Test () + new SimpleMovingAverage (y, hh).inSample_Test () + new WeightedMovingAverage (y, hh).inSample_Test () + new SimpleExpSmoothing (y, hh).inSample_Test () + new RandomWalk (y, hh).inSample_Test () + new RandomWalkS (y, hh).inSample_Test () + new AR (y, 
hh).inSample_Test () end example_CovidTest3 @@ -210,6 +211,15 @@ end example_CovidTest3 * 18.6713, 27.5720, 40.9387, 52.3496, 64.2481, 75.3015 RW * 18.0855, 26.7084, 39.6941, 51.2218, 63.1873, 74.6834 RWS * 19.1590, 31.1975, 44.4850, 55.3120, 65.5536, 74.4969 AR(1) + +55.0263, 57.1038, 59.9686, 62.7341, 64.4922, 67.5687 Null +58.5433, 61.9389, 65.3934, 69.2238, 72.2127, 75.0520 Trend +9.30514, 20.1768, 31.9284, 44.6519, 56.0476, 67.5464 SMA -- FIX Bug +12.2955, 20.0054, 33.8672, 44.7494, 57.1694, 67.9005 WMA -- FIX Bug +33.3083, 44.2916, 54.1432, 64.0841, 73.5420, 80.7100 SES -- FIX Bug +18.1532, 27.2211, 40.3519, 52.3739, 62.5276, 73.6424 RW +17.8157, 26.6262, 39.4029, 51.5366, 61.7820, 73.3250 RWS +18.4659, 29.8363, 42.1980, 53.5928, 62.9734, 73.3153 AR(1) * * > runMain scalation.modeling.forecasting.example_CovidTest4 */ @@ -219,83 +229,34 @@ end example_CovidTest3 new Plot (null, y, null, s"y ($response)", lines = true) - var mod: Forecaster = null - - banner ("TnT Test: Null Model") - mod = new NullModel (y, hh) - mod.trainNtest ()() - mod.setSkip (0) // start at beginning of test-set - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - - banner ("TnT Test: Trend Model") - mod = new TrendModel (y, hh) - mod.trainNtest ()() - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - - banner ("TnT Test: Simple Moving Average Model") - mod = new SimpleMovingAverage (y, hh) - mod.trainNtest ()() - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - - banner ("TnT Test: Weighted Moving Average Model") - mod = new WeightedMovingAverage (y, hh) - mod.trainNtest ()() - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll 
(y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - - banner ("TnT Test: Simple Exponential Smoothing Model") - mod = new SimpleExpSmoothing (y, hh) - mod.trainNtest ()() - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - - banner ("TnT Test: Random Walk Model") - mod = new RandomWalk (y, hh) - mod.trainNtest ()() - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - - banner ("TnT Test: Random Walk Slope Adjusted Model") - mod = new RandomWalkS (y, hh) - mod.trainNtest ()() - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - - banner ("TnT Test: Auto-Regressive AR(1) Model") - mod = new AR (y, hh) - mod.trainNtest ()() - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + new NullModel (y, hh).tnT_Test () // create a Null Model and do TnT Testing + new TrendModel (y, hh).tnT_Test () + new SimpleMovingAverage (y, hh).tnT_Test () + new WeightedMovingAverage (y, hh).tnT_Test () + new SimpleExpSmoothing (y, hh).tnT_Test () + new RandomWalk (y, hh).tnT_Test () + new RandomWalkS (y, hh).tnT_Test () + new AR (y, hh).tnT_Test () end example_CovidTest4 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `example_CovidTest5` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. + * Uses In-Sample Testing, i.e., train and test on the same data. 
* Runs Auto-Regressive AR(p) models for several p values and horizons 1 to 6, * see sMAPE metrics below: * - * 18.7298, 28.4908, 37.0997, 45.6487, 51.7248, 56.3708 AR(1) - * 16.3579, 24.7155, 33.0480, 40.0707, 46.0049, 50.8265 AR(2) - * 16.0114, 22.7408, 29.5631, 35.2773, 40.9870, 45.8408 AR(3) - * 15.8988, 22.5738, 28.5298, 33.3360, 39.1586, 43.1606 AR(4) - * 15.9279, 22.5769, 28.5035, 33.3019, 39.1381, 43.0520 AR(5) - * 15.9647, 22.6143, 28.5229, 33.3735, 39.1651, 42.9640 AR(6) - * 16.0207, 23.2172, 29.4751, 35.2827, 41.0976, 46.1932 AR(7) - * 16.0501, 22.7281, 28.6740, 34.1866, 39.5963, 44.9223 AR(8) - * 16.0196, 22.5269, 28.4223, 34.1619, 39.7297, 44.4649 AR(9) - * 16.1069, 22.6213, 28.6435, 34.2722, 39.9638, 44.8023 AR(10) +18.7298, 28.4908, 37.4800, 46.3173, 53.3245, 59.5733 AR(1) +16.3579, 24.7155, 33.0480, 40.1643, 46.8762, 53.2178 AR(2) +16.0114, 22.7408, 29.5631, 35.2773, 41.5856, 47.5716 AR(3) +15.8988, 22.5738, 28.5298, 33.3360, 39.1586, 44.3459 AR(4) +15.9279, 22.5769, 28.5035, 33.3019, 39.1381, 43.0520 AR(5) +15.9647, 22.6143, 28.5229, 33.3735, 39.1651, 42.9640 AR(6) +16.0207, 23.2172, 29.4751, 35.2827, 41.0976, 46.1932 AR(7) +16.0501, 22.7281, 28.6740, 34.1866, 39.5963, 44.9223 AR(8) +16.0196, 22.5269, 28.4223, 34.1619, 39.7297, 44.4649 AR(9) +16.1069, 22.6213, 28.6435, 34.2722, 39.9638, 44.8023 AR(10) * * > runMain scalation.modeling.forecasting.example_CovidTest5 */ @@ -308,7 +269,7 @@ end example_CovidTest4 for p <- 1 to 10 do // AR hyper-parameter settings hp("p") = p - new AR (y, hh).inSampleTest () // create and test an AR model + new AR (y, hh).inSample_Test () // create an AR model and do In-Sample Testing end for end example_CovidTest5 @@ -320,16 +281,16 @@ end example_CovidTest5 * Runs Auto-Regressive AR(p) models for several p values and horizons 1 to 6, * see sMAPE metrics below: * - * 19.1590, 31.1975, 44.4850, 55.3120, 65.5536, 74.4969 AR(1) - * 17.1764, 27.8131, 41.0173, 52.3883, 62.4018, 71.3206 AR(2) - * 16.1569, 24.1092, 35.0634, 
45.3502, 56.0450, 65.4998 AR(3) - * 15.2413, 23.2293, 30.1320, 40.3648, 48.8558, 57.8766 AR(4) - * 15.4399, 23.3058, 30.4161, 40.4655, 49.3913, 58.6573 AR(5) - * 15.7443, 22.8374, 29.7678, 38.5566, 45.5084, 50.8096 AR(6) - * 15.8906, 24.2516, 31.1198, 40.2877, 47.4982, 56.6783 AR(7) - * 15.8394, 24.8442, 31.2414, 40.4416, 47.5974, 56.3880 AR(8) - * 15.2112, 23.6265, 30.7560, 40.1489, 49.4426, 58.3781 AR(9) - * 15.7954, 23.7332, 32.8467, 42.5300, 52.3179, 60.5518 AR(10) +18.4659, 29.8363, 42.1980, 53.5928, 62.9734, 73.3153 AR(1) +16.7534, 25.7382, 38.5096, 49.3593, 57.9183, 69.7091 AR(2) +16.3630, 21.1490, 29.8750, 39.7999, 47.3691, 59.0869 AR(3) +15.0428, 20.1558, 29.3151, 38.2679, 43.0488, 51.3448 AR(4) +14.9448, 20.2989, 27.2780, 37.3160, 41.9003, 54.0098 AR(5) +13.9802, 19.7390, 27.2648, 35.6434, 42.2692, 50.8636 AR(6) +14.3902, 22.1659, 32.1102, 44.2965, 50.4653, 59.6916 AR(7) +15.0354, 24.8868, 35.9570, 49.9725, 55.2307, 62.9366 AR(8) +14.3458, 23.0047, 32.7333, 44.5037, 50.6380, 61.1755 AR(9) +14.0441, 23.9778, 35.8541, 48.1709, 53.5309, 63.7929 AR(10) * * > runMain scalation.modeling.forecasting.example_CovidTest6 */ @@ -342,13 +303,7 @@ end example_CovidTest5 for p <- 1 to 10 do // AR hyper-parameter settings hp("p") = p - val mod = new AR (y, hh) // create an AR model - banner (s"TnT Test: ${mod.modelName} Model") - mod.trainNtest ()() - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + new AR (y, hh).tnT_Test () // create an AR model and do TnT Testing end for end example_CovidTest6 @@ -356,7 +311,7 @@ end example_CovidTest6 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `example_CovidTest7` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. + * Uses In-Sample Testing, i.e., train and test on the same data. 
* Runs Auto-Regressive, Moving Average ARMA(p, 0) models for several p and * horizons 1 to 6, see sMAPE metrics below: * @@ -383,12 +338,7 @@ end example_CovidTest6 for p <- 1 to 10 do // ARMA hyper-parameter settings hp("p") = p - val mod = new ARMA (y, hh) // create an ARMA model - banner (s"In-ST Test: ${mod.modelName} Model") - mod.trainNtest ()() - - mod.forecastAll () - mod.diagnoseAll (mod.getY, mod.getYf) + new ARMA (y, hh).inSample_Test () // create an ARMA model and do In-Sample Testing end for end example_CovidTest7 @@ -423,13 +373,7 @@ end example_CovidTest7 for p <- 1 to 10 do // ARMA hyper-parameter settings hp("p") = p - val mod = new ARMA (y, hh) // create an ARMA model - banner (s"TnT Test: ${mod.modelName} Model") - mod.trainNtest ()() - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + new ARMA (y, hh).tnT_Test () // create an ARMA model and do TnT Testing end for end example_CovidTest8 @@ -437,38 +381,33 @@ end example_CovidTest8 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `example_CovidTest9` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. + * Uses In-Sample Testing, i.e., train and test on the same data. 
* Runs Auto-Regressive, Lagged Regression ARY(p) models for several p values and * horizons 1 to 6, see sMAPE metrics below: * - * 20.1794, 29.8589, 38.1450, 45.5634, 52.3478, 57.4474 ARY(1) - * 17.7728, 25.1705, 33.1900, 39.4218, 44.8621, 50.5991 ARY(2) - * 17.3594, 23.7550, 30.3838, 35.4514, 40.5868, 46.4292 ARY(3) - * 17.2457, 23.5122, 29.4110, 33.9350, 38.8422, 44.2303 ARY(4) - * 17.2314, 23.5178, 29.4345, 33.9602, 38.9022, 44.3249 ARY(5) - * 17.2503, 23.8232, 29.8341, 34.4885, 39.0138, 43.8011 ARY(6) - * 17.1625, 23.8385, 29.8227, 34.2751, 38.9853, 44.6092 ARY(7) - * 17.2067, 23.5579, 29.4741, 34.0077, 38.6431, 44.3218 ARY(8) - * 17.1326, 23.4530, 29.4149, 34.1103, 38.8254, 44.3564 ARY(9) - * 17.1791, 23.4175, 29.3213, 34.1509, 38.8917, 44.2659 ARY(10) +18.7156, 28.5159, 37.2459, 45.8036, 51.9371, 56.6985 ARY(1) +16.2587, 23.7072, 31.9906, 38.9940, 44.9856, 50.2418 ARY(2) +15.8240, 22.2659, 29.1170, 34.8505, 40.8580, 46.2113 ARY(3) +15.7020, 22.0134, 28.1169, 33.2691, 39.1811, 44.0750 ARY(4) +15.6875, 22.0198, 28.1429, 33.2990, 39.2397, 44.1700 ARY(5) +15.6982, 22.3197, 28.5239, 33.7716, 39.3099, 43.7038 ARY(6) +15.6186, 22.3438, 28.5437, 33.6401, 39.2926, 44.4411 ARY(7) +15.6595, 22.0566, 28.1782, 33.3346, 38.9921, 44.1955 ARY(8) +15.5823, 21.9463, 28.1055, 33.4000, 39.1296, 44.2667 ARY(9) +15.6267, 21.9089, 28.0047, 33.4205, 39.1586, 44.2015 ARY(10) * * > runMain scalation.modeling.forecasting.example_CovidTest9 */ @main def example_CovidTest9 (): Unit = val hh = 6 // max forecasting horizon - hp("lambda") = 1.0 // regularization parameter +// hp("lambda") = 1.0 // regularization parameter new Plot (null, y, null, s"y ($response)", lines = true) for p <- 1 to 10 do // ARY hyper-parameter settings hp("p") = p - val mod = ARY (y, hh) // create an ARY model - banner (s"In-ST Test: ${mod.modelName} Model") - mod.trainNtest_x ()() // needs x matrix => use _x version - - mod.forecastAll () - mod.diagnoseAll (mod.getY, mod.getYf) + ARY (y, hh).inSample_Test () // 
create an ARY model and do In-Sample Testing end for end example_CovidTest9 @@ -480,37 +419,29 @@ end example_CovidTest9 * Runs Auto-Regressive, Lagged Regression ARY(p) models for several p values, * and horizons 1 to 6, see sMAPE metrics below: * - * 19.0003, 30.3936, 43.8008, 54.8254, 65.3736, 74.5465 ARY(1) - * 16.8486, 26.3959, 39.1085, 50.6966, 61.0053, 70.3446 ARY(2) - * 15.7448, 21.8608, 31.3677, 40.9140, 51.5319, 61.5140 ARY(3) - * 14.7953, 20.1791, 26.5422, 35.2717, 40.7200, 48.6407 ARY(4) - * 14.9856, 19.5241, 27.1485, 35.1070, 40.1716, 47.1898 ARY(5) - * 15.0238, 21.1032, 28.4153, 36.6326, 42.5539, 49.8734 ARY(6) - * 15.5620, 20.7860, 29.8501, 37.1646, 43.7716, 48.4778 ARY(7) - * 15.1719, 23.2761, 32.2952, 40.3584, 46.1975, 51.3488 ARY(8) - * 14.9497, 22.5065, 31.3207, 39.5034, 45.5495, 51.4103 ARY(9) - * 14.4824, 21.5906, 29.9550, 37.9214, 43.3013, 52.2868 ARY(10) - * - * FIX - discrepancy between rollValidate and diagnoseAll handled by sft parameter - why needed? +18.4998, 29.4024, 42.6675, 54.2169, 63.0084, 72.2290 ARY(1) +17.8595, 27.8544, 40.7768, 52.5351, 61.0972, 70.1973 ARY(2) +16.4745, 22.9627, 32.9324, 42.8385, 51.6931, 61.5292 ARY(3) +15.7400, 21.2937, 28.9582, 38.8393, 42.0740, 51.3300 ARY(4) +16.2315, 21.2230, 29.6593, 38.9380, 43.1389, 49.8287 ARY(5) +16.1056, 22.6427, 30.9964, 40.9072, 46.1625, 54.0774 ARY(6) +16.6737, 23.1236, 33.1328, 43.0148, 49.7683, 55.2326 ARY(7) +16.7242, 25.7990, 36.3484, 47.6660, 55.1432, 60.7600 ARY(8) +16.5522, 24.9271, 35.1184, 46.2757, 53.5985, 59.8611 ARY(9) +16.0764, 23.6507, 33.7168, 44.2123, 51.0500, 61.1110 ARY(10) * * > runMain scalation.modeling.forecasting.example_CovidTest10 */ @main def example_CovidTest10 (): Unit = val hh = 6 // max forecasting horizon - hp("lambda") = 1.0 // regularization parameter +// hp("lambda") = 1.0 // regularization parameter new Plot (null, y, null, s"y ($response)", lines = true) for p <- 1 to 10 do // ARY hyper-parameter settings hp("p") = p - val mod = ARY (y, hh) // 
create an ARY model - banner (s"TnT Test: ${mod.modelName} Model") - mod.trainNtest_x ()() // needs x matrix => use _x version - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set + ARY (y, hh).tnT_Test () // create an ARY model and do TnT Testing end for end example_CovidTest10 @@ -518,7 +449,7 @@ end example_CovidTest10 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `example_CovidTest11` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. + * Uses In-Sample Testing, i.e., train and test on the same data. * Runs Auto-Regressive, Lagged Regression, Direct ARY_D(p) models for several p values, * and horizons 1 to 6, see sMAPE metrics below: * @@ -532,6 +463,17 @@ end example_CovidTest10 * 17.0492, 23.1888, 29.2826, 34.0878, 39.2379, 44.7474 ARY_D(8) * 16.9841, 23.1090, 29.2154, 34.1249, 39.2711, 44.7709 ARY_D(9) * 17.0676, 23.1089, 28.9425, 33.9046, 38.9082, 44.0469 ARY_D(10) + +18.7192, 28.0356, 38.0739, 46.8690, 54.2154, 60.8921 ARY_D(1) // FIX Bug -- too high +16.2602, 23.9446, 33.8763, 42.7548, 50.5601, 58.3793 ARY_D(2) +15.8284, 23.1419, 32.7795, 41.9619, 50.0289, 57.6217 ARY_D(3) +15.7065, 22.8376, 32.3402, 41.6187, 49.6103, 57.1951 ARY_D(4) +15.6925, 22.8577, 32.3599, 41.6423, 49.6253, 57.2027 ARY_D(5) +15.7054, 22.8457, 32.0807, 41.5924, 49.5162, 57.1057 ARY_D(6) +15.6252, 22.9759, 32.3061, 41.7185, 49.5916, 57.1693 ARY_D(7) +15.6665, 22.8945, 32.1515, 41.6596, 49.5256, 57.1121 ARY_D(8) +15.5888, 22.8066, 32.0457, 41.6402, 49.5181, 57.0147 ARY_D(9) +15.6341, 22.7900, 31.9169, 41.5769, 49.4211, 56.8442 ARY_D(10) * * > runMain scalation.modeling.forecasting.example_CovidTest11 */ @@ -542,14 +484,9 @@ end example_CovidTest10 new Plot (null, y, null, s"y ($response)", lines = true) - for p <- 1 to 10 do // ARY hyper-parameter settings 
+ for p <- 1 to 10 do // ARY_D hyper-parameter settings hp("p") = p - val mod = ARY_D (y, hh) // create an ARY_D model - banner (s"In-ST Test: ${mod.modelName} Model") - mod.trainNtest_x ()() // note: suffix "_x" currently required - - mod.forecastAll (mod.getYy) // forecast h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (mod.getY, mod.getYf) + ARY_D (y, hh).inSample_Test () // create an ARY_D model and do In-Sample Testing end for end example_CovidTest11 @@ -561,16 +498,16 @@ end example_CovidTest11 * Runs Auto-Regressive, Lagged Regression, Direct ARY_D(p) models for several p values, * and horizons 1 to 6, see sMAPE metrics below: * - * 18.9312, 31.2905, 45.7578, 57.0037, 65.9690, 72.4626 ARY_D(1) - * 16.8059, 23.1653, 31.9736, 40.6603, 46.6809, 57.1835 ARY_D(2) - * 15.9031, 20.7335, 27.3975, 35.5557, 39.3269, 51.2769 ARY_D(3) - * 15.0132, 20.2209, 27.5774, 35.4134, 39.7899, 48.6745 ARY_D(4) - * 15.2338, 19.4826, 27.6054, 35.6699, 39.8746, 48.4355 ARY_D(5) - * 15.1603, 19.7425, 27.7367, 35.7799, 40.1055, 49.1122 ARY_D(6) - * 15.5484, 22.7247, 31.0076, 38.5501, 44.5176, 50.8537 ARY_D(7) - * 15.3248, 23.2628, 30.6794, 39.0621, 44.5661, 52.6579 ARY_D(8) - * 15.0875, 21.7912, 30.2152, 37.4165, 42.6637, 52.9831 ARY_D(9) - * 14.7569, 22.2172, 30.9435, 40.5641, 46.2016, 57.6445 ARY_D(10) +18.5012, 30.6359, 44.6907, 56.3819, 63.5357, 70.2028 ARY_D(1) +17.8594, 25.6887, 35.4256, 45.1790, 50.4527, 60.0111 ARY_D(2) +16.4715, 21.8016, 29.0381, 38.5104, 41.7722, 52.5948 ARY_D(3) +15.7366, 21.5619, 29.6182, 38.5514, 42.6748, 50.8053 ARY_D(4) +16.2315, 21.6376, 30.1303, 39.2472, 43.2712, 51.3975 ARY_D(5) +16.1058, 21.4914, 30.3555, 39.4806, 43.6360, 52.2788 ARY_D(6) +16.6739, 24.9328, 34.3104, 43.0995, 50.1624, 55.6230 ARY_D(7) +16.7248, 25.6616, 34.1789, 44.1880, 50.5008, 57.2136 ARY_D(8) +16.5494, 24.1130, 33.8701, 42.6320, 49.7175, 59.3071 ARY_D(9) +16.0705, 24.2940, 34.3169, 43.9710, 52.8199, 63.8433 ARY_D(10) * * > runMain 
scalation.modeling.forecasting.example_CovidTest12 */ @@ -581,15 +518,9 @@ end example_CovidTest11 new Plot (null, y, null, s"y ($response)", lines = true) - for p <- 1 to 10 do // ARX hyper-parameter settings + for p <- 1 to 10 do // ARY_D hyper-parameter settings hp("p") = p - val mod = ARY_D (y, hh) // create model ARY_D for time series data - banner (s"TnT Test: ${mod.modelName} Model") - mod.trainNtest_x ()() // note: suffix "_x" currently required - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set + ARY_D (y, hh).tnT_Test () // create an ARY_D model and do TnT Testing end for end example_CovidTest12 @@ -597,7 +528,7 @@ end example_CovidTest12 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `example_CovidTest13` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. + * Uses In-Sample Testing, i.e., train and test on the same data. 
* Runs Auto-Regressive, Moving Average ARMA(p, q) models for several p values, * and horizons 1 to 6, see sMAPE metrics below: * @@ -613,14 +544,9 @@ end example_CovidTest12 new Plot (null, y, null, s"y ($response)", lines = true) - for p <- 2 to 2 do // ARMA hyper-parameter settings + for p <- 1 to 10 do // ARMA hyper-parameter settings hp("p") = p - val mod = new ARMA (y, hh) // create an ARMA model - banner (s"In-ST Test: ${mod.modelName} Model") - mod.trainNtest ()() - - mod.forecastAll () - mod.diagnoseAll (mod.getY, mod.getYf) + new ARMA (y, hh).inSample_Test () // create an ARMA model and do In-Sample Testing end for end example_CovidTest13 @@ -644,15 +570,9 @@ end example_CovidTest13 new Plot (null, y, null, s"y ($response)", lines = true) - for p <- 2 to 2 do // ARMA hyper-parameter settings + for p <- 1 to 10 do // ARMA hyper-parameter settings hp("p") = p - val mod = new ARMA (y, hh) // create an ARMA model - banner (s"TnT Test: ${mod.modelName} Model") - mod.trainNtest ()() - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + new ARMA (y, hh).tnT_Test () // create an ARMA model and do TnT Testing end for end example_CovidTest14 @@ -660,27 +580,39 @@ end example_CovidTest14 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `example_CovidTest15` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. + * Uses In-Sample Testing, i.e., train and test on the same data. 
* Runs Auto-Regressive, Exogenous ARX(p, q, n) models for several p values, * and horizons 1 to 6, see sMAPE metrics below: * - * 18.3346, 26.5990, 35.8624, 44.8289, 53.7512, 60.5086 ARX(1, 1, 2) - * 15.5184, 20.9192, 27.8176, 35.3589, 43.9210, 50.5047 ARX(2, 2, 2) - * 15.3592, 20.1736, 25.4967, 32.6258, 40.4916, 47.2481 ARX(3, 2, 2) - * 15.3224, 19.8423, 25.0511, 31.9170, 38.9812, 45.6829 ARX(4, 2, 2) - * 15.3200, 19.8433, 25.0510, 31.9146, 38.9858, 45.6849 ARX(5, 2, 2) - * 15.4286, 19.9065, 25.7220, 32.6493, 39.6406, 46.0115 ARX(6, 2, 2) - * 15.3576, 19.9718, 25.4068, 32.3474, 39.0521, 45.5616 ARX(7, 2, 2) - * 15.4913, 19.5610, 25.4153, 32.2240, 39.3885, 45.8530 ARX(8, 2, 2) - * 15.3410, 19.6328, 25.6180, 32.6323, 39.8298, 46.6052 ARX(9, 2, 2) - * 15.4446, 19.6831, 25.6035, 32.8968, 40.6220, 47.7878 ARX(10, 2, 2) +16.8457, 24.4372, 32.1950, 40.6005, 47.7611, 53.9067 ARX(1, 1, 2) +14.0454, 18.9222, 26.7142, 35.7187, 43.8227, 50.6001 ARX(2, 2, 2) +13.8452, 18.1834, 24.5848, 32.9174, 40.9337, 47.7760 ARX(3, 2, 2) +13.7872, 17.8297, 23.8105, 31.4904, 39.1773, 46.0194 ARX(4, 2, 2) +13.7843, 17.8311, 23.8111, 31.4940, 39.1855, 46.0289 ARX(5, 2, 2) +13.8789, 17.9482, 24.0073, 31.5983, 38.8170, 45.1675 ARX(6, 2, 2) +13.8181, 17.9809, 24.0099, 31.6827, 38.9771, 45.5835 ARX(7, 2, 2) +13.9439, 17.6081, 23.7523, 31.3991, 38.6531, 44.9676 ARX(8, 2, 2) +13.7840, 17.6349, 23.8658, 31.1578, 38.6519, 44.6899 ARX(9, 2, 2) +13.8821, 17.6217, 23.6753, 31.0605, 38.6224, 44.6828 ARX(10, 2, 2) + * +18.7156, 28.5159, 37.2459, 45.8036, 51.9371, 56.6985 ARX(1, 1, 0) Agrees with ARY(p) +16.2587, 23.7072, 31.9906, 38.9940, 44.9856, 50.2418 ARX(2, 2, 0) +15.8240, 22.2659, 29.1170, 34.8505, 40.8580, 46.2113 ARX(3, 2, 0) +15.7020, 22.0134, 28.1169, 33.2691, 39.1811, 44.0750 ARX(4, 2, 0) +15.6875, 22.0198, 28.1429, 33.2990, 39.2397, 44.1700 ARX(5, 2, 0) +15.6982, 22.3197, 28.5239, 33.7716, 39.3099, 43.7038 ARX(6, 2, 0) +15.6186, 22.3438, 28.5437, 33.6401, 39.2926, 44.4411 ARX(7, 2, 0) +15.6595, 
22.0566, 28.1782, 33.3346, 38.9921, 44.1955 ARX(8, 2, 0) +15.5823, 21.9463, 28.1055, 33.4000, 39.1296, 44.2667 ARX(9, 2, 0) +15.6267, 21.9089, 28.0047, 33.4205, 39.1586, 44.2015 ARX(10, 2, 0) * * > runMain scalation.modeling.forecasting.example_CovidTest15 */ @main def example_CovidTest15 (): Unit = // val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val exo_vars = Array ("icu_patients", "hosp_patients") +// val exo_vars = Array ("icu_patients", "hosp_patients") + val exo_vars = NO_EXO val (xxe, yy) = loadData (exo_vars, response) println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") @@ -689,6 +621,7 @@ end example_CovidTest14 // val y = yy // full val y = yy(0 until 116) // clip the flat end val hh = 6 // maximum forecasting horizon +// hp("lambda") = 1.0 // regularization parameter banner (s"exo_vars = ${stringOf (exo_vars)}, endo_var = $response") println (s"xe.dims = ${xe.dims}, y.dim = ${y.dim}") @@ -697,12 +630,7 @@ end example_CovidTest14 for p <- 1 to 10 do // ARX hyper-parameter settings hp("p") = p hp("q") = min (2, p) - val mod = ARX (xe, y, hh) // create model for time series data - banner (s"In-ST Test: ${mod.modelName} Model") - mod.trainNtest_x ()() // train and test on full dataset - - mod.forecastAll () - mod.diagnoseAll (mod.getY, mod.getYf) + ARX (xe, y, hh).inSample_Test () // create an ARX model and do In-Sample Testing end for end example_CovidTest15 @@ -714,23 +642,35 @@ end example_CovidTest15 * Runs Auto-Regressive, Exogenous ARX(p, q, n) models for several p values, * and horizons 1 to 6, see sMAPE metrics below: * - * 12.2356, 20.6830, 35.2603, 43.9974, 51.5944, 52.0301 ARX(1, 1, 2) - * 9.72391, 20.6254, 25.4950, 34.2458, 44.5078, 49.9804 ARX(2, 2, 2) - * 10.0738, 21.4470, 26.2178, 34.2212, 44.0982, 49.4524 ARX(3, 2, 2) - * 9.29391, 19.6487, 22.8980, 31.6528, 41.6049, 46.9430 ARX(4, 2, 2) - * 10.2806, 19.2649, 23.1211, 32.1942, 41.9189, 47.2119 ARX(5, 2, 2) - * 11.4258, 19.7370, 24.5103, 
34.4673, 44.9873, 49.7458 ARX(6, 2, 2) - * 11.2501, 19.0128, 22.3547, 31.9938, 42.1729, 47.1063 ARX(7, 2, 2) - * 10.9763, 18.8067, 22.5181, 32.0960, 41.8394, 47.1825 ARX(8, 2, 2) - * 11.1796, 19.3087, 23.7479, 33.1067, 42.8283, 47.6904 ARX(9, 2, 2) - * 10.9499, 20.6255, 25.8116, 35.5139, 45.2163, 50.0280 ARX(10, 2, 2) +11.0073, 19.3191, 26.2452, 35.9969, 47.8490, 58.9978 ARX(1, 1, 2) +10.4339, 19.6885, 25.2416, 35.4041, 47.6633, 58.1026 ARX(2, 2, 2) +10.0477, 19.6210, 25.8577, 35.9296, 47.6708, 58.2116 ARX(3, 2, 2) +9.34031, 17.6473, 23.3025, 33.0873, 46.0021, 57.6114 ARX(4, 2, 2) +10.5476, 17.6866, 23.1955, 33.2009, 45.4878, 57.6834 ARX(5, 2, 2) +11.5526, 18.6435, 24.7745, 35.5272, 44.6416, 57.7697 ARX(6, 2, 2) +11.3831, 17.8615, 23.0079, 33.1844, 44.4669, 57.2788 ARX(7, 2, 2) +11.1061, 17.4816, 22.1642, 33.2042, 45.1645, 57.6688 ARX(8, 2, 2) +11.3308, 18.0780, 23.5240, 34.5589, 46.3419, 58.6557 ARX(9, 2, 2) +11.4131, 19.5224, 25.7109, 36.8454, 49.5382, 60.9186 ARX(10, 2, 2) + * +18.4998, 29.4024, 42.6675, 54.2169, 63.0084, 72.2290 ARX(1, 1, 0) Agrees with ARY(p) +17.8595, 27.8544, 40.7768, 52.5351, 61.0972, 70.1973 ARX(2, 2, 0) +16.4745, 22.9627, 32.9324, 42.8385, 51.6931, 61.5292 ARX(3, 2, 0) +15.7400, 21.2937, 28.9582, 38.8393, 42.0740, 51.3300 ARX(4, 2, 0) +16.2315, 21.2230, 29.6593, 38.9380, 43.1389, 49.8287 ARX(5, 2, 0) +16.1056, 22.6427, 30.9964, 40.9072, 46.1625, 54.0774 ARX(6, 2, 0) +16.6737, 23.1236, 33.1328, 43.0148, 49.7683, 55.2326 ARX(7, 2, 0) +16.7242, 25.7990, 36.3484, 47.6660, 55.1432, 60.7600 ARX(8, 2, 0) +16.5522, 24.9271, 35.1184, 46.2757, 53.5985, 59.8611 ARX(9, 2, 0) +16.0764, 23.6507, 33.7168, 44.2123, 51.0500, 61.1110 ARX(10, 2, 0) * * > runMain scalation.modeling.forecasting.example_CovidTest16 */ @main def example_CovidTest16 (): Unit = // val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val exo_vars = Array ("icu_patients", "hosp_patients") +// val exo_vars = Array ("icu_patients", 
"hosp_patients") + val exo_vars = NO_EXO val (xxe, yy) = loadData (exo_vars, response) println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") @@ -739,6 +679,7 @@ end example_CovidTest15 // val y = yy // full val y = yy(0 until 116) // clip the flat end val hh = 6 // maximum forecasting horizon +// hp("lambda") = 1.0 // regularization parameter banner (s"exo_vars = ${stringOf (exo_vars)}, endo_var = $response") println (s"xe.dims = ${xe.dims}, y.dim = ${y.dim}") @@ -747,14 +688,7 @@ end example_CovidTest15 for p <- 1 to 10 do // ARX hyper-parameter settings hp("p") = p hp("q") = min (2, p) - val mod = ARX (xe, y, hh) // create an ARX model - banner (s"TnT Test: ${mod.modelName} Model") - mod.trainNtest_x ()() // use customized trainNtest_x - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set + ARX (xe, y, hh).tnT_Test () // create an ARX model and do TnT Testing end for end example_CovidTest16 @@ -762,117 +696,248 @@ end example_CovidTest16 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `example_CovidTest17` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. + * Uses In-Sample Testing, i.e., train and test on the same data. * Runs Auto-Regressive, Exogenous ARX_D(p, q, n) models for several p values. 
- * > runMain scalation.modeling.forecasting.example_CovidTest17 * +16.8503, 25.2909, 35.4803, 44.6073, 52.2135, 59.4200 ARX_D(1, 1, 2) FIX Bug -- too high +14.0491, 20.5723, 30.7172, 40.1614, 48.7536, 56.8138 ARX_D(2, 2, 2) +13.8498, 20.2659, 30.2173, 39.6863, 48.4629, 56.3433 ARX_D(3, 2, 2) +13.7940, 19.9815, 29.8293, 39.3993, 48.0656, 55.9535 ARX_D(4, 2, 2) +13.7917, 19.9831, 29.8315, 39.4017, 48.0668, 55.9540 ARX_D(5, 2, 2) +13.8881, 20.0432, 29.5594, 39.4102, 47.9529, 55.8790 ARX_D(6, 2, 2) +13.8256, 20.2026, 29.7336, 39.5616, 48.0363, 55.9488 ARX_D(7, 2, 2) +13.9522, 20.0203, 29.5011, 39.4600, 47.9126, 55.8402 ARX_D(8, 2, 2) +13.7933, 19.8568, 29.2884, 39.3859, 47.7936, 55.7004 ARX_D(9, 2, 2) +13.8925, 19.8232, 29.0014, 39.2233, 47.6079, 55.3331 ARX_D(10, 2, 2) + * + * > runMain scalation.modeling.forecasting.example_CovidTest17 + */ @main def example_CovidTest17 (): Unit = - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, response) -// val (x, y) = (xx, yy) // full - val (x, y) = (xx(0 until 116), yy(0 until 116)) // clip the flat end - val hh = 6 // max forecasting horizon +// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val exo_vars = Array ("icu_patients", "hosp_patients") + val (xxe, yy) = loadData (exo_vars, response) + println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + +// val xe = xxe // full + val xe = xxe(0 until 116) // clip the flat end +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon +// hp("lambda") = 1.0 // regularization parameter banner (s"exo_vars = ${stringOf (exo_vars)}, endo_var = $response") - println (s"x.dims = ${x.dims}, y.dim = ${y.dim}") + println (s"xe.dims = ${xe.dims}, y.dim = ${y.dim}") new Plot (null, y, null, s"y ($response)", lines = true) - for p <- 1 to 10 do // ARX hyper-parameter settings - val mod = ARX_MV.exo (y, 
p, x, hh)(1, p+1) // create an ARX_MV model - banner (s"In-ST Test: ${mod.modelName} Model") - mod.trainNtest ()() + for p <- 1 to 10 do // ARX_D hyper-parameter settings + hp("p") = p + hp("q") = min (2, p) + ARX_D (xe, y, hh).inSample_Test () // create an ARX_D model and do In-Sample Testing end for end example_CovidTest17 - */ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `example_CovidTest18` main function tests the `Example_Covid` object. * Uses Train-n-Test Split (TnT) with Rolling Validation. * Runs Auto-Regressive, Exogenous ARX_D(p, q, n) models for several p values. - * > runMain scalation.modeling.forecasting.example_CovidTest18 * +11.0164, 18.1939, 23.2358, 31.6475, 42.8253, 54.3245 ARX_D(1, 1, 2) +10.4521, 13.8955, 13.5074, 24.7088, 35.3807, 59.9904 ARX_D(2, 2, 2) +10.0793, 11.2167, 14.3570, 25.6690, 40.5868, 62.7553 ARX_D(3, 2, 2) +9.34877, 11.5869, 17.2668, 29.0107, 44.4698, 63.0348 ARX_D(4, 2, 2) +10.5457, 13.4293, 17.9561, 31.2434, 47.5865, 68.7770 ARX_D(5, 2, 2) +11.5562, 12.9348, 18.9845, 34.2571, 55.0142, 76.4654 ARX_D(6, 2, 2) +11.3872, 12.5172, 19.2266, 33.6051, 60.1889, 80.5061 ARX_D(7, 2, 2) +11.1091, 12.5290, 17.6071, 34.1205, 61.5160, 76.5128 ARX_D(8, 2, 2) +11.3466, 12.4147, 18.3370, 34.4428, 61.0988, 78.5466 ARX_D(9, 2, 2) +11.4265, 12.6378, 17.6644, 34.6527, 61.0367, 81.0210 ARX_D(10, 2, 2) + * + * > runMain scalation.modeling.forecasting.example_CovidTest18 + */ @main def example_CovidTest18 (): Unit = - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, response) -// val (x, y) = (xx, yy) // full - val (x, y) = (xx(0 until 116), yy(0 until 116)) // clip the flat end - val hh = 6 // max forecasting horizon +// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val exo_vars = Array ("icu_patients", "hosp_patients") + val (xxe, yy) = loadData (exo_vars, response) + 
println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + +// val xe = xxe // full + val xe = xxe(0 until 116) // clip the flat end +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon +// hp("lambda") = 1.0 // regularization parameter banner (s"exo_vars = ${stringOf (exo_vars)}, endo_var = $response") - println (s"x.dims = ${x.dims}, y.dim = ${y.dim}") + println (s"xe.dims = ${xe.dims}, y.dim = ${y.dim}") new Plot (null, y, null, s"y ($response)", lines = true) - for p <- 1 to 10 do // ARX hyper-parameter settings - val mod = ARX_MV.exo (y, p, x, hh)(1, p+1) // create an ARX_MV model - banner (s"TnT Test: ${mod.modelName} Model") - mod.trainNtest ()() - ARX_MV.rollValidate (mod) // direct does all horizon at once + for p <- 1 to 10 do // ARX_D hyper-parameter settings + hp("p") = p + hp("q") = min (2, p) + ARX_D (xe, y, hh).tnT_Test () // create an ARX_D model and do TnT Testing end for end example_CovidTest18 - */ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `example_CovidTest19` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. - * Runs Auto-Regressive, Exogenous ARX_Quad_D(p, q, n) models for several p values. - * > runMain scalation.modeling.forecasting.example_CovidTest19 + * Uses In-Sample Testing, i.e., train and test on the same data. + * Runs Auto-Regressive, Exogenous ARX_Quad(p, q, n) models for several p values. + * Uses RidgeRegression with lambda = 0.1; y^pow with pow = 1.5. 
+ * +16.6072, 24.4626, 32.6457, 40.8155, 47.9617, 54.1519 ARX_Quad(1, 1, 2) +14.8114, 18.7980, 24.6991, 33.2430, 42.0688, 49.9135 ARX_Quad(2, 2, 2) +14.3609, 18.4683, 24.3359, 32.3833, 40.2261, 48.3873 ARX_Quad(3, 2, 2) +14.1085, 18.5579, 24.8609, 32.6986, 40.6706, 48.4073 ARX_Quad(4, 2, 2) +13.8807, 18.3509, 24.6423, 32.0085, 40.2473, 47.6040 ARX_Quad(5, 2, 2) +13.6989, 18.4814, 24.6467, 32.5355, 40.7019, 48.2871 ARX_Quad(6, 2, 2) +13.7876, 18.4203, 24.6844, 32.2166, 40.6423, 48.0845 ARX_Quad(7, 2, 2) +13.9035, 17.5713, 23.6566, 31.4147, 40.4540, 48.7819 ARX_Quad(8, 2, 2) +13.7435, 17.6190, 23.4393, 31.4997, 40.7884, 49.3476 ARX_Quad(9, 2, 2) +13.8470, 17.8286, 22.5861, 30.3541, 38.8571, 47.1813 ARX_Quad(10, 2, 2) * + * > runMain scalation.modeling.forecasting.example_CovidTest19 + */ @main def example_CovidTest19 (): Unit = - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "new_cases", "people_vaccinated", "people_fully_vaccinated") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, response) -// val (x, y) = (xx, yy) // full - val (x, y) = (xx(0 until 116), yy(0 until 116)) // clip the flat end - val hh = 6 // max forecasting horizon +// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val exo_vars = Array ("icu_patients", "hosp_patients") + val (xxe, yy) = loadData (exo_vars, response) + println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + +// val xe = xxe // full + val xe = xxe(0 until 116) // clip the flat end +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon +// hp("lambda") = 1.0 // regularization parameter banner (s"exo_vars = ${stringOf (exo_vars)}, endo_var = $response") - println (s"x.dims = ${x.dims}, y.dim = ${y.dim}") + println (s"xe.dims = ${xe.dims}, y.dim = ${y.dim}") new Plot (null, y, null, s"y ($response)", lines = true) - 
for p <- 1 to 10 do // ARX hyper-parameter settings - val mod = ARX_Quad_MV.exo (y, p, x, hh)(1, p+1) // create an ARX_Quad_MV model - banner (s"In-ST Test: ${mod.modelName} Model") - mod.trainNtest ()() + for p <- 1 to 10 do // ARX_Quad hyper-parameter settings + hp("p") = p + hp("q") = min (2, p) + ARX_Quad (xe, y, hh).inSample_Test () // create an ARX_Quad model and do In-Sample Testing end for end example_CovidTest19 - */ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `example_CovidTest20` main function tests the `Example_Covid` object. * Uses Train-n-Test Split (TnT) with Rolling Validation. - * Runs Auto-Regressive, Exogenous ARX_Quad_D(p, q, n) models for several p values. - * > runMain scalation.modeling.forecasting.example_CovidTest20 + * Runs Auto-Regressive, Exogenous ARX_Quad(p, q, n) models for several p values. + * Uses RidgeRegression with lambda = 0.1; y^pow with pow = 1.5. + * +11.8379, 18.8631, 25.3769, 34.6704, 46.0594, 58.3846 ARX_Quad(1, 1, 2) +11.4651, 13.9379, 17.4264, 29.5020, 41.4593, 69.7777 ARX_Quad(2, 2, 2) +10.9527, 12.5247, 16.9508, 28.3049, 44.0043, 66.2080 ARX_Quad(3, 2, 2) +10.0516, 12.8597, 17.9446, 30.4416, 45.4654, 64.7888 ARX_Quad(4, 2, 2) +10.9845, 13.3932, 18.5001, 32.2812, 48.1254, 69.1962 ARX_Quad(5, 2, 2) +11.1224, 13.8001, 20.4705, 36.9029, 56.0602, 82.4567 ARX_Quad(6, 2, 2) +11.8387, 13.8277, 21.2469, 37.6986, 63.8038, 88.8678 ARX_Quad(7, 2, 2) +11.5418, 14.6203, 21.4781, 40.8681, 70.1170, 88.3799 ARX_Quad(8, 2, 2) +12.3724, 14.8509, 21.5095, 40.9054, 70.3691, 87.8717 ARX_Quad(9, 2, 2) +12.6086, 15.7098, 21.6644, 41.5958, 69.4722, 82.9493 ARX_Quad(10, 2, 2) * + * > runMain scalation.modeling.forecasting.example_CovidTest20 + */ @main def example_CovidTest20 (): Unit = - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "new_cases", "people_vaccinated", 
"people_fully_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, response) -// val (x, y) = (xx, yy) // full - val (x, y) = (xx(0 until 116), yy(0 until 116)) // clip the flat end - val hh = 6 // max forecasting horizon - val pw = 1.5 // power pw: tune with values around 2.0 +// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val exo_vars = Array ("icu_patients", "hosp_patients") + val (xxe, yy) = loadData (exo_vars, response) + println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + +// val xe = xxe // full + val xe = xxe(0 until 116) // clip the flat end +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon +// hp("lambda") = 1.0 // regularization parameter banner (s"exo_vars = ${stringOf (exo_vars)}, endo_var = $response") - println (s"x.dims = ${x.dims}, y.dim = ${y.dim}") + println (s"xe.dims = ${xe.dims}, y.dim = ${y.dim}") new Plot (null, y, null, s"y ($response)", lines = true) - for p <- 1 to 10 do // ARX hyper-parameter settings - val mod = ARX_Quad_MV.exo (y, p, x, hh, pw)(1, p+1) // create an ARX_Quad_MV model - banner (s"TnT Test: ${mod.modelName} Model") - mod.trainNtest ()() - ARX_MV.rollValidate (mod) // direct does all horizon at once + for p <- 1 to 10 do // ARX_Quad_D hyper-parameter settings + hp("p") = p + hp("q") = min (2, p) + ARX_Quad_D (xe, y, hh).tnT_Test () // create an ARX_Quad_D model and do TnT Testing end for end example_CovidTest20 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `example_CovidTest21` main function tests the `Example_Covid` object. + * Uses In-Sample Testing, i.e., train and test on the same data. + * Runs Auto-Regressive, Exogenous ARX_Quad_D(p, q, n) models for several p values. 
+ * > runMain scalation.modeling.forecasting.example_CovidTest21 + */ +@main def example_CovidTest21 (): Unit = + +// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val exo_vars = Array ("icu_patients", "hosp_patients") + val (xxe, yy) = loadData (exo_vars, response) + println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + +// val xe = xxe // full + val xe = xxe(0 until 116) // clip the flat end +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon +// hp("lambda") = 1.0 // regularization parameter + + banner (s"exo_vars = ${stringOf (exo_vars)}, endo_var = $response") + println (s"xe.dims = ${xe.dims}, y.dim = ${y.dim}") + new Plot (null, y, null, s"y ($response)", lines = true) + + for p <- 1 to 10 do // ARX_Quad_D hyper-parameter settings + hp("p") = p + hp("q") = min (2, p) + ARX_Quad_D (xe, y, hh).inSample_Test () // create an ARX_Quad_D model and do In-Sample Testing + end for + +end example_CovidTest21 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `example_CovidTest22` main function tests the `Example_Covid` object. + * Uses Train-n-Test Split (TnT) with Rolling Validation. + * Runs Auto-Regressive, Exogenous ARX_Quad_D(p, q, n) models for several p values. 
+ * > runMain scalation.modeling.forecasting.example_CovidTest22 */ +@main def example_CovidTest22 (): Unit = + +// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val exo_vars = Array ("icu_patients", "hosp_patients") + val (xxe, yy) = loadData (exo_vars, response) + println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + +// val xe = xxe // full + val xe = xxe(0 until 116) // clip the flat end +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon +// hp("lambda") = 1.0 // regularization parameter + + banner (s"exo_vars = ${stringOf (exo_vars)}, endo_var = $response") + println (s"xe.dims = ${xe.dims}, y.dim = ${y.dim}") + new Plot (null, y, null, s"y ($response)", lines = true) + + for p <- 1 to 10 do // ARX_Quad_D hyper-parameter settings + hp("p") = p + hp("q") = min (2, p) + ARX_Quad_D (xe, y, hh).tnT_Test () // create an ARX_Quad_D model and do TnT Testing + end for + +end example_CovidTest22 diff --git a/src/main/scala/scalation/modeling/forecasting/Example_GasFurnace.scala b/src/main/scala/scalation/modeling/forecasting/Example_GasFurnace.scala index 90e6c7b77..c6aecfd02 100644 --- a/src/main/scala/scalation/modeling/forecasting/Example_GasFurnace.scala +++ b/src/main/scala/scalation/modeling/forecasting/Example_GasFurnace.scala @@ -97,7 +97,7 @@ end example_GasFurnaceTest @main def example_GasFurnaceTest2 (): Unit = import Example_GasFurnace._ - import scala.collection.mutable.Set + import scala.collection.mutable.{LinkedHashSet => LSET} val (x, y) = loadData (header, "co2") @@ -109,8 +109,8 @@ end example_GasFurnaceTest xj = scaleV (extreme (xj), (0.0, 2.0))(xj) // rescale vector xj to [0, 2] val xxj = MatrixD.fromVector (xj) // val mod = SymbolicRegression.quadratic (xxj, y) -// val mod = SymbolicRegression.rescale (xxj, y, null, Set (1.0, 2.0, 3.0), cross = false) - val mod = SymbolicRegression (xxj, y, null, Set (0.5, 1.0, 2.0, 3.0), cross = false) 
+// val mod = SymbolicRegression.rescale (xxj, y, null, LSET (1.0, 2.0, 3.0), cross = false) + val mod = SymbolicRegression (xxj, y, null, LSET (0.5, 1.0, 2.0, 3.0), cross = false) mod.trainNtest ()() val yp = mod.predict (mod.getX) println (mod.summary ()) diff --git a/src/main/scala/scalation/modeling/forecasting/Example_ILI.scala b/src/main/scala/scalation/modeling/forecasting/Example_ILI.scala index 55b171268..dd3b62f77 100644 --- a/src/main/scala/scalation/modeling/forecasting/Example_ILI.scala +++ b/src/main/scala/scalation/modeling/forecasting/Example_ILI.scala @@ -15,10 +15,13 @@ package scalation package modeling package forecasting +import scala.collection.mutable.{LinkedHashSet => LSET} import scala.math._ import scalation.mathstat._ +import TransformT._ + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `Example_ILI` object provides a convenient way to load ILI weekly data. */ @@ -26,7 +29,8 @@ object Example_ILI: import scala.collection.mutable.HashMap - val fileName = "national_illness.csv" +// val fileName = "national_illness.csv" // commonly used ILI dataset + val fileName = "national_illness_clip.csv" // more recent data + 2 years of Covid clipped out val header = Array ("%WEIGHTED ILI", // percent per state weighted by population "%UNWEIGHTED ILI", // aggregated without weighting @@ -108,7 +112,7 @@ end example_ILITest */ @main def example_ILITest2 (): Unit = - import scala.collection.mutable.Set + import scala.collection.mutable.{LinkedHashSet => LSET} val (x, y) = loadData (header, response) @@ -120,8 +124,8 @@ end example_ILITest xj = scaleV (extreme (xj), (0.0, 2.0))(xj) // rescale vector xj to [0, 2] val xxj = MatrixD.fromVector (xj) // val mod = SymbolicRegression.quadratic (xxj, y) -// val mod = SymbolicRegression.rescale (xxj, y, null, Set (1.0, 2.0, 3.0), cross = false) - val mod = SymbolicRegression (xxj, y, null, Set (0.5, 1.0, 2.0, 3.0), cross = false) +// val mod = SymbolicRegression.rescale 
(xxj, y, null, LSET (1.0, 2.0, 3.0), cross = false) + val mod = SymbolicRegression (xxj, y, null, LSET (0.5, 1.0, 2.0, 3.0), cross = false) mod.trainNtest ()() val yp = mod.predict (mod.getX) println (mod.summary ()) @@ -154,20 +158,104 @@ end example_ILITest2 new Plot (null, y, null, s"y ($response)", lines = true) - new NullModel (y, hh).inSampleTest () - new TrendModel (y, hh).inSampleTest () - new SimpleMovingAverage (y, hh).inSampleTest () - new WeightedMovingAverage (y, hh).inSampleTest () - new SimpleExpSmoothing (y, hh).inSampleTest () - new RandomWalk (y, hh).inSampleTest () - new RandomWalkS (y, hh).inSampleTest () - new AR (y, hh).inSampleTest () + new NullModel (y, hh).inSample_Test () + new TrendModel (y, hh).inSample_Test () + new SimpleMovingAverage (y, hh).inSample_Test () + new WeightedMovingAverage (y, hh).inSample_Test () + new SimpleExpSmoothing (y, hh).inSample_Test () + new RandomWalk (y, hh).inSample_Test () + new RandomWalkS (y, hh).inSample_Test () + new AR (y, hh).inSample_Test () end example_ILITest3 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest5` main function tests the `Example_ILI` object. +/** The `example_ILITest4` main function tests the `Example_ILI` object. + * Uses Train-n-Test Split (TnT) with Rolling Validation. 
+ * Runs several baseline models for horizons 1 to 6, see sMAPE metrics below: + * + * 57.1057, 60.0825, 62.9136, 64.7453, 67.9247, 70.6674 Null + * 61.9077, 65.1881, 68.7187, 71.4655, 73.9327, 75.9584 Trend + * 22.3044, 30.4325, 45.3661, 55.7217, 67.6973, 77.4038 SMA + * 23.8526, 30.0945, 46.9748, 55.8104, 68.7352, 77.7010 WMA + * 18.3769, 27.1712, 40.3425, 51.8124, 63.7356, 75.0046 SES + * 18.6713, 27.5720, 40.9387, 52.3496, 64.2481, 75.3015 RW + * 18.0855, 26.7084, 39.6941, 51.2218, 63.1873, 74.6834 RWS + * 19.1590, 31.1975, 44.4850, 55.3120, 65.5536, 74.4969 AR(1) + * + * > runMain scalation.modeling.forecasting.example_ILITest4 + */ +@main def example_ILITest4 (): Unit = + + val y = Example_ILI.loadData_y (response) + val hh = 12 // max forecasting horizon + + new Plot (null, y, null, s"y ($response)", lines = true) + + var mod: Forecaster = null + + banner ("TnT Test: Null Model") + mod = new NullModel (y, hh) + mod.trainNtest ()() + mod.setSkip (0) // start at beginning of test-set + mod.rollValidate () // TnT with Rolling Validation + mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + + banner ("TnT Test: Trend Model") + mod = new TrendModel (y, hh) + mod.trainNtest ()() + mod.setSkip (0) + mod.rollValidate () // TnT with Rolling Validation + mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + + banner ("TnT Test: Simple Moving Average Model") + mod = new SimpleMovingAverage (y, hh) + mod.trainNtest ()() + mod.setSkip (0) + mod.rollValidate () // TnT with Rolling Validation + mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + + banner ("TnT Test: Weighted Moving Average Model") + mod = new WeightedMovingAverage (y, hh) + mod.trainNtest ()() + mod.setSkip (0) + mod.rollValidate () // TnT with Rolling Validation + mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + + banner ("TnT Test: 
Simple Exponential Smoothing Model") + mod = new SimpleExpSmoothing (y, hh) + mod.trainNtest ()() + mod.setSkip (0) + mod.rollValidate () // TnT with Rolling Validation + mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + + banner ("TnT Test: Random Walk Model") + mod = new RandomWalk (y, hh) + mod.trainNtest ()() + mod.setSkip (0) + mod.rollValidate () // TnT with Rolling Validation + mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + + banner ("TnT Test: Random Walk Slope Adjusted Model") + mod = new RandomWalkS (y, hh) + mod.trainNtest ()() + mod.setSkip (0) + mod.rollValidate () // TnT with Rolling Validation + mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + + banner ("TnT Test: Auto-Regressive AR(1) Model") + mod = new AR (y, hh) + mod.trainNtest ()() + mod.setSkip (0) + mod.rollValidate () // TnT with Rolling Validation + mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + +end example_ILITest4 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `example_ILITest5` main function tests the `Example_ILI` object. * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. 
* Runs Auto-Regressive AR(p) models for several p values and horizons 1 to 6, * see sMAPE metrics below: @@ -195,7 +283,7 @@ end example_ILITest3 for p <- 1 to 10 do // AR hyper-parameter settings hp("p") = p - new AR (y, hh).inSampleTest () // create and test an AR model + new AR (y, hh).inSample_Test () // create and test an AR model end for end example_ILITest5 @@ -219,7 +307,7 @@ end example_ILITest5 banner (s"Test: ${mod.modelName} on ILI Dataset") mod.trainNtest ()() // train and test the model on full dataset -// mod.setSkip (p) // full ARY-formula available when t >= p +// mod.setSkip (p) // full ARMA-formula available when t >= p mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y mod.diagnoseAll (y, mod.getYf, showYf = false) // model diagnostics for all horizons end for @@ -240,7 +328,7 @@ end example_ILITest6 val hh = 12 // maximum forecasting horizon hp("spec") = 1 // trend specification - for p <- 1 to 26 do + for p <- 52 to 52 do hp("p") = p // set p (ARY) hyper-parameter val mod = ARY (y, hh) // create model for time series data banner (s"Test: ${mod.modelName} on ILI Dataset") @@ -267,7 +355,7 @@ end example_ILITest7 val hh = 12 // maximum forecasting horizon hp("spec") = 1 // trend specification - for p <- 1 to 26 do + for p <- 52 to 52 do hp("p") = p // set p (ARY_D) hyper-parameter val mod = ARY_D (y, hh) // create model for time series data banner (s"Test: ${mod.modelName} on ILI Dataset") @@ -285,7 +373,7 @@ end example_ILITest8 /** The `example_ILITest9` main function test the `Example_ILI` object. * This test compares the `ARIMA` model for several values of p and q. * > runMain scalation.modeling.forecasting.example_ILITest9 - */ + * @main def example_ILITest9 (): Unit = import AR.hp @@ -305,10 +393,12 @@ end example_ILITest8 end for end example_ILITest9 + */ + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `example_ILITest10` main function test the `Example_ILI` object. 
- * This test compares the `ARX_Symb` and `ARX_Symb_D` models for several values of p and q. + * This test compares the `ARX_SR` and `ARX_SR_D` models for several values of p and q. * > runMain scalation.modeling.forecasting.example_ILITest10 */ @main def example_ILITest10 (): Unit = @@ -319,151 +409,47 @@ end example_ILITest9 val (xe, y) = loadData (exo_vars, response) println (s"xe.dims = ${xe.dims}, y.dim = ${y.dim}") - val hh = 12 // maximum forecasting horizon - val p = 10 - val q = 10 - val pp = 1.5 + val hh = 12 // maximum forecasting horizon + val p = 10 + val q = 10 + val pow = 1.5 hp("p") = p // endo lags hp("q") = q // exo lags hp("spec") = 1 // trend specification: 0, 1, 2, 3, 5 hp("lwave") = 20 // wavelength (distance between peaks) hp("cross") = 1 - hp("lambda") = 1.0 + RidgeRegression.hp("lambda") = 1.0 + Transform.hp("p") = pow // power to raise lags to - val ff = Array [Transform] (powForm (VectorD (pp))) - val gg = Array [Transform] () + val fEn = LSET (Pow) + val fEx = Array (LSET (Pow), LSET (Pow)) - val mod = ARX_Symb (xe, y, hh, fEndo = ff, fExo = gg) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") + val mod = ARX_SR (xe, y, hh, fEndo_enab = fEn, fExo_enab = fEx) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on ILI Dataset") mod.trainNtest_x ()() // train and test on full dataset mod.setSkip(0) mod.rollValidate (rc = 2) // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng(y.dim), 0) + mod.diagnoseAll (y, mod.getYf, Forecaster.teRng(y.dim)) banner ("Feature Selection Technique: stepwise") val (cols, rSq) = mod.stepwiseSelAll () // R^2, R^2 bar, sMAPE, R^2 cv // val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, sMAPE, R^2 cv val k = cols.size println (s"k = $k") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "R^2 cv"), - s"R^2 vs n for ${mod.modelName}", lines = true) + new PlotM (null, rSq.ᵀ, 
Regression.metrics, s"R^2 vs n for ${mod.modelName}", lines = true) println (s"rSq = $rSq") val modBest = mod.getBest.mod val x_fs = modBest.getX - val yy_D = MakeMatrix4TS.makeMatrix4Y (y, hh, false) // FIX - switch to usiing apply method in next line) - val mod_D = new ARX_Symb_D (x_fs, yy_D, hh, n_exo = 1, null) + val yy_D = MakeMatrix4TS.makeMatrix4Y (y, hh, false) // FIX - switch to using apply method in next line + val mod_D = new ARX_SR_D (x_fs, yy_D, hh, n_exo = 1, null) mod_D.trainNtest_x ()() mod_D.setSkip (0) mod_D.rollValidate (rc = 2) // TnT with Rolling Validation - mod_D.diagnoseAll (y, mod_D.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set + mod_D.diagnoseAll (y, mod_D.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set end example_ILITest10 -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_ILITest10` main function test the `Example_ILI` object. - * This test compares the several models for several values of p and q. 
- * > runMain scalation.modeling.forecasting.example_ILITest11 - */ -@main def example_ILITest11 (): Unit = - - import MakeMatrix4TS.hp - - // val exo_vars = Array ("%WEIGHTED ILI", "%UNWEIGHTED ILI") - val exo_vars = Array("OT") - - val (xe, y) = loadData (exo_vars, response) - println (s"xe.dims = ${xe.dims}, y.dim = ${y.dim}") - - val hh = 6 // maximum forecasting horizon - val p = 6 - val q = 6 - hp("p") = p // endo lags - hp("q") = q // exo lags - hp("spec") = 1 // trend specification: 0, 1, 2, 3, 5 - hp("lwave") = 20 // wavelength (distance between peaks) - hp("cross") = 0 - hp("lambda") = 1.0 - - - banner("RandomWalkS") - val mod1 = RandomWalkS(y, hh) // create model for time series data - banner(s"In-ST Forecasts: ${mod1.modelName} on ILI Dataset") - mod1.trainNtest()() // train and test on full dataset - mod1.forecastAll() // forecast h-steps ahead (h = 1 to hh) for all y - mod1.diagnoseAll(mod1.getY, mod1.getYf) - - println("rollValidate") - mod1.setSkip(0) - mod1.rollValidate() // TnT with Rolling Validation - mod1.diagnoseAll(mod1.getY, mod1.getYf, Forecaster.teRng(y.dim), 0) - - - banner("AR") - val mod2 = AR(y, hh) // create model for time series data - banner(s"In-ST Forecasts: ${mod2.modelName} on ILI Dataset") - mod2.trainNtest()() // train and test on full dataset - mod2.forecastAll() // forecast h-steps ahead (h = 1 to hh) for all y - mod2.diagnoseAll(mod2.getY, mod2.getYf) - - println("rollValidate") - mod2.setSkip(0) - mod2.rollValidate() // TnT with Rolling Validation - mod2.diagnoseAll(mod2.getY, mod2.getYf, Forecaster.teRng(y.dim), 0) - - - banner("ARX") - val mod3 = ARX(xe, y, hh) // create model for time series data - banner(s"In-ST Forecasts: ${mod3.modelName} on ILI Dataset") - mod3.trainNtest_x()() // train and test on full dataset - mod3.forecastAll() // forecast h-steps ahead (h = 1 to hh) for all y - mod3.diagnoseAll(mod3.getY, mod3.getYf) - - println("rollValidate") - mod3.setSkip(0) - mod3.rollValidate() // TnT with Rolling 
Validation - mod3.diagnoseAll(mod3.getY, mod3.getYf, Forecaster.teRng(y.dim), 0) - - - banner("ARX_D") - val mod4 = ARX_D(xe, y, hh) // create model for time series data - banner(s"In-ST Forecasts: ${mod4.modelName} on ILI Dataset") - mod4.trainNtest_x()() // train and test on full dataset - mod4.forecastAll() // forecast h-steps ahead (h = 1 to hh) for all y - mod4.diagnoseAll(mod4.getY, mod4.getYf) - - println("rollValidate") - mod4.setSkip(0) - mod4.rollValidate() // TnT with Rolling Validation - mod4.diagnoseAll(mod4.getY, mod4.getYf, Forecaster.teRng(y.dim), 0) - - - banner("ARX_Quad") - val mod5 = ARX_Quad(xe, y, hh) // create model for time series data - banner(s"In-ST Forecasts: ${mod5.modelName} on ILI Dataset") - mod5.trainNtest_x()() // train and test on full dataset - mod5.forecastAll() // forecast h-steps ahead (h = 1 to hh) for all y - mod5.diagnoseAll(mod5.getY, mod5.getYf) - - println("rollValidate") - mod5.setSkip(0) - mod5.rollValidate() // TnT with Rolling Validation - mod5.diagnoseAll(mod5.getY, mod5.getYf, Forecaster.teRng(y.dim), 0) - - - banner("ARX_Quad_D") - val mod6 = ARX_Quad_D(xe, y, hh) // create model for time series data - banner(s"In-ST Forecasts: ${mod6.modelName} on ILI Dataset") - mod6.trainNtest_x()() // train and test on full dataset - mod6.forecastAll() // forecast h-steps ahead (h = 1 to hh) for all y - mod6.diagnoseAll(mod6.getY, mod6.getYf) - - println("rollValidate") - mod6.setSkip(0) - mod6.rollValidate() // TnT with Rolling Validation - mod6.diagnoseAll(mod6.getY, mod6.getYf, Forecaster.teRng(y.dim), 0) - -end example_ILITest11 \ No newline at end of file diff --git a/src/main/scala/scalation/modeling/forecasting/Filter.scala b/src/main/scala/scalation/modeling/forecasting/Filter.scala new file mode 100644 index 000000000..6129825b4 --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/Filter.scala @@ -0,0 +1,39 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** 
@author John Miller + * @version 2.0 + * @date Thu May 22 01:21:46 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Filter: Base Trait for Filters + * + * @see www.motamed.nl/assets/pdf/2018_Motamed_Performance.pdf + * A Performance Analysis of Filtering Methods applied to WiFi-based Position Reconstruction + * Exponential, Moving-Average, Gaussian, Savitzky-Golay, Kalman Filters + */ + +package scalation +package modeling +package forecasting + +import scalation.mathstat._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Filter` trait provides basic time series capabilities for filters. + * A filter is used to pull out the important information from a time series. + * Commonly, this involves improving the signal-to-noise ratio, which is often + * accomplished by using a low-pass filter that remove high frequencies. Such + * filters are also called smoothers (the smoothed time series has less abrupt changes) + * @param y the response vector (time series data) + */ +trait Filter (y: VectorD): + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return a smoothed version of the given time series vector. + * @param y_ the actual time series values to be smoothed + * @param a the smoothing parameter + */ + def smooth (y_ : VectorD = y, a: Double = 0.0): VectorD + +end Filter + diff --git a/src/main/scala/scalation/modeling/forecasting/ForecastMatrix.scala b/src/main/scala/scalation/modeling/forecasting/ForecastMatrix.scala index 0dac9bf05..398a47e03 100644 --- a/src/main/scala/scalation/modeling/forecasting/ForecastMatrix.scala +++ b/src/main/scala/scalation/modeling/forecasting/ForecastMatrix.scala @@ -6,6 +6,8 @@ * @see LICENSE (MIT style license file). 
* * @note Model Framework: Forecast Matrix time x horizons + * + * @see `randomWalkTest4` to see flat and slanted versions of a forecast matrix */ package scalation @@ -16,7 +18,7 @@ import scalation.mathstat._ /*---------------------------------------------------------------------------- -The FORECASTING MATRIX yf: Example Values (made up (e.g., daily) values) +The FORECAST MATRIX yf: Example Values (made up (e.g., daily) values) yf(t, h) = element for base time t and horizon h (its time = t+h) yf(?, h) shows column h that corresponds to a forecast vector for horizon h @@ -49,24 +51,24 @@ Row time t = 3: yf(3, 0) = 4.0 = the actual value for day 3, */ trait ForecastMatrix (y: VectorD, hh: Int, tRng: Range = null): - private val debug = debugf ("ForecastMatrix", true) // debug function + private val debug = debugf ("ForecastMatrix", false) // debug function //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Make the full FORECAST MATRIX where the zeroth column holds the actual time series * and the last column is its time/time index. Columns 1, 2, ... hh are for h steps * ahead forecasts. 
- * @param y_ the actual time series vector to use in making forecasts - * @param hh the maximum forecasting horizon, number of steps ahead to produce forecasts + * @param y_ the actual time series vector to use in making forecasts + * @param hh_ the maximum forecasting horizon, number of steps ahead to produce forecasts */ def makeForecastMatrix (y_ : VectorD = y, hh_ : Int = hh): MatrixD = - val yf_ = new MatrixD (y_.dim, hh + 2) // forecasts for all time points t & horizons to h + val yf_ = new MatrixD (y_.dim, hh_ + 2) // forecasts for all time points t & horizons to h debug ("makeForecastMatrix", s"forecast matrix: y_.dim = ${y_.dim} --> yf_.dims = ${yf_.dims}") for t <- y_.indices do yf_(t, 0) = y_(t) // first column (0) holds the actual time series values if tRng == null then - for t <- yf_.indices do yf_(t, hh+1) = t // last column (h+1) holds time (logical day) + for t <- yf_.indices do yf_(t, hh_ + 1) = t // last column (h+1) holds time (logical day) else - for t <- tRng do yf_(t, hh+1) = t // last column (h+1) holds time (logical day) + for t <- tRng do yf_(t, hh_ + 1) = t // last column (h+1) holds time (logical day) yf_ end makeForecastMatrix @@ -77,12 +79,22 @@ trait ForecastMatrix (y: VectorD, hh: Int, tRng: Range = null): * Also reset the last column that holds the time index to 0, 1, 2, 3, ... 
* @param yf the current forecast matrix */ + def slant (yf: MatrixD): MatrixD = + val yf1 = yf(?, 0) ++ new VectorD (yf.dim2-2) + val yf2 = yf(?, 1 until yf.dim2) // start shift from col 1, flat yf aleady has col 0 to col 1 shift + val yf_ = yf1 +^: yf2.shiftDiag + val j = yf_.dim2 - 1 + for i <- yf_.indices do yf_(i, j) = i + yf_ + end slant +/* def slant (yf: MatrixD): MatrixD = val yf_ = yf.shiftDiag val j = yf_.dim2 - 1 for i <- yf_.indices do yf_(i, j) = i yf_ end slant +*/ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Diagnose the health of the model by computing the Quality of Fit (QoF) measures, @@ -109,18 +121,16 @@ trait ForecastMatrix (y: VectorD, hh: Int, tRng: Range = null): * @param y_ the actual response/output vector * @param yf the entire FORECAST MATRIX * @param rRng the time range, defaults to null (=> full time range) - * @param sft the amount of shift for yfh (FIX - ideally unify the code and remove sft) - * @param showYf the amount of shift for yfh (FIX - ideally unify the code and remove sft) + * @param showYf whether to show the forecast matrix */ - def diagnoseAll (y_ : VectorD, yf: MatrixD, tRng: Range = null, sft: Int = 0, - showYf: Boolean = false): Unit = + def diagnoseAll (y_ : VectorD, yf: MatrixD, tRng: Range = null, showYf: Boolean = false): MatrixD = val ftMat = new MatrixD (hh, Fit.N_QoF) val t1 = if tRng == null then 0 else tRng.start // first time point for h <- 1 to hh do val yy = y_(t1+h-1 until y_.dim) // align the actual response values - val yfh = yf(?, h)(t1+sft until y_.dim-h+sft+1) // align column h of the forecast matrix - println (s"yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// println (s"for h = $h: ${MatrixD (yy, yfh).transpose}") + val yfh = yf(?, h)(t1 until y_.dim-h+1) // align column h of the forecast matrix + debug ("diagnoseAll", s"yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") +// println (s"for h = $h: ${MatrixD (yy, yfh).ᵀ}") // Forecaster.differ (yy, yfh) // uncomment for 
debugging assert (yy.dim == yfh.dim) // make sure the vector sizes agree @@ -132,9 +142,12 @@ trait ForecastMatrix (y: VectorD, hh: Int, tRng: Range = null): end for if showYf then println (s"Final Forecast Matrix yf = ${slant (yf)}") println ("fitMap QoF = ") - println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) + println (Fit.showFitMap (ftMat.ᵀ)) + ftMat end diagnoseAll +// had * @param sft the amount of shift for yfh (defaults to 0, FIX - ideally unify the code and remove sft) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Diagnose the health of the model by computing the Quality of Fit (QoF) measures, * for all horizons and print the results in a table. @@ -143,7 +156,7 @@ trait ForecastMatrix (y: VectorD, hh: Int, tRng: Range = null): * @param yy the actual response/output matrix over all horizons * @param yf the entire FORECAST MATRIX */ - def diagnoseAll (yy: MatrixD, yf: MatrixD): Unit = + def diagnoseAll (yy: MatrixD, yf: MatrixD): MatrixD = val ftMat = new MatrixD (hh, Fit.N_QoF) for h <- 1 to hh do val qof = diagnose (yy(?, h-1), yf(?, h)) // use column h of yf @@ -151,7 +164,8 @@ trait ForecastMatrix (y: VectorD, hh: Int, tRng: Range = null): // println (FitM.fitMap (qof, qoF_names)) end for println ("fitMap QoF = ") - println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) + println (Fit.showFitMap (ftMat.ᵀ)) + ftMat end diagnoseAll end ForecastMatrix diff --git a/src/main/scala/scalation/modeling/forecasting/Forecaster.scala b/src/main/scala/scalation/modeling/forecasting/Forecaster.scala index d1867b7ae..78857e21a 100644 --- a/src/main/scala/scalation/modeling/forecasting/Forecaster.scala +++ b/src/main/scala/scalation/modeling/forecasting/Forecaster.scala @@ -13,11 +13,64 @@ package scalation package modeling package forecasting -//import scala.collection.mutable.LinkedHashSet -import scala.math.{abs, max, round} +import scala.annotation.unused +import 
scala.collection.mutable.IndexedSeq +import scala.math.{abs, max} import scalation.mathstat._ +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Forecast` trait serves a minimal adpater of `Model` to the `Forecast` task. + * Most of implementation is in the `Forecaster` abstract class below. + */ +trait Forecast + extends Model: + + _taskType = TaskType.Forecast // the type of task performed + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the maximum lag used by the model (its capacity to look into the past). + * Models that use more than one past value to make predictions/forecasts must + * override this method, e.g., ARMA (2, 3) should set the cap to max(p, q) = 3. + */ + def cap: Int = 1 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the used data/input matrix. Model that use x should override. + */ + def getX: MatrixD = null + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the feature/variable names. Model that use x should override. + */ + def getFname: Array [String] = Array ("no-x features") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Given a time series y_, train the forecasting function y_ = f(lags (y_)) + e, + * where f(lags (y_)) is a function of the lagged values of y_, + * by fitting its parameters. + * @param x_null the data/input matrix (ignored, pass null) + * @param y_ the testing/full response/output vector (e.g., full y) + */ + def train (x_null: MatrixD, y_ : VectorD): Unit = {} + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The standard signature for prediction does not apply to time series. 
+ */ + def predict (z: VectorD): Double = + throw new UnsupportedOperationException ("predict (VectorD) use the alternative predict") + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The standard signature for prediction does not apply to time series. + */ + def crossValidate (k: Int, rando: Boolean): Array [Statistic] = + throw new UnsupportedOperationException ("Use `rollValidate` instead of `crossValidate`") + end crossValidate + +end Forecast + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `Forecaster` abstract class provides a common framework for several forecasters. * Note, the train method must be called first followed by test. @@ -29,9 +82,9 @@ import scalation.mathstat._ */ abstract class Forecaster (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = null, bakcast: Boolean = false) - extends Diagnoser (dfm = 1, df = y.dim - 1) + extends Diagnoser (dfr = 1, df = y.dim - 1) // dfr and df are updated later per model with ForecastMatrix (y, hh, tRng) - with Model: + with Forecast: private val debug = debugf ("Forecaster", false) // debug function private val flaw = flawf ("Forecaster") // flaw function @@ -44,21 +97,14 @@ abstract class Forecaster (y: VectorD, hh: Int, tRng: Range = null, hparam: Hype protected val yf = makeForecastMatrix (yb, hh) // forecasts for all time points t & horizons to h //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the maximum lag used by the model (its capacity to look into the past). - * Models that use more than one past value to make predictions/forecasts must - * override this method, e.g., ARMA (2, 3) should set the cap to max(p, q) = 3. - */ - def cap: Int = 1 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the used data/input matrix. Model that use x should override. + /** Return the used response/output vector y. 
*/ - def getX: MatrixD = null + def getY: VectorD = y //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the used response/output vector y. + /** Return the used original/untransformed response/output vector y. */ - def getY: VectorD = y + def getY_org: VectorD = if yForm == null then y else yForm.fi(y) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the used response/output vector yb (y prepended by one backcast value). @@ -70,20 +116,6 @@ abstract class Forecaster (y: VectorD, hh: Int, tRng: Range = null, hparam: Hype */ def getYf: MatrixD = yf - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the feature/variable names. Override for models like SARIMAX. - */ - def getFname: Array [String] = Array ("no-x features") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given a time series y_, train the forecasting function y_ = f(lags (y_)) + e, - * where f(lags (y_)) is a function of the lagged values of y_, - * by fitting its parameters. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the testing/full response/output vector (e.g., full y) - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = {} - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Test PREDICTIONS of a forecasting model y_ = f(lags (y_)) + e * and return its predictions and QoF vector. Testing may be in-sample @@ -102,7 +134,8 @@ abstract class Forecaster (y: VectorD, hh: Int, tRng: Range = null, hparam: Hype // Forecaster.differ (yy, yfh) // uncomment for debugging assert (yy.dim == yp.dim) // make sure the vector sizes agree - new Plot (null, yy, yp, s"test: Plot of yy, yp for $modelName vs. t", true) + if DO_PLOT then + new Plot (null, yy, yp, s"test: Plot of yy, yp for $modelName vs. 
t", true) mod_resetDF (yy.dim - skip) // reset the degrees of freedom // println (s"test: yy = $yy,\n yp = $yp") (yp, diagnose (yy, yp)) // return predicted and QoF vectors @@ -113,10 +146,9 @@ abstract class Forecaster (y: VectorD, hh: Int, tRng: Range = null, hparam: Hype * @param size the size of dataset (full, train, or test) */ override def mod_resetDF (size: Int): Unit = - val dfm = max (1, parameter.size - 1) // degrees of freedom for model - debug ("mod_resetDF", s"dfm = $dfm, df = ${size-dfm}") - resetDF (dfm, size - dfm) - resetDF (dfm, size - dfm) + val dfr = max (1, parameter.size - 1) // degrees of freedom for regression/model + debug ("mod_resetDF", s"dfr = $dfr, df = ${size-dfr}") + resetDF (dfr, size - dfr) end mod_resetDF //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -140,7 +172,7 @@ abstract class Forecaster (y: VectorD, hh: Int, tRng: Range = null, hparam: Hype * before testF. * @param h the forecasting horizon, number of steps ahead to produce forecasts * @param y_ the testing/full response/output vector - */ + * def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = val yfh = yf(?, h)(0 until y_.dim-h) // column h of the forecast matrix val yy = y_(h until y_.dim) // align the actual values @@ -152,6 +184,7 @@ abstract class Forecaster (y: VectorD, hh: Int, tRng: Range = null, hparam: Hype mod_resetDF (yy.dim) // reset the degrees of freedom (yy, yfh, diagnose (yy, yfh)) // return actual, forecasted and QoF vectors end testF + */ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the hyper-parameters. @@ -170,13 +203,6 @@ abstract class Forecaster (y: VectorD, hh: Int, tRng: Range = null, hparam: Hype */ def residual: VectorD = { if e == null then flaw ("residual", "must call test method first"); e } - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The standard signature for prediction does not apply to time series. 
- */ - def predict (z: VectorD): Double = - throw new UnsupportedOperationException ("predict (VectorD) use the alternative predict") - end predict - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Predict a value for y_t using the 1-step ahead forecast. * @@ -201,6 +227,7 @@ abstract class Forecaster (y: VectorD, hh: Int, tRng: Range = null, hparam: Hype yf(?, 1)(0 until y_.dim-1) // return yp: first horizon only else // debug ("predictAll", s"y_.dim = ${y_.dim}, yf.dims = ${yf.dims}") + yf(0, 1) = NO_DOUBLE // no prior data to make predictions for t <- 1 until yf.dim do yf(t, 1) = predict (t, y_) // skip t = 0 yf(?, 1) end predictAll @@ -239,10 +266,11 @@ abstract class Forecaster (y: VectorD, hh: Int, tRng: Range = null, hparam: Hype * Assign into FORECAST MATRIX and return the h-steps ahead forecast. * Note, `predictAll` provides predictions for h = 1 and for random walk the * forecast across all horizons is the same. + * Method should be overridden for each model besides `RandomWalk` * @param h the forecasting horizon, number of steps ahead to produce forecasts * @param y_ the actual values to use in making forecasts */ - def forecastAt (h: Int, y_ : VectorD = yb): VectorD = + def forecastAt (h: Int, @unused y_ : VectorD = yb): VectorD = yf(?, h) = yf(?, 1) yf(?, h) end forecastAt @@ -264,19 +292,38 @@ abstract class Forecaster (y: VectorD, hh: Int, tRng: Range = null, hparam: Hype (yfh - width, yfh + width) // return lower and upper bounds end forecastAtI +// T E S T I N G S C E N A R I O S + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Align the actual response vector for comparison with the predicted/forecasted * response vector, returning a time vector and sliced response vector. 
- * @param tr_size the size of the intial training set + * @param tr_size the size of the initial training set * @param y the actual response for the full dataset (to be sliced) */ def align (tr_size: Int, y: VectorD): (VectorD, VectorD) = (VectorD.range (tr_size, y.dim), y(tr_size until y.dim)) end align - def crossValidate (k: Int, rando: Boolean): Array [Statistic] = - throw new UnsupportedOperationException ("Use `rollValidate` instead of `crossValidate`") - end crossValidate + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /* Use validation to compute test Quality of Fit (QoF) measures by dividing + * the full dataset into a TESTING-set and a TRAINING-set. + * Delegates to `rollValidate` with no retraining and only diagnoses the first horizon. + * Must call the `set_TE_RATIO` method to change the default TE ratio. + * @param rando flag indicating whether to use randomized or simple validation (false) + * @param ratio the ratio of the TESTING-set to the full dataset (most common 70-30 (.3), 80-20 (.2)) + * @param idx the prescribed TESTING-set indices (default => null) + */ + def validate (rando: Boolean = false, ratio: Double = Model.TE_RATIO) + (@unused idx: IndexedSeq [Int] = null): + (VectorD, VectorD) = + debug ("validate", s"rando = $rando (requires false), ratio = $ratio (requires ${Model.TE_RATIO})") + val te_size = Model.teSize (y.dim) // size of testing set + val yf1 = rollValidate (y.dim, false)(?, 1) // get column 1 returned from `rollValidate` + val y_ = y(y.dim - te_size until y.dim) // trim the actual values to testing-set + val yf_ = yf1(y.dim - te_size until y.dim) // trim the forecast at h = 1 to testing-set + val qof = diagnose (y_, yf_) + (yf1, qof) + end validate //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Use rolling-validation to compute test Quality of Fit (QoF) measures @@ -285,97 +332,88 @@ abstract class Forecaster (y: VectorD, hh: Int, tRng: Range = null, hparam: 
Hype * Calls forecast for h-steps ahead out-of-sample forecasts. * Return the FORECAST MATRIX. * @param rc the retraining cycle (number of forecasts until retraining occurs) - * @param growing whether the training grows as it roll or kepps a fixed size + * @param growing whether the training grows as it roll or keeps a fixed size + * @param doPlot whether to show the plots */ - def rollValidate (rc: Int = 2, growing: Boolean = false): MatrixD = + def rollValidate (rc: Int = 2, growing: Boolean = false, doPlot: Boolean = true): MatrixD = val ftMat = new MatrixD (hh, Fit.N_QoF) banner (s"rollValidate: Evaluate ${modelName}'s QoF for horizons 1 to $hh:") val x = getX // some model use and input matrix, else null val y = getYb // get (expanded) response/output vector val yf = getYf // get the full in-sample forecast matrix - val te_size = Forecaster.teSize (y.dim) // size of testing set - val tr_size = Forecaster.trSize (y.dim) // size of initial training set + val te_size = Model.teSize (y.dim) // size of testing set + val tr_size = Model.trSize (y.dim) // size of initial training set debug ("rollValidate", s"y.dim = ${y.dim}, train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") val yp = new VectorD (te_size) // y-predicted over testing set (only for h=1) for i <- 0 until te_size do // iterate through testing set val is = if growing then 0 else i - val t = tr_size + i // next time point to forecast + val t = tr_size + i // next time point to forecast if i % rc == 0 then val x_ = if x != null then x(is until t) else null train (x_, y(is until t)) // retrain on sliding training set // yp(i) = predict (min (t+1, y.dim-1), y) // predict the next value (only for h=1) yp(i) = predict (t, y) // predict the next value (only for h=1) val yd = forecast (t, y) // forecast the next hh-values, yf is updated - println (s"yf(t, 0) = ${yf(t, 0)}, yp(i) = ${yp(i)}, yd = $yd") + debug ("rollValidate", s"yf(t, 0) = ${yf(t, 0)}, yp(i) = ${yp(i)}, yd = $yd") // assert (yp(i) =~ 
yd(0)) // make sure h=1 forecasts agree with predictions end for val (t, yy) = align (tr_size, y) // align vectors - new Plot (t, yy, yp, s"rollValidate: Plot yy, yp vs. t for $modelName", lines = true) + if doPlot then + new Plot (t, yy, yp, s"rollValidate: Plot yy, yp vs. t for $modelName", lines = true) val yf_ = yf(tr_size until y.dim) // forecast matrix for test-set for h <- 1 to hh do val yy_ = yy(h-1 until yy.dim) // trim the actual values val yfh = yf_(?, h)(0 until yy.dim-h+1) // column h of the forecast matrix - new Plot (t, yy_, yfh, s"rollValidate: Plot yy_, yfh vs. t for $modelName @h = $h", lines = true) + if doPlot then + new Plot (t, yy_, yfh, s"rollValidate: Plot yy_, yfh vs. t for $modelName @h = $h", lines = true) mod_resetDF (te_size - h) // reset degrees of freedom val qof = diagnose (yy_, yfh) ftMat(h-1) = qof // println (FitM.fitMap (qof, qoF_names)) end for - println ("fitMap qof = ") - println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) + if DO_REPORT then + println ("fitMap qof = ") + println (Fit.showFitMap (ftMat.ᵀ)) yf end rollValidate //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform In-Sample Testing (In-ST), i.e. train and test on the full data set. + /** Perform In-Sample Testing, i.e., train and test on the full data set. 
* @param skip the number of initial time points to skip (due to insufficient past) * @param showYf whether to show the forecast matrix */ - def inSampleTest (skip: Int = 2, showYf: Boolean = false): Unit = - banner (s"In-ST Test: $modelName") + override def inSample_Test (skip: Int = 2, showYf: Boolean = false): Unit = + banner (s"In-Sample Test: $modelName") trainNtest ()() // train on full and test on full forecastAll () // forecast over all horizons setSkip (skip) // diagnose: skip the first 'skip' rows diagnoseAll (getY, getYf) // compute metrics for all horizons -// println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") -// println (s"Final In-ST Forecast Matrix yf = ${mod.getYf.shiftDiag}") - end inSampleTest - -// F E A T U R E S E L E C T I O N + if showYf then + println (s"Final In-Sample Forecast Matrix yf = ${getYf}") +// println (s"Final In-Sample Forecast Matrix yf = ${getYf.shiftDiag}") + end inSample_Test //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform forward selection to find the most predictive variable to add the - * existing model, returning the variable to add and the new model. - * May be called repeatedly. - * Note, all lags up and including 'p|q' define the model. - * @see `Fit` for index of QoF measures. - * Models supporting feature selection (e.g., `ARY`) should override this method. 
- * @param cols the lags/columns currently included in the existing model (currently ignored) - * @param idx_q index of Quality of Fit (QoF) to use for comparing quality - * - def forwardSel (cols: LinkedHashSet [Int], idx_q: Int = QoF.rSqBar.ordinal): BestStep = - throw new UnsupportedOperationException ("forwardSel is only provided by some models that override this method") - end forwardSel - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform forward selection to find the most predictive variables to have - * in the model, returning the variables added and the new Quality of Fit (QoF) - * measures for all steps. - * @see `Fit` for index of QoF measures. - * @param idx_q index of Quality of Fit (QoF) to use for comparing quality - * @param cross whether to include the cross-validation QoF measure - * - def forwardSelAll (idx_q: Int = QoF.rSqBar.ordinal, cross: Boolean = true): - (LinkedHashSet [Int], MatrixD) = - throw new UnsupportedOperationException ("forwardSelAll is only provided by some models that override this method") - end forwardSelAll + /** Perform Train-n-Test (TnT) Testing, i.e., train and test with rolling validation. 
+ * @param skip the number of initial time points to skip (due to insufficient past) + * @param rc the retraining cycles (how often to retrain the model) + * @param showYf whether to show the forecast matrix */ + def tnT_Test (skip: Int = 0, rc: Int = 2, showYf: Boolean = false): Unit = + banner (s"TnT Test: $modelName") + trainNtest ()() // initial training updated by `rollValidate` + setSkip (skip) // diagnose: skip the first 'skip' rows + rollValidate (rc) // TnT with Rolling Validation + diagnoseAll (getY,getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + if showYf then + println (s"Final TnT Forecast Matrix yf = ${getYf}") +// println (s"Final TnT Forecast Matrix yf = ${getYf.shiftDiag}") + end tnT_Test end Forecaster @@ -386,37 +424,13 @@ end Forecaster */ object Forecaster: -// private val debug = debugf ("Forecaster", true) // debug function - private val flaw = flawf ("Forecaster") // flaw function - - private var TE_RATIO = 0.2 // ratio of testing set to full dataset - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the training ratio = ratio of training set to full dataset. - * @param m the size of the full dataset - */ - def set_TE_RATIO (ratio: Double): Unit = - if ratio out (0.05, 0.95) then flaw ("init", s"testing ratio = $ratio should be in (0.05, 0.95)") - TE_RATIO = ratio - end set_TE_RATIO - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Calculate the size (number of instances) for a testing set (round up). - * @param m the size of the full dataset - */ - inline def teSize (m: Int): Int = (round (m * TE_RATIO + 0.5)).toInt - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Calculate the size (number of instances) for a training set. 
- * @param m the size of the full dataset - */ - inline def trSize (m: Int): Int = m - teSize (m) + private val flaw = flawf ("Forecaster") // flaw function //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the index range for the testing set. * @param m the size of the full dataset */ - def teRng (m: Int): Range = trSize (m) until m + def teRng (m: Int): Range = Model.trSize (m) until m //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute a reverse dot product of the parameter vector b and the most recent diff --git a/src/main/scala/scalation/modeling/forecasting/Forecaster_D.scala b/src/main/scala/scalation/modeling/forecasting/Forecaster_D.scala index 6f8de1623..bbfb3d8ce 100644 --- a/src/main/scala/scalation/modeling/forecasting/Forecaster_D.scala +++ b/src/main/scala/scalation/modeling/forecasting/Forecaster_D.scala @@ -13,35 +13,53 @@ package scalation package modeling package forecasting +import scala.annotation.unused +import scala.collection.mutable.{LinkedHashSet => LSET} import scala.math.max -//import scala.math.min import scalation.mathstat._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `Forecaster_D` abstract class provides a common framework for several forecasters. + * @note `Forecaster_D` is dependent on [[Forecaster_Reg]] class to do feature selection. * Note, the `train_x` method must be called first followed by `test`. 
* @param x the input lagged time series data * @param y the response matrix (time series data per horizon) * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname the feature/variable names * @param tRng the time range, if relevant (index as time may suffice) * @param hparam the hyper-parameters for models extending this abstract class * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) */ -abstract class Forecaster_D (x: MatrixD, y: MatrixD, hh: Int, tRng: Range = null, - hparam: HyperParameter = MakeMatrix4TS.hp, +abstract class Forecaster_D (x: MatrixD, y: MatrixD, hh: Int, fname: Array [String], + tRng: Range = null, hparam: HyperParameter = MakeMatrix4TS.hp, bakcast: Boolean = false) extends Forecaster (y(?, 0), hh, tRng, hparam, bakcast): // no automatic backcasting, @see `ARY_D.apply` private val debug = debugf ("Forecaster_D", false) // debug function protected var bb: MatrixD = null // use parameter matrix bb instead of vector b + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the set of columns (numbers) for the features in this model. + */ + def mcols: LSET [Int] = LSET.range (0, getX.dim2) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Get the data/input matrix built from lagged y (and optionally xe) values. + */ + override def getX: MatrixD = x //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the used response/output matrix y. */ def getYy: MatrixD = y + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the feature/variable names. Overrides definition in `Forecaster` + */ + override def getFname: Array [String] = fname + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Train/fit e.g., an `ARY_D` model to the times-series data in vector y_. 
* Estimate the coefficient vector b for a p-th order Auto-Regressive ARY_D(p) model. @@ -64,7 +82,8 @@ abstract class Forecaster_D (x: MatrixD, y: MatrixD, hh: Int, tRng: Range = null (VectorD, VectorD) = train_x (x_, y_) // train the model on training set val (yp, qof) = test (xx, yy) // test the model on testing set - println (report (qof)) // report on Quality of Fit (QoF) + if DO_REPORT then + println (report (qof)) // report on Quality of Fit (QoF) (yp, qof) end trainNtest_x @@ -84,7 +103,8 @@ abstract class Forecaster_D (x: MatrixD, y: MatrixD, hh: Int, tRng: Range = null val y0 = y_(0 until m, 0) // actual values (except last) for h = 1 val yf1 = yf(0 until m, 1) // forecasted values for h = 1 - new Plot (null, y0, yf1, s"test: Plot of y0, yf1 for $modelName vs. t", true) + if DO_PLOT then + new Plot (null, y0, yf1, s"test: Plot of y0, yf1 for $modelName vs. t", true) mod_resetDF (y0.dim) // reset the degrees of freedom (yf1, diagnose (y0, yf1)) // return predicted and QoF vectors end test @@ -94,9 +114,9 @@ abstract class Forecaster_D (x: MatrixD, y: MatrixD, hh: Int, tRng: Range = null * @param size the size of dataset (full, train, or test) */ override def mod_resetDF (size: Int): Unit = - val dfm = max (1, parameter.size - 1) // degrees of freedom for model - debug ("mod_resetDF", s"dfm = $dfm, df = ${size-dfm}") - resetDF (dfm, size - dfm) + val dfr = max (1, parameter.size - 1) // degrees of freedom for regression/model + debug ("mod_resetDF", s"dfr = $dfr, df = ${size-dfr}") + resetDF (dfr, size - dfr) end mod_resetDF //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -138,7 +158,7 @@ abstract class Forecaster_D (x: MatrixD, y: MatrixD, hh: Int, tRng: Range = null * * @param y_ the actual values to use in making forecasts */ - def forecastAll (y_ : MatrixD): MatrixD = yf + def forecastAll (@unused y_ : MatrixD): MatrixD = yf //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Use 
rolling-validation to compute test Quality of Fit (QoF) measures @@ -148,13 +168,14 @@ abstract class Forecaster_D (x: MatrixD, y: MatrixD, hh: Int, tRng: Range = null * Return the forecasted values in the FORECAST MATRIX,. * @param rc_ the retraining cycle (number of forecasts until retraining occurs) * @param growing whether the training grows as it roll or kepps a fixed size + * @param doPlot whether to show the plots */ - override def rollValidate (rc: Int = 2, growing: Boolean = false): MatrixD = + override def rollValidate (rc: Int = 2, growing: Boolean = false, doPlot: Boolean = false): MatrixD = banner (s"rollValidate: Evaluate ${modelName}'s QoF for horizons 1 to $hh:") val yf = getYf // get the full in-sample forecast matrix - val te_size = Forecaster.teSize (y.dim) // size of testing set - val tr_size = y.dim - te_size // size of initial training set + val te_size = Model.teSize (y.dim) // size of testing set + val tr_size = Model.trSize (y.dim) // size of initial training set debug ("rollValidate", s"y.dims = ${y.dims}, train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") val yp = new VectorD (te_size) @@ -165,17 +186,170 @@ abstract class Forecaster_D (x: MatrixD, y: MatrixD, hh: Int, tRng: Range = null val x_ = if x != null then x(is until t) else null train_x (x_, y(is until t)) // retrain on sliding training set debug ("rollValidate", s"retrain on i = $i, bb = $bb") -// val yd = predict (t, y) // predict the next value (only for h=1) + yp(i) = predict (t, y)(0) // predict the next value (only for h=1) val yd = forecast (t, y(?, 0)) // forecast the next hh-values, yf is updated - yp(i) = yd(0) - println (s"yf(t, 0) = ${yf(t, 0)}, yp(i) = ${yp(i)}, yd = $yd") + debug ("rollValidate", s"yf(t, 0) = ${yf(t, 0)}, yp(i) = ${yp(i)}, yd = $yd") end for - val y_ = y(?, 0)(tr_size until y.dim-1) // trim the actual values - val t = VectorD.range (tr_size, y.dim-1) - new Plot (t, y_, yp, s"rollValidate: Plot y_, yp vs. 
t for $modelName", lines = true) + if doPlot then + val (t, y_) = align (tr_size, y(?, 0)) + new Plot (t, y_, yp, s"rollValidate: Plot y_, yp vs. t for $modelName", lines = true) yf end rollValidate + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform In-Sample Testing, i.e. train and test on the full data set. + * @param skip the number of initial time points to skip (due to insufficient past) + * @param showYf whether to show the forecast matrix + */ + override def inSample_Test (skip: Int = 2, showYf: Boolean = false): Unit = + banner (s"In-Sample Test: $modelName") + trainNtest_x ()() // train on full and test on full + setSkip (skip) // diagnose: skip the first 'skip' rows + diagnoseAll (getY, getYf) // compute metrics for all horizons + if showYf then + println (s"Final In-Sample Forecast Matrix yf = ${getYf}") +// println (s"Final In-Sample Forecast Matrix yf = ${getYf.shiftDiag}") + end inSample_Test + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform Train-n-Test (TnT) Testing, i.e. train and test with rolling validation. + * @param skip the number of initial time points to skip (due to insufficient past) + * @param rc the retraining cycles (how often to retrain the model) + * @param showYf whether to show the forecast matrix + */ + override def tnT_Test (skip: Int = 0, rc: Int = 2, showYf: Boolean = false): Unit = + banner (s"TnT Test: $modelName") + trainNtest_x ()() // initial training updated by `rollValidate` + setSkip (skip) // diagnose: skip the first 'skip' rows + rollValidate (rc) // TnT with Rolling Validation + diagnoseAll (getY,getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + if showYf then + println (s"Final TnT Forecast Matrix yf = ${getYf}") +// println (s"Final TnT Forecast Matrix yf = ${getYf.shiftDiag}") + end tnT_Test + +// F E A T U R E S E L E C T I O N + + import SelectionTech._ + + def getBest: BestStep = ??? 
// FIX -- implement or throw exception + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build an `Forecaster_D` model using the cols with the selected features. + * @param cols the cols of the input matrix with selected features + * @param h the number of the horizon + */ + def getModel (cols: LSET [Int] = mcols): Forecaster_D + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a single-horizon `Forecaster_Reg` model using the cols with the selected features. + * Note: uses `Forecaster_Reg` as it is the base model for Forecaster_D. + * @param cols the cols of the input matrix with selected features + * @param h the number of the horizon + */ + def getModel_h (cols: LSET [Int] = mcols, h: Int = 1): Forecaster_Reg + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform regression-based feature selection to find the most predictive variables + * to have in the model, returning the variables left and the new Quality of Fit + * (QoF) measures for all steps. + * @see `scalation.modeling.Forecaster_Reg` for index of QoF measures. 
+ * @param h the number of the horizon + * @param fsType the type of the feature selection to use + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "none") + * @param first first variable to consider for elimination + * (default (1) assume intercept x_0 will be in any model) + * @param swap whether to allow a swap step (swap out a feature for a new feature in one step) + * @param qk index of Quality of Fit (QoF) to use for comparing quality + */ + def featureSelectAtHorizon (h: Int, fsType: SelectionTech, cross: String = "none", + first: Int = 1, swap: Boolean = true) + (using qk: Int): (LSET [Int], MatrixD, Forecaster_Reg) = + require (1 <= h && h <= hh, s"horizon h=$h out of range [1, $hh]") + + val fsFun: (Forecaster_Reg => (LSET [Int], MatrixD)) = fsType match // choose the FS routine once + case Forward => (m: Forecaster_Reg) => m.forwardSelAll (cross) + case Backward => (m: Forecaster_Reg) => m.backwardElimAll (first, cross) + case Stepwise => (m: Forecaster_Reg) => m.stepwiseSelAll (cross, swap) + case Beam => (m: Forecaster_Reg) => m.beamSelAll (cross) + + val mod_h = getModel_h (h = h) // build a single-horizon model bound to y(:, k-1) with hh=1 + val (_, rSq) = fsFun (mod_h) + val cols = mod_h.getBest.mod_cols + val modForc = getModel_h (cols, h) + (cols, rSq, modForc) + end featureSelectAtHorizon + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform regression-based feature selection to find the most predictive variables + * to have in the model, returning the variables left and the new Quality of Fit + * (QoF) measures for all steps. 
+ * @param fsType the type of the feature selection to use + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "none") + * @param first first variable to consider for elimination + * (default (1) assume intercept x_0 will be in any model) + * @param swap whether to allow a swap step (swap out a feature for a new feature in one step) + * @param qk index of Quality of Fit (QoF) to use for comparing quality + */ + def featureSelection (fsType: SelectionTech = Stepwise, cross: String = "none", + first: Int = 1, swap: Boolean = true) + (using qk: Int): (Array [LSET [Int]], Array [MatrixD], Array [Forecaster_Reg], MatrixD, MatrixD) = + val colsArr = new Array [LSET [Int]](hh) + val rSqArr = new Array [MatrixD](hh) + val modArr = new Array [Forecaster_Reg](hh) + val yf = new MatrixD(y.dim, hh) + val ftMat = new MatrixD (hh, Fit.N_QoF) + + for h <- 1 to hh do + val (cls, rSq, modForc) = featureSelectAtHorizon (h, fsType, cross, first, swap) + colsArr(h-1) = cls + rSqArr(h-1) = rSq + modForc.setSkip (0) + modForc.rollValidate (rc = 2) + ftMat(h-1) = modForc.diagnoseAll (modForc.getY, modForc.getYf, Forecaster.teRng (y.dim))(0) + yf(?, h - 1) = modForc.getYf(?, 1) + modArr(h-1) = modForc + end for + (colsArr, rSqArr, modArr, yf, ftMat) + end featureSelection + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform attention feature selection to find the best model + * @param scoredCols the columns ordered based on feature scores + * + def featureSelectionAtt (scoredCols: LSET [Int]): (Array [MatrixD], Array [MatrixD], Array [MatrixD]) = + val total = scoredCols.size + val min_cols = 4 + val qof_all = new Array[MatrixD](hh) + val qof_inSample = new Array[MatrixD](hh) + val qof_TnT = new Array[MatrixD](hh) + + for k <- 1 to hh do + println (s"k = $k") + val qof_k = new MatrixD (3*(total-min_cols), Fit.N_QoF) + qof_inSample(k-1) = new MatrixD (total-min_cols, Fit.N_QoF) + qof_TnT(k-1) = new MatrixD 
(total-min_cols, Fit.N_QoF) + for i <- 0 until total-min_cols do + val cls = scoredCols.take (total-i) + val mod_i = getModel_h (cls, k) + val (x_mod_i, y_mod_i) = (mod_i.getX, mod_i.getY) + val t_rng = 0 until Model.trSize (y_mod_i.dim) + val (x_tr, y_tr) = (x_mod_i(t_rng), y_mod_i(t_rng)) + val (_, qof) = mod_i.trainNtest_x (x_tr, y_tr)(x_tr, y_tr) + qof_k(3*i) += i + qof_k(3*i + 1) = qof + qof_inSample(k-1)(i) = qof_k(3*i + 1) + mod_i.setSkip (0) + mod_i.rollValidate (rc = 2) + qof_k(3*i + 2) = mod_i.diagnoseAll (mod_i.getY, mod_i.getYf, Forecaster.teRng (y.dim))(0) + qof_TnT(k-1)(i) = qof_k(3*i + 2) + end for + qof_all(k-1) = qof_k + end for + (qof_all, qof_inSample, qof_TnT) + end featureSelectionAtt + */ + end Forecaster_D diff --git a/src/main/scala/scalation/modeling/forecasting/Forecaster_D.scala.bak b/src/main/scala/scalation/modeling/forecasting/Forecaster_D.scala.bak new file mode 100644 index 000000000..1f38da5e2 --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/Forecaster_D.scala.bak @@ -0,0 +1,284 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Sun Jun 30 13:27:00 EDT 2024 + * @see LICENSE (MIT style license file). + * + * @note Model Framework: Abstract Class for Forecasters with Matrix Input + * most models will need to override `train`, `predict`, `forecast` and `forecastAt` + */ + +package scalation +package modeling +package forecasting + +import scala.annotation.unused +import scala.collection.mutable.{LinkedHashSet => LSET} +import scala.math.max + +import scalation.mathstat._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Forecaster_D` abstract class provides a common framework for several forecasters. + * Note, the `train_x` method must be called first followed by `test`. 
+ * @param x the input lagged time series data + * @param y the response matrix (time series data per horizon) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname the feature/variable names + * @param tRng the time range, if relevant (index as time may suffice) + * @param hparam the hyper-parameters for models extending this abstract class + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ +abstract class Forecaster_D (x: MatrixD, y: MatrixD, hh: Int, fname: Array [String], + tRng: Range = null, hparam: HyperParameter = MakeMatrix4TS.hp, + bakcast: Boolean = false) + extends Forecaster (y(?, 0), hh, tRng, hparam, bakcast): // no automatic backcasting, @see `ARY_D.apply` + + private val debug = debugf ("Forecaster_D", false) // debug function + + protected var bb: MatrixD = null // use parameter matrix bb instead of vector b + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the used predictor matrix x. + */ + override def getX: MatrixD = x + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the used response/output matrix y. + */ + def getYy: MatrixD = y + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the feature/variable names. Overrides definition in `Forecaster` + */ + override def getFname: Array [String] = fname + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Train/fit e.g., an `ARY_D` model to the times-series data in vector y_. + * Estimate the coefficient vector b for a p-th order Auto-Regressive ARY_D(p) model. + * Uses OLS Matrix Fatorization to determine the coefficients, i.e., the b (φ) vector. 
+ * @param x_ the data/input matrix (e.g., full x) + * @param y_ the training/full response vector (e.g., full y) + */ + def train_x (x_ : MatrixD, y_ : MatrixD): Unit + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Train and test the forecasting model y_ = f(y-past) + e and report its QoF + * and plot its predictions. Return the predictions and QoF. + * NOTE: must use `trainNtest_x` when an x matrix is used, such as in `ARY_D`. + * @param x_ the training/full data/input matrix (defaults to full x) + * @param y_ the training/full response/output vector (defaults to full y) + * @param xx the testing/full data/input matrix (defaults to full x) + * @param yy the testing/full response/output vector (defaults to full y) + */ + def trainNtest_x (x_ : MatrixD = x, y_ : MatrixD = y)(xx: MatrixD = x, yy: MatrixD = y): + (VectorD, VectorD) = + train_x (x_, y_) // train the model on training set + val (yp, qof) = test (xx, yy) // test the model on testing set + if DO_REPORT then + println (report (qof)) // report on Quality of Fit (QoF) + (yp, qof) + end trainNtest_x + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Test PREDICTIONS of a forecasting model y_ = f(lags (y_)) + e + * and return its predictions and QoF vector. Testing may be in-sample + * (on the training set) or out-of-sample (on the testing set) as determined + * by the parameters passed in. Note: must call train before test. + * Must override to get Quality of Fit (QoF). 
+ * @param x_null the data/input matrix + * @param y_ the actual testing/full response/output matrix + */ + def test (x_ : MatrixD, y_ : MatrixD): (VectorD, VectorD) = + val m = y_.dim + predictAll (y_) // make all predictions - saved in yf + debug ("test", s"x_.dims = ${x_.dims}, y_.dims, ${y_.dims}, yf.dims = ${yf.dims}") + + val y0 = y_(0 until m, 0) // actual values (except last) for h = 1 + val yf1 = yf(0 until m, 1) // forecasted values for h = 1 + if DO_PLOT then + new Plot (null, y0, yf1, s"test: Plot of y0, yf1 for $modelName vs. t", true) + mod_resetDF (y0.dim) // reset the degrees of freedom + (yf1, diagnose (y0, yf1)) // return predicted and QoF vectors + end test + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Models need to provide a means for updating the Degrees of Freedom (DF). + * @param size the size of dataset (full, train, or test) + */ + override def mod_resetDF (size: Int): Unit = + val dfr = max (1, parameter.size - 1) // degrees of freedom for regression/model + debug ("mod_resetDF", s"dfr = $dfr, df = ${size-dfr}") + resetDF (dfr, size - dfr) + end mod_resetDF + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the vector of parameter/coefficient values (they are model specific). + * Override for models with other parameters besides bb(?, 0). + */ + override def parameter: VectorD = bb(?, 0) // parameter vector (first column) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict a value for y_t using the 1-step ahead forecast. + * + * y_t = b_0 + b_1 y_t-1 + b_2 y_t-2 + ... + b_p y_t-p = b dot x_t + * + * FIX - parameter order is in conflict with AR models. 
+ * @param t the time point being predicted + * @param y_ the actual values to use in making predictions (ignored) + */ + def predict (t: Int, y_ : MatrixD): VectorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict all values (for all horizons) corresponding to the given time series vector y_. + * Create FORECAST MATRIX yf and return it. + * Note `forecastAll` simply returns the values produced by `predictAll`. + * @param y_ the actual time series values to use in making predictions + */ + def predictAll (y_ : MatrixD): MatrixD = + for t <- 0 until y_.dim do + val pred = predict (t, y_) +// debug ("predictAll", s"pred = $pred") + yf(t, 1 until hh+1) = pred // FIX - yf for VAR is different + yf + end predictAll + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forecast values for all y_.dim time points and all horizons (1 through hh-steps ahead). + * Record these in the FORECAST MATRIX yf, where + * + * yf(t, h) = h-steps ahead forecast for y_t + * + * @param y_ the actual values to use in making forecasts + */ + def forecastAll (@unused y_ : MatrixD): MatrixD = yf + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Use rolling-validation to compute test Quality of Fit (QoF) measures + * by dividing the dataset into a TRAINING SET (tr) and a TESTING SET (te). + * as follows: [ <-- tr_size --> | <-- te_size --> ] + * Calls forecast for h-steps ahead out-of-sample forecasts. + * Return the forecasted values in the FORECAST MATRIX,. 
+ * @param rc_ the retraining cycle (number of forecasts until retraining occurs) + * @param growing whether the training grows as it roll or kepps a fixed size + * @param doPlot whether to show the plots + */ + override def rollValidate (rc: Int = 2, growing: Boolean = false, doPlot: Boolean = true): MatrixD = + banner (s"rollValidate: Evaluate ${modelName}'s QoF for horizons 1 to $hh:") + + val yf = getYf // get the full in-sample forecast matrix + val te_size = Model.teSize (y.dim) // size of testing set + val tr_size = Model.trSize (y.dim) // size of initial training set + debug ("rollValidate", s"y.dims = ${y.dims}, train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") + + val yp = new VectorD (te_size) + for i <- yp.indices do // iterate through testing set + val is = if growing then 0 else i + val t = tr_size + i // next time point to forecast + if i % rc == 0 then + val x_ = if x != null then x(is until t) else null + train_x (x_, y(is until t)) // retrain on sliding training set + debug ("rollValidate", s"retrain on i = $i, bb = $bb") + yp(i) = predict (t, y)(0) // predict the next value (only for h=1) + val yd = forecast (t, y(?, 0)) // forecast the next hh-values, yf is updated + println (s"yf(t, 0) = ${yf(t, 0)}, yp(i) = ${yp(i)}, yd = $yd") + end for + + if doPlot then + val (t, y_) = align (tr_size, y(?, 0)) + new Plot (t, y_, yp, s"rollValidate: Plot y_, yp vs. t for $modelName", lines = true) + yf + end rollValidate + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform In-Sample Testing, i.e. train and test on the full data set. 
+ * @param skip the number of initial time points to skip (due to insufficient past) + * @param showYf whether to show the forecast matrix + */ + override def inSample_Test (skip: Int = 2, showYf: Boolean = false): Unit = + banner (s"In-Sample Test: $modelName") + trainNtest_x ()() // train on full and test on full + setSkip (skip) // diagnose: skip the first 'skip' rows + diagnoseAll (getY, getYf) // compute metrics for all horizons + if showYf then + println (s"Final In-Sample Forecast Matrix yf = ${getYf}") +// println (s"Final In-Sample Forecast Matrix yf = ${getYf.shiftDiag}") + end inSample_Test + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform Train-n-Test (TnT) Testing, i.e. train and test with rolling validation. + * @param skip the number of initial time points to skip (due to insufficient past) + * @param rc the retraining cycles (how often to retrain the model) + * @param showYf whether to show the forecast matrix + */ + override def tnT_Test (skip: Int = 0, rc: Int = 2, showYf: Boolean = false): Unit = + banner (s"TnT Test: $modelName") + trainNtest_x ()() // initial training updated by `rollValidate` + setSkip (skip) // diagnose: skip the first 'skip' rows + rollValidate (rc) // TnT with Rolling Validation + diagnoseAll (getY,getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + if showYf then + println (s"Final TnT Forecast Matrix yf = ${getYf}") +// println (s"Final TnT Forecast Matrix yf = ${getYf.shiftDiag}") + end tnT_Test + +// F E A T U R E S E L E C T I O N + + import SelectionTech._ + + def getBest: BestStep = ??? // FIX -- implement or throw exception + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a single-horizon `ARX` model using input matrix x_fs with the selected features. + * Note: uses `ARX` as it is the base model for ARX*_D. 
+ * @param k the number of the horizon + * @param x_fs the input matrix containing the selected features + */ + def getModel_k (k: Int = 1, x_fs: MatrixD = x): ARX = + new ARX (x_fs, y(?, k-1), 1, 1, fname, tRng, hparam, bakcast) + end getModel_k + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert a rergression to a forecasting model. + * @param mod the regression model + */ + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert the underlying Regression Model to a subtype of `Forecaster_D` Forecasting Model. + * @param mod the regression model to convert, e.g., the best model after feature selection + */ + def convertReg2Forc (mod: Model_FS = getBest.mod): Forecaster_D + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform regression-based feature selection to find the most predictive variables + * to have in the model, returning the variables left and the new Quality of Fit + * (QoF) measures for all steps. + * @see `scalation.modeling.Forecaster_Reg` for index of QoF measures. 
+ * @param k the number of the horizon + * @param fsType the type of the feature selection to use + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "none") + * @param first first variable to consider for elimination + * (default (1) assume intercept x_0 will be in any model) + * @param swap whether to allow a swap step (swap out a feature for a new feature in one step) + * @param qk index of Quality of Fit (QoF) to use for comparing quality + */ + def featureSelectAtHorizon (k: Int, fsType: SelectionTech = Stepwise, cross: String = "none", + first: Int = 1, swap: Boolean = true) + (using qk: Int): (LSET [Int], MatrixD, ARX) = + require (1 <= k && k <= hh, s"horizon k=$k out of range [1,$hh]") + + val fsFun: (ARX => (LSET [Int], MatrixD)) = fsType match // choose the FS routine once + case Forward => (m: ARX) => m.forwardSelAll (cross) + case Backward => (m: ARX) => m.backwardElimAll (first, cross) + case Stepwise => (m: ARX) => m.stepwiseSelAll (cross, swap) + case Beam => (m: ARX) => m.beamSelAll (cross)(0) // get first model + + val mod_k = getModel_k (k, x) // build a single-horizon model bound to y(:, k-1) with hh=1 + val (cols, rSq) = fsFun (mod_k) + val modBest = mod_k.getBest.mod + val modForc = mod_k.convertReg2Forc (modBest) + (cols, rSq, modForc) + end featureSelectAtHorizon + +end Forecaster_D + diff --git a/src/main/scala/scalation/modeling/forecasting/Forecaster_Reg.scala b/src/main/scala/scalation/modeling/forecasting/Forecaster_Reg.scala index 7f10bfdd6..cd2bd3b77 100644 --- a/src/main/scala/scalation/modeling/forecasting/Forecaster_Reg.scala +++ b/src/main/scala/scalation/modeling/forecasting/Forecaster_Reg.scala @@ -6,6 +6,7 @@ * @see LICENSE (MIT style license file). * * @note Model Framework: Abstract Class for Forecasters that utilize Regression + * Extending classes include ARY, ARX, ARX_Quad, ARX_SR, ... 
* * @see `scalation.modeling.Regression` */ @@ -19,9 +20,15 @@ import scala.math.max import scala.util.control.Breaks.{break, breakable} import scalation.mathstat._ -import scalation.modeling.{Regression => REGRESSION} -//import scalation.modeling.{RidgeRegression => REGRESSION} + +// Select via import the type of regression: +// Regular, Ridge (L_2), Lasso (L_1), Bridge (L_q), RidgeBridge (L_2 and L_q) + +//import scalation.modeling.{Regression => REGRESSION} +import scalation.modeling.{RidgeRegression => REGRESSION} // default //import scalation.modeling.{LassoRegression => REGRESSION} +//import scalation.modeling.{BridgeRegression => REGRESSION} +//import scalation.modeling.{RidgeBridgeRegression => REGRESSION} //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `Forecaster_Reg` abstract class provides base methods for use by extending classes @@ -40,33 +47,47 @@ abstract class Forecaster_Reg (x: MatrixD, y: VectorD, hh: Int, fname: Array [St extends Forecaster (y, hh, tRng, hparam, bakcast) with FeatureSelection: - private val debug = debugf ("Forecaster_Reg", false) // debug function - private val flaw = flawf ("Forecaster_Reg") // debug function - protected val reg = new REGRESSION (x, y, fname, hparam) // delegate training to regression + private val debug = debugf ("Forecaster_Reg", false) // debug function + private val flaw = flawf ("Forecaster_Reg") // flaw function + protected val nneg = hparam("nneg").toInt == 1 // 0 => unrestricted, 1 => predictions must be non-negative + protected val reg = new REGRESSION (x, y, fname, hparam ++ REGRESSION.hp) // delegate training to regression + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the set of columns (numbers) for the features in this model. 
+ */ + def mcols: LSET [Int] = LSET.range (0, getX.dim2) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the data/input matrix built from lagged y (optionally xe) values. + /** Get the data/input matrix built from lagged y (and optionally xe) values. */ override def getX: MatrixD = x + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the feature/variable names. Overrides definition in `Forecast` trait. + */ + override def getFname: Array [String] = fname + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARY` model to the times-series data in vector y_. - * Estimate the coefficient vector b for a p-th order Auto-Regressive ARY(p) model. - * Uses OLS Matrix Fatorization to determine the coefficients, i.e., the b (φ) vector. + /** Train/fit a `Forecaster_Reg` model to the times-series data y_ = f(x_). + * Estimate the coefficient vector b for a `Forecaster_Reg` model. + * Uses OLS Matrix Fatorization to determine the coefficients, e.g., the b (φ) vector. 
* @param x_ the data/input matrix (e.g., full x) * @param y_ the training/full response vector (e.g., full y) */ override def train (x_ : MatrixD, y_ : VectorD): Unit = debug ("train", s"$modelName, x_.dims = ${x_.dims}, y_.dim = ${y_.dim}") - reg.train (x_, y_) // train the regression model - b = reg.parameter // coefficients from regression + val idx = y_.indexOf (NO_DOUBLE) // index of first non-value + val (x_t, y_t) = if idx < 0 then (x_, y_) else (x_(0 until idx), y_(0 until idx)) + reg.train (x_t, y_t) // train the regression model + b = reg.parameter // coefficients from regression + debug ("train", s"parameter vector b = $b") end train //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train and test the forecasting model y_ = f(y-past) + e and report its QoF + /** Train and test the forecasting model y_ = f(x_) + e and report its QoF * and plot its predictions. Return the predictions and QoF. - * NOTE: must use `trainNtest_x` when an x matrix is used, such as in `ARY`. + * NOTE: must use `trainNtest_x` when an x matrix is used, such as in `ARX`. 
* @param x_ the training/full data/input matrix (defaults to full x) * @param y_ the training/full response/output vector (defaults to full y) * @param xx the testing/full data/input matrix (defaults to full x) @@ -74,42 +95,41 @@ abstract class Forecaster_Reg (x: MatrixD, y: VectorD, hh: Int, fname: Array [St */ def trainNtest_x (x_ : MatrixD = x, y_ : VectorD = y) (xx: MatrixD = x, yy: VectorD = y): (VectorD, VectorD) = - train (x_, y_) // train the model on training set - val (yp, qof) = test (xx, yy) // test the model on testing set - println (report (qof)) // report on Quality of Fit (QoF) + train (x_, y_) // train the model on training set + val (yp, qof) = test (xx, yy) // test the model on testing set + if DO_REPORT then + println (report (qof)) // report on Quality of Fit (QoF) (yp, qof) end trainNtest_x //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a forecasting model y_ = f(lags (y_)) + e - * and RETURN (1) aligned actual values, (2) its forecasts and (3) QoF vector. + /** Test FORECASTS of a forecasting model y_ = f(y_) + e and RETURN + * (1) aligned actual values, (2) its forecasts and (3) QoF vector. * Testing may be in-sample (on the training set) or out-of-sample (on the testing set) - * as determined by the parameters passed in. Note: must call train and forecastAll - * before testF. + * as determined by the parameters passed in. + * Note: must call train and forecastAll before testF. 
* @param h the forecasting horizon, number of steps ahead to produce forecasts * @param y_ the testing/full response/output vector - */ + * override def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = val h_ = h - 1 - val yy = y_(h_ until y_.dim) // align the actual values - val yfh = yf(?, h)(0 until y_.dim-h_) // column h of the forecast matrix + val yy = y_(h_ until y_.dim) // align the actual values + val yfh = yf(?, h)(0 until y_.dim-h_) // column h of the forecast matrix println (s"yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// Forecaster.differ (yy, yfh) // uncomment for debugging - assert (yy.dim == yfh.dim) // make sure the vector sizes agree +// Forecaster.differ (yy, yfh) // uncomment for debugging + assert (yy.dim == yfh.dim) // make sure the vector sizes agree new Plot (null, yy, yfh, s"testF: yy, yfh vs. t for $modelName @h = $h", lines = true) - mod_resetDF (yy.dim) // reset the degrees of freedom - (yy, yfh, diagnose (yy, yfh)) // return actual, forecasted and QoF vectors + mod_resetDF (yy.dim) // reset the degrees of freedom + (yy, yfh, diagnose (yy, yfh)) // return actual, forecasted and QoF vectors end testF + */ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Predict a value for y_t using the 1-step ahead forecast. - * - * y_t = b_0 + b_1 y_t-1 + b_2 y_t-2 + ... 
+ b_p y_t-p = b dot x_t - * * @see `modeling.rectify` define in `Predictor.scala` * @param t the time point being predicted - * @param y_ the actual values to use in making predictions (ignored) + * @param y_ the actual values to use in making predictions */ override def predict (t: Int, y_ : VectorD): Double = val yp = rectify (reg.predict (x(t)), nneg) @@ -135,14 +155,14 @@ abstract class Forecaster_Reg (x: MatrixD, y: VectorD, hh: Int, fname: Array [St * @param y_ the actual values to use in making predictions */ override def forecast (t: Int, y_ : VectorD = y): VectorD = - val yh = new VectorD (hh) // hold forecasts for each horizon + val yh = new VectorD (hh) // hold forecasts for each horizon for h <- 1 to hh do - val xy = forge (x(t), yf(t), h) // pull past and prior forecasted values - val pred = rectify (reg.predict (xy), nneg) // slide in prior forecasted values + val xy = if h == 1 then x(t) else forge (x(t), yf(t), h) // pull past and prior forecasted values + val pred = rectify (reg.predict (xy), nneg) // slide in prior forecasted values // debug ("forecast", s"h = $h, @t = $t, xy = $xy, yp = $pred, y_ = ${y_(t)}") - yf(t, h) = pred // record in forecast matrix - yh(h-1) = pred // record forecasts for each horizon - yh // return forecasts for all horizons + yf(t, h) = pred // record in forecast matrix + yh(h-1) = pred // record forecasts for each horizon + yh // return forecasts for all horizons end forecast //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -156,12 +176,12 @@ abstract class Forecaster_Reg (x: MatrixD, y: VectorD, hh: Int, fname: Array [St override def forecastAt (h: Int, y_ : VectorD = y): VectorD = if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - for t <- y_.indices do // make forecasts over all time points for horizon h + for t <- y_.indices do // make forecasts over all time points for horizon h val xy = forge (x(t), yf(t), h) val pred = rectify (reg.predict (xy), nneg) - 
debug ("forecastAt", s"h = $h, @t = $t, xy = $xy, yp = $pred, y_ = ${y_(t)}") - yf(t, h) = pred // record in forecast matrix - yf(?, h) // return the h-step ahead forecast vector +// debug ("forecastAt", s"h = $h, @t = $t, xy = $xy, yp = $pred, y_ = ${y_(t)}") + yf(t, h) = pred // record in forecast matrix + yf(?, h) // return the h-step ahead forecast vector end forecastAt //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -169,9 +189,9 @@ abstract class Forecaster_Reg (x: MatrixD, y: VectorD, hh: Int, fname: Array [St * @param size the size of dataset (full, train, or test) */ override def mod_resetDF (size: Int): Unit = - val dfm = max (1, parameter.size - 1) // degrees of freedom for model/dataset - debug ("mod_resetDF", s"dfm = $dfm, df = ${size-dfm}") - resetDF (dfm, size - dfm) + val dfr = max (1, parameter.size - 1) // degrees of freedom for regression/model + debug ("mod_resetDF", s"dfr = $dfr, df = ${size-dfr}") + resetDF (dfr, size - dfr) end mod_resetDF //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -184,27 +204,67 @@ abstract class Forecaster_Reg (x: MatrixD, y: VectorD, hh: Int, fname: Array [St */ override def summary (x_ : MatrixD = getX, fname_ : Array [String] = fname, b_ : VectorD = b, vifs: VectorD = reg.vif ()): String = - super.summary (x_, fname_, b_, vifs) // summary from `Fit` + super.summary (x_, fname_, b_, vifs) // summary from `Fit` end summary //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform In-Sample Testing (In-ST), i.e. train and test on the full data set. + /** Perform In-Sample Testing, i.e. train and test on the full data set. 
* @param skip the number of initial time points to skip (due to insufficient past) * @param showYf whether to show the forecast matrix */ - override def inSampleTest (skip: Int = 2, showYf: Boolean = false): Unit = - banner (s"In-ST Test: $modelName") - trainNtest_x ()() // train on full and test on full + override def inSample_Test (skip: Int = 2, showYf: Boolean = false): Unit = + banner (s"In-Sample Test: $modelName") + trainNtest_x ()() // train on full and test on full forecastAll () // forecast over all horizons setSkip (skip) // diagnose: skip the first 'skip' rows diagnoseAll (getY, getYf) // compute metrics for all horizons -// println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") -// println (s"Final In-ST Forecast Matrix yf = ${mod.getYf.shiftDiag}") - end inSampleTest + if showYf then + println (s"Final In-Sample Forecast Matrix yf = ${getYf}") +// println (s"Final In-Sample Forecast Matrix yf = ${getYf.shiftDiag}") + end inSample_Test + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform Train-n-Test (TnT) Testing, i.e. train and test with rolling validation. 
+ * @param skip the number of initial time points to skip (due to insufficient past) + * @param rc the retraining cycles (how often to retrain the model) + * @param showYf whether to show the forecast matrix + */ + override def tnT_Test (skip: Int = 0, rc: Int = 2, showYf: Boolean = false): Unit = + banner (s"TnT Test: $modelName") + trainNtest_x ()() // initial training updated by `rollValidate` + setSkip (skip) // diagnose: skip the first 'skip' rows + rollValidate (rc) // TnT with Rolling Validation + diagnoseAll (getY,getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set + if showYf then + println (s"Final TnT Forecast Matrix yf = ${getYf}") +// println (s"Final TnT Forecast Matrix yf = ${getYf.shiftDiag}") + end tnT_Test // F E A T U R E S E L E C T I O N - private var theBest = BestStep ()() // record the best model from feature selection + // @see givens in `modeling.FeatureSelection` + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a sub-model that is restricted to the given columns of the data matrix. + * Must be implemented for models that support feature selection. + * Otherwise, use @see `NoBuildModel` + * @note: Forecasting models should use this method to build their own sub-models. FIX. + * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) + */ + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): REGRESSION = + reg.buildModel (x_cols, fname2) + end buildModel + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert the underlying Regression Model to a subtype of `Forecaster_Reg` Forecasting Model.
+ * @param col the columns/lags of the model to convert, e.g., the best model after feature selection + */ + def convertReg2Forc (col: LSET [Int]): Forecaster_Reg + + private var theBest = BestStep ()() // record the best model from Feature Selection (FS) + private val t_rng = if fullset_FS then 0 until y.dim // use full dataset for Feature Selection (FS) + else 0 until Model.trSize (y.dim) // use training set for Feature Selection (FS) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Reset the best-step to default @@ -221,31 +281,43 @@ abstract class Forecaster_Reg (x: MatrixD, y: VectorD, hh: Int, fname: Array [St * Note: for QoF where smaller if better, must switch to '<'. * @param best new best-step found during feature selection * @param qk index of Quality of Fit (QoF) to use for comparing quality - * defaults to smapeIC, could try rSqBar could work better + * defaults to smapeC, could try rSqBar could work better */ private def updateBest (best: BestStep) (using qk: Int): Unit = if best.qof != null then if theBest.qof == null || (best gt theBest.qof(qk)) then theBest = best end updateBest + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a model from the given columns `cols_j`, run it, and return the new model + * and its qof.
+ * @param cols_j the lags/columns to be included in the new model + */ + private def buildNrun (cols_j: LSET [Int]): (Model_FS, VectorD) = + val x_cols = x(?, cols_j) // x projected onto cols_j columns + val mod_j = buildModel (x_cols) // regress with x_j added + val (x_tr, y_tr) = (x_cols(t_rng), y(t_rng)) // get full/training data + mod_j.train (x_tr, y_tr) // train model + val qof_j = mod_j.test (x_tr, y_tr)._2 // test for qof + (mod_j, qof_j) // return model and its qof + end buildNrun + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Perform forward selection to find the most predictive variable to add the * existing model, returning the variable to add and the new model. * May be called repeatedly. * Adapt from regression to time series forecasting. * @see `Fit` for index of QoF measures. - * @param cols the lags/columns currently included in the existing model (currently ignored) + * @param cols the lags/columns currently included in the existing model * @param qk index of Quality of Fit (QoF) to use for comparing quality */ def forwardSel (cols: LSET [Int])(using qk: Int): BestStep = - var best = BestStep ()() // best step so far + var best = BestStep ()() // best step so far for j <- x.indices2 if ! 
(cols contains j) do - val cols_j = cols union LSET (j) // try adding variable/column x_j - val x_cols = x(?, cols_j) // x projected onto cols_j columns - val mod_j = reg.buildModel (x_cols) // regress with x_j added - mod_j.train (x_cols, y) // train model - best = best.better (j, mod_j.test ()._2, mod_j) // which is better + val cols_j = cols union LSET (j) // try adding variable/column x_j + val (mod_j, qof_j) = buildNrun (cols_j) // build and run the model + best = best.better (j, qof_j, mod_j, cols_j) // which is better end for if best.col == -1 then @@ -264,35 +336,35 @@ abstract class Forecaster_Reg (x: MatrixD, y: VectorD, hh: Int, fname: Array [St val y = bmod.getY val yp = bmod.predict (bmod.getX) val qof = bmod.diagnose (y.drop (skip), yp.drop (skip), null) - BestStep (best.col, qof, bmod)(qof(qk)) + BestStep (best.col, qof, bmod, best.mod_cols)(qof(qk)) else best end correctQoF //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform forward selection to find the most predictive variables to have + /** Perform FORWARD SELECTION to find the MOST predictive variables to have * in the model, returning the variables added and the new Quality of Fit (QoF) * measures for all steps. * @see `modeling.Fit` for index of QoF measures. 
* @see `modeling.Predictor` for more information - * @param cross whether to include the cross/roll-validation QoF measure + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") * @param qk index of Quality of Fit (QoF) to use for comparing quality */ - override def forwardSelAll (cross: Boolean = false)(using qk: Int): (LSET [Int], MatrixD) = + override def forwardSelAll (cross: String = "many")(using qk: Int): (LSET [Int], MatrixD) = resetBest () - val rSq = new MatrixD (x.dim2, Fit.qofVectorSize) // QoF: R^2, R^2 Bar, sMAPE, R^2 cv - val cols = LSET (0) // start with x_0 in model (e.g., intercept) - updateQoF (rSq, 0, cross, reg.select0 (qk)) // update Qof results for 0-th variable + val rSq = new MatrixD (x.dim2, Fit.qofVectorSize) // QoF: R^2, R^2 Bar, sMAPE, R^2 cv + val cols = LSET (0) // start with x_0 in model (e.g., intercept) + updateQoF (rSq, 0, cross, reg.select0 (qk)) // update Qof results for 0-th variable banner (s"forwardSelAll: (qk = $qk, l = 0) INITIAL variable (0, ${fname(0)}) => cols = $cols") breakable { for l <- 1 until x.dim2 do - val best = forwardSel (cols) // add most predictive variable - if best.col == -1 then break () // could not find variable to add + val best = forwardSel (cols) // add most predictive variable + if best.col == -1 then break () // could not find variable to add updateBest (best) - cols += best.col // add variable x_j - updateQoF (rSq, l, cross, best) // update QoF results for l-th variable + cols += best.col // add variable x_j + updateQoF (rSq, l, cross, best) // update QoF results for l-th variable val (jj, jj_qof) = (best.col, best.qof(qk)) banner (s"forwardSelAll: (l = $l) ADD variable ($jj, ${fname(jj)}) => cols = $cols @ $jj_qof") end for @@ -312,15 +384,13 @@ abstract class Forecaster_Reg (x: MatrixD, y: VectorD, hh: Int, fname: Array [St * (default (1) assume intercept x_0 will be in any model) * @param qk index of Quality of Fit (QoF) to use for comparing 
quality */ - def backwardElim (cols: LSET [Int], first: Int = 1) (using qk: Int): BestStep = - var best = BestStep ()() // best step so far + def backwardElim (cols: LSET [Int], first: Int = 1) (using qk: Int): BestStep = + var best = BestStep ()() // best step so far for j <- first until x.dim2 if cols contains j do - val cols_j = cols diff LSET (j) // try removing variable/column x_j - val x_cols = x(?, cols_j) // x projected onto cols_j columns - val mod_j = reg.buildModel (x_cols) // regress with x_j added - mod_j.train () // train model - best = best.better (j, mod_j.test ()._2, mod_j) // which is better + val cols_j = cols diff LSET (j) // try removing variable/column x_j + val (mod_j, qof_j) = buildNrun (cols_j) // build and run the model + best = best.better (j, qof_j, mod_j, cols_j) // which is better end for if best.col == -1 then @@ -329,95 +399,95 @@ abstract class Forecaster_Reg (x: MatrixD, y: VectorD, hh: Int, fname: Array [St end backwardElim //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform backward elimination to find the least predictive variables to remove + /** Perform BACKWARD ELIMINATION to find the LEAST predictive variables to remove * from the full model, returning the variables left and the new Quality of Fit (QoF) * measures for all steps. * @see `modeling.Fit` for index of QoF measures. 
* @see `modeling.Predictor` for more information * @param first first variable to consider for elimination - * @param cross whether to include the cross/roll-validation QoF measure + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "none") * @param qk index of Quality of Fit (QoF) to use for comparing quality */ - override def backwardElimAll (first: Int = 1, cross: Boolean = false)(using qk: Int): + override def backwardElimAll (first: Int = 1, cross: String = "none")(using qk: Int): (LSET [Int], MatrixD) = resetBest () - val rSq = new MatrixD (x.dim2, Fit.qofVectorSize) // R^2, R^2 Bar, sMAPE, R^2 cv - val cols = LSET.range (0, x.dim2) // start with all x_j in model - val rem = ArrayBuffer [Int] () // start with no columns removed + val rSq = new MatrixD (x.dim2, Fit.qofVectorSize) // R^2, R^2 Bar, sMAPE, R^2 cv + val cols = reg.mcols // start with all x_j in model + val rem = ArrayBuffer [Int] () // start with no columns removed - val best0 = reg.fullModel (qk) - updateQoF (rSq, 0, cross, best0) // update QoF results for full model + val best0 = reg.fullModel (qk) // call reg's fullModel + updateBest (best0) + updateQoF (rSq, 0, cross, best0) // update QoF results for full model val jj_qof = best0.qof(qk) - - banner (s"backwardElimAll: (qk = $qk, l = 0) INITIAL variables (all) => cols = $cols @ $jj_qof") + debug ("backwardElimAll", s"(qk = $qk, l = 0) INITIAL variables (all) => cols = $cols @ $jj_qof") breakable { - for l <- 1 until x.dim2 - 1 do // l indicates number of variables eliminated - val best = backwardElim (cols, first) // remove least predictive variable - if best.col == -1 then break () // could not find variable to remove + for l <- 1 until x.dim2 - 1 do // l indicates number of variables eliminated + val best = backwardElim (cols, first) // remove least predictive variable + if best.col == -1 then break () // could not find variable to remove updateBest (best) - cols -= best.col // remove variable x_j - 
rem += best.col // keep track of removed columns - updateQoF (rSq, l, cross, best) // update QoF results + cols -= best.col // remove variable x_j + rem += best.col // keep track of removed columns + updateQoF (rSq, l, cross, best) // update QoF results val (jj, jj_qof) = (best.col, best.qof(qk)) - banner (s"backwardElimAll: (l = $l) REMOVE variable ($jj, ${fname(jj)}) => cols = $cols @ $jj_qof") + debug ("backwardElimAll", s"(l = $l) REMOVE variable ($jj, ${fname(jj)}) => cols = $cols @ $jj_qof") end for } // breakable - updateQoF (rSq, x.dim2-1, cross, reg.select0 (qk)) // update Qof results for 0-th variable - rem += cols.max // remove last non-zero column - rem += 0 // remove column 0 + updateQoF (rSq, x.dim2-1, cross, reg.select0 (qk)) // update Qof results for 0-th variable + rem += cols.max // remove last non-zero column + rem += 0 // remove column 0 - (LSET.from (rem.reverse), rSq.reverse) // reverse the order results + (LSET.from (rem.reverse), rSq.reverse) // reverse the order results end backwardElimAll //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform stepwise regression to find the most predictive variables to have - * in the model, returning the variables left and the new Quality of Fit (QoF) + /** Perform STEPWISE SELECTION to find a GOOD COMBINATION of predictive variables to have + * in the model, returning the variables selected and the new Quality of Fit (QoF) * measures for all steps. At each step it calls forwardSel and backwardElim * and takes the best of the two actions. Stops when neither action yields improvement. * @see `modeling.Fit` for index of QoF measures. 
* @see `modeling.Predictor` for more information - * @param cross whether to include the cross/roll-validation QoF measure + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "none") * @param swap whether to allow a swap step (swap out a feature for a new feature in one step) * @param qk index of Quality of Fit (QoF) to use for comparing quality */ - override def stepwiseSelAll (cross: Boolean = false, swap: Boolean = true)(using qk: Int): + override def stepwiseSelAll (cross: String = "none", swap: Boolean = true)(using qk: Int): (LSET [Int], MatrixD) = resetBest () - val rSq = new MatrixD (x.dim2 - 1, Fit.qofVectorSize) // QoF: R^2, R^2 Bar, sMAPE, R^2 cv - val cols = LSET (0) // start with x_0 in model - var last_q = Fit.extreme (qk) // current best QoF initialized to extreme + val rSq = new MatrixD (x.dim2 - 1, Fit.qofVectorSize) // QoF: R^2, R^2 Bar, sMAPE, R^2 cv + val cols = LSET (0) // start with x_0 in model + var last_q = Fit.extreme (qk) // current best QoF initialized to extreme val vars = ArrayBuffer [Int]() banner (s"stepwiseSelAll: (qk = $qk, l = 0) INITIAL variable (0, ${fname(0)}) => cols = $cols") breakable { for l <- 1 until x.dim2 - 1 do - val bestf = forwardSel (cols) // add most predictive variable OR - val bestb = backwardElim (cols, 1) // remove least predictive variable + val bestf = forwardSel (cols) // add most predictive variable OR + val bestb = backwardElim (cols, 1) // remove least predictive variable debug ("stepwiseSelAll", s"bestf = $bestf, bestb = $bestb") - val slack = 25.0 / l~^2 // increase slack to include more features - // slack => likely to ADD features at the beginning + val slack = 25.0 / l~^2 // increase slack to include more features + // slack => likely to ADD features at the beginning if (bestb.col == -1 || (bestf ge bestb.qof(qk) + slack)) && // forward as good as backward (bestf.col != -1 && (bestf gt last_q + slack)) then // a better model has been found updateBest (bestf) 
vars += bestf.col - cols += bestf.col // ADD variable bestf.col + cols += bestf.col // ADD variable bestf.col last_q = bestf.qof(qk) - updateQoF (rSq, l, cross, bestf) // update QoF results + updateQoF (rSq, l, cross, bestf) // update QoF results println (s"\nstepwiseSelAll: (l = $l) ADD variable $bestf") val (jj, jj_qof) = (bestf.col, last_q) banner (s"stepwiseSelAll: (l = $l) ADD variable ($jj, ${fname(jj)}) => cols = $cols @ $jj_qof") - else if bestb.col != -1 && (bestb gt last_q) then // a better model has been found + else if bestb.col != -1 && (bestb gt last_q) then // a better model has been found updateBest (bestb) vars += bestb.col - cols -= bestb.col // REMOVE variable bestb.col + cols -= bestb.col // REMOVE variable bestb.col last_q = bestb.qof(qk) - updateQoF (rSq, l, cross, bestb) // update QoF results + updateQoF (rSq, l, cross, bestb) // update QoF results println (s"\nstepwiseSelAll: (l = $l) REMOVE variable $bestb") val (jj, jj_qof) = (bestb.col, last_q) banner (s"stepwiseSelAll: (l = $l) REMOVE variable ($jj, ${fname(jj)}) => cols = $cols @ $jj_qof") @@ -427,24 +497,23 @@ abstract class Forecaster_Reg (x: MatrixD, y: VectorD, hh: Int, fname: Array [St val (out, in) = (bestb.col, bestf.col) val bestfb = reg.swapVars (cols, out, in, qk) updateBest (bestfb) - if out != -1 && in != -1 && (bestfb gt last_q) then // a better model has been found + if out != -1 && in != -1 && (bestfb gt last_q) then // a better model has been found vars += bestb.col vars += bestf.col - cols -= bestb.col // REMOVE variable bestb.col (swap out) - cols += bestf.col // ADD variable bestf.col (swap in) + cols -= bestb.col // REMOVE variable bestb.col (swap out) + cols += bestf.col // ADD variable bestf.col (swap in) last_q = bestfb.qof(qk) - updateQoF (rSq, l, cross, bestfb) // update QoF results + updateQoF (rSq, l, cross, bestfb) // update QoF results println (s"\nstepwiseSelAll: (l = $l) SWAP variable $bestb with $bestf") else println (s"\nstepwiseSelAll: (l = $l) last_q = 
$last_q better ($bestb, $bestf)") - break () // can't find a better model -> quit - end if + break () // can't find a better model -> quit end if - val x_cols = x(?, cols) // x projected onto cols columns - val mod_ = reg.buildModel (x_cols) // regress on this x - mod_.train () // train model - println (mod_.report (mod_.test ()._2)) // test and report + val x_cols = x(?, cols) // x projected onto cols columns + val mod_ = buildModel (x_cols) // regress on this x + mod_.train () // train model + println (mod_.report (mod_.test ()._2)) // test and report end for } // breakable @@ -455,6 +524,89 @@ abstract class Forecaster_Reg (x: MatrixD, y: VectorD, hh: Int, fname: Array [St (cols, rSq(1 until cols.size)) end stepwiseSelAll + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform BEAM SEARCH SELECTION to find a GOOD COMBINATION of predictive features/variables to + * have in the model, returning the top k sets of features/variables selected and the new Quality of + * Fit (QoF) measures/metrics for all steps. At each step, iterate over the models in the beam + * (top k) and create candidates by adding features (phase 1) and then removing features (phase 2). + * From all the candidates, keep the best k and start a new iteration. Stops when there is + * no improvement in any of top k or the maximum number of features is reached. + * @see `Fit` for index of QoF measures/metrics. 
+ * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") + * @param bk the beam width holding the top k models (defaults to 3) + * @param qk index of Quality of Fit (QoF) to use for comparing quality + */ + def beamSelAll (cross: String = "many", bk: Int = 3)(using qk: Int): (LSET [Int], MatrixD) = + val beam = new PriorityQueueFW [BestStep] (bk+1) + + resetBest () + val rSq = new MatrixD (x.dim2, Fit.qofVectorSize) // QoF: R^2, R^2 Bar, sMAPE, R^2 cv + val cols = LSET (0) // start with x_0 in model (e.g., intercept) + updateQoF (rSq, 0, cross, reg.select0 (qk)) // update Qof results for 0-th variable + + // phase I: initialize the beam with bk best candidates from forward selection + + banner (s"beamSelAll: (qk = $qk, l = 0) INITIAL variable (0, ${fname(0)}) => cols = $cols") + + breakable { + for l <- 1 until x.dim2 do + val best = forwardSel (cols) // add most predictive variable + if best.col == -1 then break () // could not find variable to add + beam.enqueue (best) + cols += best.col // add variable x_j + updateQoF (rSq, l, cross, best) // update QoF results for l-th variable + val (jj, jj_qof) = (best.col, best.qof(qk)) + banner (s"beamSelAll: (l = $l) ADD variable ($jj, ${fname(jj)}) => cols = $cols @ $jj_qof") + end for + } // breakable + + banner (s"end of phase I: beam = $beam") + + // phase II: perform local search to improve the candidates -- evolved from initial AI code + + var improved = true + while improved do + improved = false + val nextBeam = new PriorityQueueFW [BestStep] (bk+1) + val seen = LSET [LSET [Int]] () // prevent re-evaluating same models + + for cand <- beam.toArray do // try all candidates in the beam + val candCols = cand.mod_cols + + // 1. WIDER FORWARD: Try adding EVERY possible feature not in candCols + for j <- 0 until x.dim2 if !candCols.contains(j) do + val news = candCols union LSET (j) + if ! 
seen.contains (news) then + seen += news + val (mod_j, qof_j) = buildNrun (news) // build and run the model for news + nextBeam.enqueue (BestStep (j, qof_j, mod_j, news)()) + end for + + // 2. WIDER BACKWARD: Try removing EVERY possible feature currently in candCols + if candCols.size > 1 then + for j <- candCols if j != 0 do // keep intercept (0) + val news = candCols diff LSET (j) // new set of columns + if ! seen.contains (news) then + seen += news + val (mod_j, qof_j) = buildNrun (news) // build and run the model for news + nextBeam.enqueue (BestStep (j, qof_j, mod_j, news)()) + end for + end for + + // check if the best of nextBeam beats the best of the current beam + if ! nextBeam.isEmpty && nextBeam.head.qof(qk) > beam.head.qof(qk) then + beam.clear () + for b <- nextBeam.toArray do beam.enqueue (b) + improved = true + banner (s"Phase II Improvement: new top QoF = ${beam.head.qof(qk)}") + end if + end while + + val top = beam.dequeue () // return the best candidate from beam + updateBest (top) + (top.mod_cols, top.mod.getX) + end beamSelAll + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the relative importance of selected variables, ordered highest to * lowest, rescaled so the highest is one. diff --git a/src/main/scala/scalation/modeling/forecasting/KalmanFilter.scala b/src/main/scala/scalation/modeling/forecasting/KalmanFilter.scala new file mode 100644 index 000000000..6e865ca3a --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/KalmanFilter.scala @@ -0,0 +1,134 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Nirupom Bose Roy + * @version 2.0 + * @date Sun Sep 13 20:37:41 EDT 2015 + * @see LICENSE (MIT style license file). 
+ * + * @note Model: Kalman Filter + * + * @see web.mit.edu/kirtley/kirtley/binlustuff/literature/control/Kalman%20filter.pdf + * @see en.wikipedia.org/wiki/Kalman_filter + */ + +package scalation +package modeling +package forecasting + +import scalation.mathstat._ +import scalation.random.NormalVec + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `KalmanFilter` class provides a simple implementation of a Kalman filter. + * It is useful for smoothing noisy data and for providing better estimates of the + * state of a system. + * @param f the state transition matrix + * @param q the process noise covariance matrix + * @param h the measurement matrix + * @param r the measurement noise covariance matrix + * @param x the initial state vector + * @param p the initial covariance matrix + */ +class KalmanFilter (val f: MatrixD, val q: MatrixD, + val h: MatrixD, val r: MatrixD, + var x: VectorD, var p: MatrixD): + + private val MAX_ITER = 20 // maximum number of iterations + private val doPlot = true // flag for drawing plot + private val n = f.dim // dimension of the state vector + private val _0 = VectorD (n) // vector of 0's + + val traj = if doPlot then new MatrixD (MAX_ITER, n+1) else new MatrixD (0, 0) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the state of the process at the next time point. 
+ */ + def predict (): Unit = + x = f * x // new predicted state + p = f * p * f.ᵀ + q // new predicted covariance + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Update the state and covariance estimates with the current and possibly noisy measurements + * @param z current measurement/observation of the state + */ + def update (z: VectorD): Unit = + val y = z - h * x // measurement residual + val s = h * p * h.ᵀ + r // residual covariance + val k = p * h.ᵀ * s.inverse // optimal Kalman gain + x = x + k * y // updated state estimate + + val i = MatrixD.eye (p.dim, p.dim) // identity matrix + val ikh = i - k * h + p = ikh * p * ikh.ᵀ + k * r * k.ᵀ // updated covariance estimate + end update + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Iteratively solve for x using predict and update phases. + * @param dt the time increment (delta t) + */ + def solve (dt: Double): VectorD = + var t = 0.0 // initial time + + for k <- 0 until MAX_ITER do + t += dt // advance time + if doPlot then traj(k) = x :+ t // add current time t, state x to trajectory + + // predict + predict () // estimate new state x and covariance pp + + // update + val v = NormalVec (_0, r).gen // observation noise + val z = h * x + v // new observation + update (z) + end for + x + end solve + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Copy this Kalman Filter and return it. + */ + def copyFilter (): KalmanFilter = + new KalmanFilter (f.copy, q.copy, h.copy, r.copy, x.copy, p.copy) + end copyFilter + +end KalmanFilter + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `kalmanFilterTest` main function is used to test the `KalmanFilter` class. 
+ * @see en.wikipedia.org/wiki/Kalman_filter + * > runMain scalation.modeling.forecasting.kalmanFilterTest + */ +@main def kalmanFilterTest (): Unit = + + banner ("KalmanFilterTest") + + val dt = 0.1 // time increment (delta t) + val var_a = 0.5 // variance of uncontrolled acceleration a + val var_z = 0.5 // variance from observation noise + + val ff = MatrixD ((2, 2), 1.0, dt, // state transition matrix + 0.0, 1.0) + + val qq = MatrixD ((2, 2), dt~^4/4, dt~^3/2, // process noise covariance matrix + dt~^3/2, dt~^2) * var_a + + val hh = MatrixD ((1, 2), 1.0, 0.0) // measurement matrix + + val rr = MatrixD ((1, 1), var_z) // measurement noise covariance matrix + + val x0 = VectorD (0.0, 0.0) // initial state vector + + val n = ff.dim + val pp = new MatrixD (n, n) // initial covariance estimate matrix + + val kf = new KalmanFilter (ff, qq, hh, rr, x0, pp) + + println ("solve = " + kf.solve (dt)) + println ("traj = " + kf.traj) + + new Plot (kf.traj(?, 2), kf.traj(?, 0), kf.traj(?, 1)) + +end kalmanFilterTest + diff --git a/src/main/scala/scalation/modeling/forecasting/KalmanFilter.scala.bak b/src/main/scala/scalation/modeling/forecasting/KalmanFilter.scala.bak new file mode 100644 index 000000000..95a064df6 --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/KalmanFilter.scala.bak @@ -0,0 +1,241 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller, Hao Peng + * @version 2.0 + * @date Sun Sep 13 20:37:41 EDT 2015 + * @see LICENSE (MIT style license file). 
+ * + * @note Model: Kalman Filter + * + * @see web.mit.edu/kirtley/kirtley/binlustuff/literature/control/Kalman%20filter.pdf + * @see en.wikipedia.org/wiki/Kalman_filter + */ + +package scalation +package modeling +package forecasting + +import scala.annotation.unused + +import scalation.mathstat._ +import scalation.random.NormalVec + +// FIX: needs more thorough testing and estimation for matrices + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `KalmanFilter` class is used to fit state-space models. + * x_t = F x_t-1 + G u_t + w_t (State Equation) + * z_t = H x_t + v_t (Observation/Measurement Equation) + * @param x0 the initial state vector + * @param ff the state transition matrix (F) + * @param hh the observation matrix (H) + * @param qq the process noise covariance matrix (Q) + * @param rr the observation noise covariance matrix (R) + * @param gg the optional control-input matrix (G) + * @param u the optional control vector + */ +class KalmanFilter (x0: VectorD, ff: MatrixD, hh: MatrixD, qq: MatrixD, rr: MatrixD, + gg: MatrixD = null, u: VectorD = null): + + private val MAX_ITER = 20 // maximum number of iterations + private val doPlot = true // flag for drawing plot + private val n = ff.dim // dimension of the state vector + private val _0 = VectorD (n) // vector of 0's + private val ii = MatrixD.eye (n, n) // identity matrix + private val fft = ff.transpose // transpose of ff + private val hht = hh.transpose // transpose of hh + private var x = x0 // the state estimate + private var pp = new MatrixD (n, n) // the covariance estimate + + val traj = if doPlot then new MatrixD (MAX_ITER, n+1) else new MatrixD (0, 0) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the state of the process at the next time point. 
+ */ + def predict (): Unit = + x = ff * x // new predicted state + if u != null && gg != null then x += gg * u // if using control + pp = ff * pp * fft + qq // new predicted covariance + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Update the state and covariance estimates with the current and possibly + * noisy measurements + * @param z current measurement/observation of the state + */ + def update (z: VectorD): Unit = + val y = z - hh * x // measurement residual + val ss = hh * pp * hht + rr // residual covariance + val kk = pp * hht * ss.inverse // optimal Kalman gain + x = x + kk * y // updated state estimate + pp = (ii - kk * hh) * pp // updated covariance estimate + end update + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Iteratively solve for x using predict and update phases. + * @param dt the time increment (delta t) + * @param u the control vector + */ + def solve (dt: Double, @unused u: VectorD = null): VectorD = + var t = 0.0 // initial time + + for k <- 0 until MAX_ITER do + + t += dt // advance time + if doPlot then traj(k) = x :+ t // add current time t, state x to trajectory + + // predict + predict () // estimate new state x and covariance pp + + // update + val v = NormalVec (_0, rr).gen // observation noise - FIX - should work in trait + val z = hh * x + v // new observation + + update (z) + end for + x + end solve + +end KalmanFilter + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `KalmanFilter` object provide factory methods for special types of Kalman Filters. + */ +object KalmanFilter: + + private val debug = debugf ("KalmanFilter", true) // debug function + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `KalmanFilter` for p-th order Auto-Regressive AR(p) time series models. 
+ * x_t = F x_t-1 + w_t (State Equation) + * z_t = H x_t + v_t (Observation/Measurement Equation) + * @param y the time series vector + * @param hparam the hyperparameters + */ + def apply (y: VectorD, hparam: HyperParameter = AR.hp): KalmanFilter = + val p = hparam("p").toInt // order of AR model + val ar = new AR (y, 1) // use simple MoM for initial estimtes + ar.train (null, y) // train the MoM model + val b = ar.parameter // get the parameter vector b (φ) + val e = ar.residual // get the residual/error vector + val x0 = y(0 until p) // initial state vector + val ff = buildMatrix (p, b) // state transition matrix (F) + val hh = new MatrixD (1, p); hh(0, 0) = 1 // observation matrix (H) + val qq = new MatrixD (1, 1); qq(0, 0) = e.variance // process noise covariance matrix (Q = var(e_t)) + val rr = new MatrixD (1, 1) // observation noise covariance matrix (R = [0]) + new KalmanFilter (x0, ff, hh, qq, rr) + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build the state transition matrix F for a p-th order Auto-Regressive AR(p) model. + * @param p the order of the AR model + * @param b the coefficient vector b (φ) for a p-th order Auto-Regressive AR(p) model + */ + def buildMatrix (p: Int, b: VectorD): MatrixD = + val f = new MatrixD (p, p) + f(0) = b // first row is vector b (φ) + for i <- 1 until p do f(i, i-1) = 1 // diagonal submatrix + debug ("buildMatrix", s"F = $f") + f + end buildMatrix + +end KalmanFilter + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `kalmanFilterTest` main function is used to test the `KalmanFilter` class. 
+ * @see en.wikipedia.org/wiki/Kalman_filter + * > runMain scalation.modeling.forecasting.kalmanFilterTest + */ +@main def kalmanFilterTest (): Unit = + + banner ("KalmanFilterTest") + + val dt = 0.1 // time increment (delta t) + val var_a = 0.5 // variance of uncontrolled acceleration a + val var_z = 0.5 // variance from observation noise + + val ff = MatrixD ((2, 2), 1.0, dt, // transition matrix + 0.0, 1.0) + + val hh = MatrixD ((1, 2), 1.0, 0.0) + + val qq = MatrixD ((2, 2), dt~^4/4, dt~^3/2, + dt~^3/2, dt~^2) * var_a + + val rr = MatrixD ((1, 1), var_z) + + val x0 = VectorD (0.0, 0.0) + + val kf = new KalmanFilter (x0, ff, hh, qq, rr) + + println ("solve = " + kf.solve (dt)) + println ("traj = " + kf.traj) + + new Plot (kf.traj(?, 2), kf.traj(?, 0), kf.traj(?, 1)) + +end kalmanFilterTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `kalmanFilterTest2` main function is used to test the `KalmanFilter` class. + * Test the Kalman Filter on the COVID-19 dataset. + * > runMain scalation.modeling.forecasting.kalmanFilterTest2 + */ +@main def kalmanFilterTest2 (): Unit = + + import Example_Covid.loadData_y + + banner ("KalmanFilterTest2: COVID-19") + + val yy = loadData_y () +// val y = yy // full + val y = yy(0 until 116) // clip the flat end +// val hh = 6 // maximum forecasting horizon + + val kf = KalmanFilter (y) // create a Kalman Filter + + println ("solve = " + kf.solve (1.0)) + println ("traj = " + kf.traj) + + new Plot (kf.traj(?, 2), kf.traj(?, 0), kf.traj(?, 1)) + +end kalmanFilterTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `kalmanFilterTest3` main function is used to test the `KalmanFilter` class. 
+ * @see https://faculty.washington.edu/ezivot/econ584/notes/statespacemodels.pdf + * > runMain scalation.modeling.forecasting.kalmanFilterTest2 + * +@main def kalmanFilterTest3 (): Unit = + + banner ("KalmanFilterTest: AR(2)") + + val dt = 0.1 // time increment (delta t) + val var_a = 0.5 // variance of uncontrolled acceleration a + val var_z = 0.5 // variance from observation noise + + val ff = MatrixD ((2, 2), phi1, phi2, // transition matrix + 1.0, 0.0) + + val hh = MatrixD ((1, 2), 1.0, 0.0) + + val qq = MatrixD ((2, 2), dt~^4/4, dt~^3/2, + dt~^3/2, dt~^2) * var_a + + val rr = MatrixD ((1, 1), var_z) + + val x0 = VectorD (0.0, 0.0) + + val kf = new KalmanFilter (x0, ff, hh, qq, rr) + + println ("solve = " + kf.solve (dt)) + println ("traj = " + kf.traj) + + new Plot (kf.traj(?, 2), kf.traj(?, 0), kf.traj(?, 1)) + +end kalmanFilterTest3 + */ + diff --git a/src/main/scala/scalation/modeling/forecasting/KalmanFilter.scala.bak2 b/src/main/scala/scalation/modeling/forecasting/KalmanFilter.scala.bak2 new file mode 100644 index 000000000..8b3da573e --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/KalmanFilter.scala.bak2 @@ -0,0 +1,245 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller, Hao Peng + * @version 2.0 + * @date Sun Sep 13 20:37:41 EDT 2015 + * @see LICENSE (MIT style license file). + * + * @note Model: Kalman Filter + * + * @see web.mit.edu/kirtley/kirtley/binlustuff/literature/control/Kalman%20filter.pdf + * @see en.wikipedia.org/wiki/Kalman_filter + */ + +package scalation +package modeling +package forecasting + +import scala.annotation.unused + +import scalation.mathstat._ +import scalation.random.NormalVec + +// FIX: needs more thorough testing and estimation for matrices + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `KalmanFilter` class is used to fit state-space models. 
+ * x_t = F x_t-1 + G u_t + w_t (State Equation) + * z_t = H x_t + v_t (Observation/Measurement Equation) + * @param x0 the initial state vector + * @param ff the state transition matrix (F) + * @param hh the observation matrix (H) + * @param qq the process noise covariance matrix (Q) + * @param rr the observation noise covariance matrix (R) + * @param gg the optional control-input matrix (G) + * @param u the optional control vector + */ +class KalmanFilter (x0: VectorD, ff: MatrixD, hh: MatrixD, qq: MatrixD, rr: MatrixD, + gg: MatrixD = null, u: VectorD = null): + + private val MAX_ITER = 20 // maximum number of iterations + private val doPlot = true // flag for drawing plot + private val n = ff.dim // dimension of the state vector + private val _0 = VectorD (n) // vector of 0's + private val ii = MatrixD.eye (n, n) // identity matrix + private val fft = ff.ᵀ // transpose of ff + private val hht = hh.ᵀ // transpose of hh + private var x = x0 // the state estimate + private var pp = new MatrixD (n, n) // the covariance estimate + + val traj = if doPlot then new MatrixD (MAX_ITER, n+1) else new MatrixD (0, 0) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the state of the process at the next time point. 
+ */ + def predict (): Unit = + x = ff * x // new predicted state + if u != null && gg != null then x += gg * u // if using control + pp = ff * pp * fft + qq // new predicted covariance + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Update the state and covariance estimates with the current and possibly + * noisy measurements + * @param z current measurement/observation of the state + */ + def update (z: VectorD): Unit = + val y = z - hh * x // measurement residual + val ss = hh * pp * hht + rr // residual covariance + val kk = pp * hht * ss.inverse // optimal Kalman gain + x = x + kk * y // updated state estimate + pp = (ii - kk * hh) * pp // updated covariance estimate + end update + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Iteratively solve for x using predict and update phases. + * @param dt the time increment (delta t) + * @param u the control vector + */ + def solve (dt: Double, @unused u: VectorD = null): VectorD = + var t = 0.0 // initial time + + for k <- 0 until MAX_ITER do + + t += dt // advance time + if doPlot then traj(k) = x :+ t // add current time t, state x to trajectory + + // predict + predict () // estimate new state x and covariance pp + + // update + val v = NormalVec (_0, rr).gen // observation noise - FIX - should work in trait + val z = hh * x + v // new observation + + update (z) + end for + x + end solve + +end KalmanFilter + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `KalmanFilter` object provide factory methods for special types of Kalman Filters. + */ +object KalmanFilter: + + private val debug = debugf ("KalmanFilter", true) // debug function + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `KalmanFilter` for p-th order Auto-Regressive AR(p) time series models. 
+ * x_t = F x_t-1 + w_t (State Equation) + * z_t = H x_t + v_t (Observation/Measurement Equation) + * @see Kalman Filter section in "Introduction to Computational Data Science" + * @param y the time series vector + * @param hparam the hyperparameters + */ + def apply (y: VectorD, hparam: HyperParameter = AR.hp): KalmanFilter = + val p = hparam("p").toInt // order of AR model + val yy = y.standardize // standardize the input (y-mu)/sigma + val ar = new AR (yy, 1) // use simple MoM for initial estimtes + ar.train (null, yy) // train the MoM model + val b = ar.parameter.drop (1) // get the parameter vector b (φ); skip intercept + val yp = ar.predictAll () // make predictions + val e = yy - yp // get the residual/error vector + val x0 = yy(0 until p) // initial state vector + val ff = buildMatrix (p, b) // state transition matrix (F) + val hh = new MatrixD (1, p); hh(0, 0) = 1 // observation matrix (H) + val qq = new MatrixD (p, p); qq(?, ?) = e.variance // process noise covariance matrix (Q = var(e_t)) + val rr = new MatrixD (1, 1) // observation noise covariance matrix (R = [0]) + debug ("apply", s"p = $p, x0 = $x0, ff = $ff, hh = $hh, qq = $qq, rr = $rr") + new KalmanFilter (x0, ff, hh, qq, rr) + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build the state transition matrix F for a p-th order Auto-Regressive AR(p) model. + * @param p the order of the AR model + * @param b the coefficient vector b (φ) for a p-th order Auto-Regressive AR(p) model + */ + def buildMatrix (p: Int, b: VectorD): MatrixD = + val f = new MatrixD (p, p) + f(0) = b // first row is vector b (φ) + for i <- 1 until p do f(i, i-1) = 1 // diagonal submatrix + f + end buildMatrix + +end KalmanFilter + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `kalmanFilterTest` main function is used to test the `KalmanFilter` class. 
+ * @see en.wikipedia.org/wiki/Kalman_filter + * > runMain scalation.modeling.forecasting.kalmanFilterTest + */ +@main def kalmanFilterTest (): Unit = + + banner ("KalmanFilterTest") + + val dt = 0.1 // time increment (delta t) + val var_a = 0.5 // variance of uncontrolled acceleration a + val var_z = 0.5 // variance from observation noise + + val ff = MatrixD ((2, 2), 1.0, dt, // transition matrix + 0.0, 1.0) + + val hh = MatrixD ((1, 2), 1.0, 0.0) + + val qq = MatrixD ((2, 2), dt~^4/4, dt~^3/2, + dt~^3/2, dt~^2) * var_a + + val rr = MatrixD ((1, 1), var_z) + + val x0 = VectorD (0.0, 0.0) + + val kf = new KalmanFilter (x0, ff, hh, qq, rr) + + println ("solve = " + kf.solve (dt)) + println ("traj = " + kf.traj) + + new Plot (kf.traj(?, 2), kf.traj(?, 0), kf.traj(?, 1)) + +end kalmanFilterTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `kalmanFilterTest2` main function is used to test the `KalmanFilter` class. + * Test the Kalman Filter on the COVID-19 dataset. + * > runMain scalation.modeling.forecasting.kalmanFilterTest2 + */ +@main def kalmanFilterTest2 (): Unit = + + import Example_Covid.loadData_y + + banner ("KalmanFilterTest2: COVID-19") + + val yy = loadData_y () +// val y = yy // full + val y = yy(0 until 116) // clip the flat end +// val hh = 6 // maximum forecasting horizon + + AR.hp("p") = 3 + val kf = KalmanFilter (y) // create a Kalman Filter + + println ("solve = " + kf.solve (1.0)) + println ("traj = " + kf.traj) + + new Plot (kf.traj(?, 2), kf.traj(?, 0), kf.traj(?, 1)) + +end kalmanFilterTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `kalmanFilterTest3` main function is used to test the `KalmanFilter` class. 
+ * @see https://faculty.washington.edu/ezivot/econ584/notes/statespacemodels.pdf + * > runMain scalation.modeling.forecasting.kalmanFilterTest2 + * +@main def kalmanFilterTest3 (): Unit = + + banner ("KalmanFilterTest: AR(2)") + + val dt = 0.1 // time increment (delta t) + val var_a = 0.5 // variance of uncontrolled acceleration a + val var_z = 0.5 // variance from observation noise + + val ff = MatrixD ((2, 2), phi1, phi2, // transition matrix + 1.0, 0.0) + + val hh = MatrixD ((1, 2), 1.0, 0.0) + + val qq = MatrixD ((2, 2), dt~^4/4, dt~^3/2, + dt~^3/2, dt~^2) * var_a + + val rr = MatrixD ((1, 1), var_z) + + val x0 = VectorD (0.0, 0.0) + + val kf = new KalmanFilter (x0, ff, hh, qq, rr) + + println ("solve = " + kf.solve (dt)) + println ("traj = " + kf.traj) + + new Plot (kf.traj(?, 2), kf.traj(?, 0), kf.traj(?, 1)) + +end kalmanFilterTest3 + */ + diff --git a/src/main/scala/scalation/modeling/forecasting/MakeMatrix4TS.scala b/src/main/scala/scalation/modeling/forecasting/MakeMatrix4TS.scala index 811a37f49..abfdfca6f 100644 --- a/src/main/scala/scalation/modeling/forecasting/MakeMatrix4TS.scala +++ b/src/main/scala/scalation/modeling/forecasting/MakeMatrix4TS.scala @@ -14,17 +14,20 @@ package scalation package modeling package forecasting -import scala.collection.mutable.ArrayBuffer +import scala.collection.mutable.{ArrayBuffer, LinkedHashSet => LSET} import scala.math.{cos, sin} import scalation.mathstat._ +import TransformT._ + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `TransformMap` type and its extension methods provides maps of transforms. 
*/ type TransformMap = Map [String, Transform | Array [Transform]] extension (tr: Transform | Array [Transform]) + def apply (i: Int = -1): Transform = tr match case t: Transform => t @@ -32,7 +35,7 @@ extension (tr: Transform | Array [Transform]) def length: Int = tr match - case t: Transform => 1 + case _ : Transform => 1 case tArr: Array [Transform] => tArr.length def f(x: VectorD): VectorD = apply ().f(x) @@ -46,7 +49,7 @@ extension (tr: Transform | Array [Transform]) /** The `MakeMatrix4TSY` trait provides factory method templates for for invoking * `ARY*` constructors. */ -trait MakeMatrix4TSY: +trait MakeMatrix4TSY: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create an `ARY*` object by making/building an input matrix x and then calling the @@ -71,12 +74,12 @@ trait MakeMatrix4TSY: * @param tRng the time range, if relevant (time index may suffice) * @param hparam the hyper-parameters * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForm the z-transform (rescale to standard normal) + * @param tFormT the transform for rescaling endogenous and exogenous */ def rescale (y: VectorD, hh: Int, fname_ : Array [String] = null, tRng: Range = null, hparam: HyperParameter = MakeMatrix4TS.hp, bakcast: Boolean = false, - tForm: VectorD | MatrixD => Transform = x => zForm(x)): Forecaster_Reg | Forecaster_D + tFormT: TransformT = MinMax): Forecaster_Reg | Forecaster_D end MakeMatrix4TSY @@ -85,53 +88,56 @@ end MakeMatrix4TSY /** The `MakeMatrix4TS` trait provides factory method templates for for invoking * `ARX*` constructors. */ -trait MakeMatrix4TS: +trait MakeMatrix4TS: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create an `ARX*` object by building an input matrix xy and then calling the * `ARX*` constructor. 
- * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param fname_ the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param fEndo the array of functions used to transform endogenous variables - * @param fExo the array of functions used to transform exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param xe the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param fEndo_enab the set of transforms to be used for the endogenous + * @param fExo_enab the array containing the sets of transforms to be used for the exogenous + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) */ def apply (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, tRng: Range = null, hparam: HyperParameter = MakeMatrix4TS.hp, - fEndo: Array [Transform] = null, fExo: Array [Transform] = null, + fEndo_enab: LSET [TransformT] = null, + fExo_enab: Array [LSET [TransformT]] = null, bakcast: Boolean = false): Forecaster_Reg | Forecaster_D //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create an `ARX*` object by building an input matrix xy and then calling the * `ARX*` constructor. Rescale the input data. 
- * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param fEndo the array of functions used to transform endogenous variables - * @param fExo the array of functions used to transform exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForm the z-transform (rescale to standard normal) + * @param xe the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param fEndo_enab the set of transforms to be used for the endogenous + * @param fExo_enab the array containing the sets of transforms to be used for the exogenous + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param tFormT the transform for rescaling endogenous and exogenous */ def rescale (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, tRng: Range = null, hparam: HyperParameter = MakeMatrix4TS.hp, - fEndo: Array [Transform] = null, fExo: Array [Transform] = null, + fEndo_enab: LSET [TransformT] = null, + fExo_enab: Array [LSET [TransformT]] = null, bakcast: Boolean = false, - tForm: VectorD | MatrixD => Transform = x => zForm(x)): Forecaster_Reg | Forecaster_D + tFormT: TransformT = MinMax): Forecaster_Reg | Forecaster_D //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Form an array of names for the features included in the model. 
- * @param n_exo the number of exogenous variable - * @param hp_ the hyper-parameters - * @param n_fEn the number of functions used to map endogenous variables - * @param n_fEx the number of functions used to map exogenous variables + * @param n_exo the number of exogenous variable + * @param hp_ the hyper-parameters + * @param n_fEn the number of functions used to map endogenous variables + * @param n_fExArr the number of functions used to map exogenous variables */ - def formNames (n_exo: Int, hp_ : HyperParameter, n_fEn: Int = 0, n_fEx: Int = 0): Array [String] + def formNames (n_exo: Int, hp_ : HyperParameter, n_fEn: Int = 0, n_fExArr: Array [Int] = null): Array [String] end MakeMatrix4TS @@ -145,14 +151,10 @@ object MakeMatrix4TS: /** Base hyper-parameter specification for regression based time series models. */ val hp = new HyperParameter - hp += ("p", 1, 1) // number of lags for the endogenous variable + hp += ("p", 3, 3) // number of lags for the endogenous variable hp += ("sp", 7, 7) // the seasonal period (time units between repetitive behavior) hp += ("ps", 2, 2) // number of seasonal lags for the endogenous variable - hp += ("pp", 2.0, 2.0) // the power (defaults to quadratic) to raise the lags of the endogenous variable to - hp += ("pr", 0.5, 0.5) // the root (defaults to sqrt) to take of the lags of the endogenous variable - hp += ("q", 1, 1) // number of lags for the exogenous variables - hp += ("qp", 2.0, 2.0) // the power (defaults to quadratic) to raise the lags of the exogenous variables to - hp += ("qr", 0.5, 0.5) // the root (defaults to sqrt) to take of the lags of the exogenous variables + hp += ("q", 2, 2) // number of lags for the exogenous variables hp += ("spec", 1, 1) // trend terms: 0 - none, 1 - constant, 2 - linear, 3 - quadratic, // 4 - sine, 5 - cosine hp += ("lwave", 7, 7) // wavelength for sine/cosine (distance between peaks) @@ -160,40 +162,39 @@ object MakeMatrix4TS: hp += ("nneg", 1, 1) // 0 - unrestricted, 1 - predictions must 
be non-negative hp += ("factorization", "Fac_QR", "Fac_QR") // type of matrix factorization - hp += ("lambda", 0.1, 0.1) // shrinkage/regularization parameter /*------------------------------------------------------------------------------- + @see `scalation.mathstat.Transform` for Transform.hp("p") giving pow, e.g., x^pow Usage: ARY: p, spec, lwave lags of endogenous variable - ARY_Quad: p, pp, spec, lwave lags of endogenous variable with power/quadratic terms + ARY_Quad: p, spec, lwave, pow lags of endogenous variable with power/quadratic terms ARX: p, q, spec, lwave lags of endogenous variable and exogenous variable - ARX_Quad: p, pp, q, spec, lwave lags of endogenous variable with power/quadratic terms and exogenous variable - ARX_Symb: p, pp, pr, q, qp, qr, spec, lwave supports powers and roots for all variables - for DIRECT forecasting use ARY_D, ARY_Quad_D, ARX_D, ARX_Quad_D, ARX_Symb_D - NOTE: Last three hyper-parameters can be used any any such model + ARX_Quad: p, q, spec, lwave, pow lags of endogenous variable with power/quadratic terms and exogenous variable + ARX_SR: p, q, spec, lwave, multiple transforms supports transforms for all variables + for DIRECT forecasting use ARY_D, ARY_Quad_D, ARX_D, ARX_Quad_D, ARX_SR_D -------------------------------------------------------------------------------*/ - private val flaw = flawf ("MakeMatrix4TS") // flaw function - val trend = Array ("const", "lin", "quad", "sin", "cos") // types of trends + private val flaw = flawf ("MakeMatrix4TS") // flaw function + val trend = Array ("const", "lin", "quad", "sin", "cos") // types of trends //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Form an array of names for the features included in the model. Handles all * `*ARY*` models. The `*ARX*` models require custom `formNames` methods. 
* @param spec the number of trend terms * @param p the number of lags for the endogenous variable (lags 1 to p) - * @param pwr whether to raise the lagged endogenous values to a power (defaults to false) + * @param pow the power to raise the lagged endogenous values to (defaults to 0.0) * @param sp the seasonal period (time units until repetitive behavior) * @param start the first seasonal lag to use (not subsumed by regular lags) * @param ps the number of seasonal lags for the endogenous variable (lags 1 to ps) */ - def formNames (spec: Int, p: Int, pwr: Boolean = false, + def formNames (spec: Int, p: Int, pow: Double = 0.0, sp: Int = -1, start: Int = 1, ps: Int = 0): Array [String] = val names = ArrayBuffer [String] () - for j <- 0 until spec do names += s"${trend(j)}" // trend terms - for j <- ps to start by -1 do names += s"yl${j*sp}" // seasonal lags terms - for j <- p to 1 by -1 do names += s"yl$j" // regular lags terms - if pwr then for j <- p to 1 by -1 do names += s"yl$j^" // power lags terms -// println (s"formNames: $names") + for j <- 0 until spec do names += s"${trend(j)}" // trend terms + for j <- ps to start by -1 do names += s"yl${j*sp}" // seasonal lags terms + for j <- p to 1 by -1 do names += s"yl$j" // regular lags terms + if pow != 0.0 then for j <- p to 1 by -1 do names += s"yl$j^$pow" // power lags terms + println (s"MakeMatrix4TS.formNames: names = $names") names.toArray end formNames @@ -202,10 +203,10 @@ object MakeMatrix4TS: * same prepended with one backcasted value. 
* @see `WeightedMovingAverage` * @param y the given output/response vector - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param bakcast whether a backcasted value is prepended to the time series */ - private inline def getYb (y: VectorD, bakcast: Boolean = false): VectorD = - if bakcast then WeightedMovingAverage.backcast (y) +: y // y prepended with one backcast + private inline def getYb (y: VectorD, bakcast: Boolean): VectorD = + if bakcast then WeightedMovingAverage.backcast (y) +: y // y prepended with one backcast else y end getYb @@ -219,19 +220,20 @@ object MakeMatrix4TS: * @param bakcast whether a backcasted value is prepended to the time series */ def makeMatrix4T (y: VectorD, spec: Int, lwave: Double, bakcast: Boolean = false): MatrixD = + if bakcast then println ("makeMatrix4T: FIX: backcasted value not accounted for in any these methods") val m = y.dim val m2 = m / 2.0 - val w = _2Pi / lwave // 2 Pi over wavelength + val w = _2Pi / lwave // 2 Pi over wavelength val x = new MatrixD (m, spec) - val t_0m = VectorD.range (0, m) // vector 0, 1, ..., m-1 + val t_0m = VectorD.range (0, m) // vector 0, 1, ..., m-1 - if spec >= 1 then x(?, 0) = VectorD.one (m) // intercept/constant term - if spec >= 2 then x(?, 1) = t_0m / m // time trend (linear) - if spec >= 3 then x(?, 2) = (t_0m-m2)~^2 / m2~^2 // time-squared trend (quadratic) + if spec >= 1 then x(?, 0) = VectorD.one (m) // intercept/constant term + if spec >= 2 then x(?, 1) = t_0m / m // time trend (linear) + if spec >= 3 then x(?, 2) = (t_0m-m2)~^2 / m2~^2 // time-squared trend (quadratic) if spec >= 4 then - x(?, 3) = t_0m.map ((t: Double) => sin (t * w)) // sine wave trend + x(?, 3) = t_0m.map ((t: Double) => sin (t * w)) // sine wave trend if spec == 5 then - x(?, 4) = t_0m.map ((t: Double) => cos (t * w)) // cosine wave trend + x(?, 4) = t_0m.map ((t: Double) => cos (t * w)) // cosine wave trend x end makeMatrix4T @@ -274,7 +276,7 @@ object 
MakeMatrix4TS: * @param bakcast whether a backcasted value is prepended to the time series */ def makeMatrix4S (y: VectorD, p: Int, sp: Int, ps: Int, bakcast: Boolean = false): MatrixD = - val n = if sp > p then ps else ps - 1 // number of seasonal lags to use + val n = if sp > p then ps else ps - 1 // number of seasonal lags to use if n <= 0 || 2 * sp <= p then flaw ("makeMatrix4S", "seasonality subsumed in regular lags, so it's ignored") return null @@ -298,9 +300,9 @@ object MakeMatrix4TS: def makeMatrix4Y (y: VectorD, hh: Int, bakcast: Boolean = false): MatrixD = val yb = getYb (y, bakcast) val m = y.dim - val yy = new MatrixD (m, hh) // yy = [ y_h ] for h = 1 to hh + val yy = new MatrixD (m, hh) // yy = [ y_h ] for h = 1 to hh for t <- yy.indices; h <- yy.indices2 do - yy(t, h) = if t + h >= m then -0.0 else yb(t + h) // yy -> actual and horizons + yy(t, h) = if t + h >= m then NO_DOUBLE else yb(t + h) // yy -> actual and horizons yy end makeMatrix4Y @@ -346,13 +348,24 @@ object MakeMatrix4TS: * @param xej the j-th exogenous variable vector */ def backfill (xej: VectorD): VectorD = - val xe_j = 0.0 +: xej // prepend with zero - val ii = xe_j.indexWhere (_ != 0.0) // find the first non-zero value - println (s"backfill: from index ii = $ii") - for i <- ii-1 to 0 by -1 do // replace zero prefix with backcasted values - xe_j(i) = WeightedMovingAverage.backcast (xe_j, i) // backcast from index i + val xe_j = 0.0 +: xej // prepend with zero + val ii = xe_j.indexWhere (_ != 0.0) // find the first non-zero value +// println (s"backfill: from index ii = $ii") + for i <- ii-1 to 0 by -1 do // replace zero prefix with backcasted values + xe_j(i) = WeightedMovingAverage.backcast (xe_j, i) // backcast from index i xe_j(1 until xe_j.dim) end backfill + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backfill the zero prefix of the exogenous variables j (xe) by backcasting. 
The zero + * prefix will be at least of size 1 as 0.0 is initially prepended. + * @param xe the matrix of exogenous variables + */ + def backfill (xe: MatrixD): MatrixD = + val xe_bfill = new MatrixD (xe.dim, xe.dim2) + for j <- xe.indices2 do xe_bfill(?, j) = backfill (xe(?, j)) + xe_bfill + end backfill + end MakeMatrix4TS diff --git a/src/main/scala/scalation/modeling/forecasting/NullModel.scala b/src/main/scala/scalation/modeling/forecasting/NullModel.scala index d37200963..be4951142 100644 --- a/src/main/scala/scalation/modeling/forecasting/NullModel.scala +++ b/src/main/scala/scalation/modeling/forecasting/NullModel.scala @@ -35,9 +35,10 @@ import Example_LakeLevels.y class NullModel (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = null, bakcast: Boolean = false) - extends Forecaster (y, hh, tRng, hparam, bakcast): + extends Forecaster (y, hh, tRng, hparam, bakcast) + with NoSubModels: - modelName = s"NullModel" + _modelName = "NullModel" //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Given a time series y_, train the forecasting function y_ = f(lags (y_)) + e, diff --git a/src/main/scala/scalation/modeling/forecasting/RandomWalk.scala b/src/main/scala/scalation/modeling/forecasting/RandomWalk.scala index b079a93d5..6a48a9ee5 100644 --- a/src/main/scala/scalation/modeling/forecasting/RandomWalk.scala +++ b/src/main/scala/scalation/modeling/forecasting/RandomWalk.scala @@ -33,9 +33,10 @@ import scalation.random.Randi class RandomWalk (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = null, bakcast: Boolean = false) - extends Forecaster (y, hh, tRng, hparam, bakcast): + extends Forecaster (y, hh, tRng, hparam, bakcast) + with NoSubModels: - modelName = s"RandomWalk" + _modelName = "RandomWalk" end RandomWalk @@ -140,7 +141,7 @@ end randomWalkTest2 mod.forecastAll (y) // forecast h-steps ahead (h = 1 to hh) for all y mod.diagnoseAll (y, mod.getYf) -// println (s"Final In-ST Forecast Matrix yf 
= ${mod.getYf}") + println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") end randomWalkTest3 @@ -165,18 +166,42 @@ end randomWalkTest3 mod.setSkip (0) mod.rollValidate () // TnT with Rolling Validation mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") // flat forecast matrix yf + println (s"Final TnT Forecast Matrix yf = ${mod.slant (mod.getYf)}") // slanted yf -- easier to visualize end randomWalkTest4 -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `randomWalkTest5` main function tests the `RandomWalk` class on small dataset. - * Test forecasts (h = 1 step ahead forecasts). +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `randomWalkTest5` main function tests the `RandomWalk` class on a simple dataset + * using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). * > runMain scalation.modeling.forecasting.randomWalkTest5 */ @main def randomWalkTest5 (): Unit = + val y_ = VectorD (1, 3, 5, 7, 9, 11, 13, 15, 17, 19) + + val hh = 3 // maximum forecasting horizon + + val mod = new RandomWalk (y_, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on Simple Dataset") + mod.trainNtest ()() // train and test on full dataset + + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + mod.diagnoseAll (y_, mod.getYf) + println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") + +end randomWalkTest5 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `randomWalkTest6` main function tests the `RandomWalk` class on small dataset. + * Test forecasts (h = 1 step ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.randomWalkTest6 + */ +@main def randomWalkTest6 (): Unit = + val y = VectorD (1, 3, 4, 2, 5, 7, 9, 8, 6, 3) val mod = new RandomWalk (y, 1) // create model for time series data @@ -185,16 +210,16 @@ end randomWalkTest4 println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") new Baseline (y, "RW") -end randomWalkTest5 +end randomWalkTest6 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `randomWalkTest6` main function tests the `RandomWalk` class on small dataset. +/** The `randomWalkTest7` main function tests the `RandomWalk` class on small dataset. * Test forecasts (h = 1 step ahead forecasts). - * > runMain scalation.modeling.forecasting.randomWalkTest6 + * > runMain scalation.modeling.forecasting.randomWalkTest7 * @param stm the random number stream to use (command-line argument, e.g., 2) */ -@main def randomWalkTest6 (stm: Int): Unit = +@main def randomWalkTest7 (stm: Int): Unit = val y = VectorD (1, 3, 4, 2, 5, 7, 9, 8, 6, 3) val n = y.dim @@ -205,5 +230,5 @@ end randomWalkTest5 new Plot (null, y ++ yy, null, "training and testing sets", lines = true) -end randomWalkTest6 +end randomWalkTest7 diff --git a/src/main/scala/scalation/modeling/forecasting/RandomWalkS.scala b/src/main/scala/scalation/modeling/forecasting/RandomWalkS.scala index decb6ccfe..dd9150280 100644 --- a/src/main/scala/scalation/modeling/forecasting/RandomWalkS.scala +++ b/src/main/scala/scalation/modeling/forecasting/RandomWalkS.scala @@ -37,11 +37,12 @@ import Example_LakeLevels.y class RandomWalkS (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = RandomWalkS.hp, bakcast: Boolean = false) - extends Forecaster (y, hh, tRng, hparam, bakcast): + extends Forecaster (y, hh, tRng, hparam, bakcast) + with NoSubModels: private val sw = hparam("sw").toDouble // slope weight (same as RW when sw = 0 - modelName = s"RandomWalkS" + _modelName = "RandomWalkS" 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Predict a value for y_t using the 1-step ahead forecast based on the diff --git a/src/main/scala/scalation/modeling/forecasting/ReportBest.scala b/src/main/scala/scalation/modeling/forecasting/ReportBest.scala new file mode 100644 index 000000000..9e4bf28f8 --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/ReportBest.scala @@ -0,0 +1,133 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Yousef Fekri Dabanloo + * @version 2.0 + * @date Fri Feb 27 19:50:13 EST 2026 + * @see LICENSE (MIT style license file). + * + * @note Report Results from the Best Model Found + */ + +package scalation +package modeling +package forecasting + +import scalation.mathstat._ + +import Example_Covid.{loadData, response} + +import SelectionTech._ +import MakeMatrix4TS._ +import TransformT._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Report the results for the best models given by feature selection to have + * in the model, returning the variables left and the new Quality of Fit (QoF) + * measures for all steps. 
+ * @param mod the model to be evaluated + * @param hh the max forecasting horizon + * @param dir the directory to save the report and qof + * @param toFile whether to send to stdout or to a file + * @param ifTest whether to use test set or full dataset + * @param fsType the type of the feature selection to use + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "none") + * @param first first variable to consider for elimination + * (default (1) assume intercept x_0 will be in any model) + * @param swap whether to allow a swap step (swap out a feature for a new feature in one step) + * @param qk index of Quality of Fit (QoF) to use for comparing quality + */ +def reportBest (mod: Forecaster_D, hh: Int, dir: String, toFile: Boolean = true, + ifTest: Boolean = true, fsType: SelectionTech = Stepwise, cross: String = "none", + first: Int = 1, swap: Boolean = true) (using qk: Int): Unit = + + val ew = new EasyWriter ("scalation", dir + "/" + "report.txt", toFile) + val y_org = mod.getY_org + val (x, y) = (mod.getX, mod.getYy) + val tr_size = Model.trSize (y.dim) + val normForm_y = TransformT.Norm.form (y_org(0 until tr_size)) + val t_rng = 0 until tr_size + val (x_tr, y_tr) = (x(t_rng), y(t_rng)) + val (_, qof0) = mod.trainNtest_x (x_tr, y_tr)(x_tr, y_tr) + ew.println (mod.report (qof0)) + mod.setSkip (0) + mod.rollValidate () + val ftMat0 = mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim)) + + val qof = new MatrixD (1, hh+1) + val header: Array [String] = (1 to hh).map (i => s"horizon_$i").toArray ++ Array ("mean") + + val (cols, rSqs, mods, yf_pred, ftMat) = mod.featureSelection (fsType, cross, first, swap) + val yForm = mod.getYForm + val yf_full = y_org +^: yForm.fi(yf_pred) + val yf_test = yf_full(Model.trSize (y.dim) until y.dim) + var yf_save = if ifTest then yf_test else yf_full + if yf_save == null then yf_save = new MatrixD (1, 1) + if toFile then + yf_save.write ("log/scalation" + "/" + dir + "/" + 
"yf.csv", fullPath = true) + else if ifTest then + println (s"yf_test = $yf_save") + else + println(s"yf_full = $yf_save") + + ew.println (s"TnT Forecasts: ${mod.modelName}") + ew.println ("fitMap0 QoF = ") + ew.println (FitM.showFitMap (ftMat0.transpose, QoF.values.map (_.toString))) + val mean_size = cols.map (_.size).sum.toDouble / cols.size + ew.println (s"\nx.dims = ${x.dims}, cols mean_size = ${mean_size}") + + for h <- 0 until hh do + ew.println (s"mods($h).dims = ${mods(h).getX.dims}, cols($h).size = ${cols(h).size}") + ew.println (s"cols($h) = ${cols(h)}") + ew.println (s"rSqs($h) = ${rSqs(h)}\n") + end for + + ew.println ("\nfitMap QoF = ") + ew.println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) + val smapes_tr = ftMat(?, 8) + qof(0, 0 until hh) = smapes_tr + qof(0, hh) = smapes_tr.mean + if toFile then + qof.write ("log/scalation" + "/" + dir + "/" + "qof.csv", header, fullPath = true) + else + println (s"qof = $qof") + ew.println ("\n=============Independent test=============") + ew.println (s"yf_test.dims = ${yf_test.dims}") + val stats = FitM.getTSResult (yf_test, hh, normForm_y) + ew.println (s"sample sizes = ${stats(0)}") + ew.println (s"sMAPEs = ${stats(1)}") + ew.println (s"Normalized MAEs = ${stats(2)}") + ew.println (s"Normalized MSEs = ${stats(3)}") + ew.finish () + +end reportBest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `reportBestTest` main function fit the parameters and tests the `ARX_D` class on real data: + * Forecasting Covid19 using Train and Test (TnT). + * Backward/Stepwise feature selection + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.reportBestTest + */ +@main def reportBestTest (): Unit = + + val exo_vars = Array ("icu_patients", "positive_rate") + val (xxe, yy) = loadData (exo_vars, response) + println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + RidgeRegression.hp("factorization") = "Fac_Cholesky" + + val xe = xxe(0 until 116) // clip the flat end + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + hp("lwave") = 20 // wavelength (distance between peaks) + hp("spec") = 1 // trend specification: 0, 1, 2, 3, 5 + val (p, q, lmb) = (6, 4, 0.5) + hp("p") = p // number of endo lags + hp("q") = q // number of exo lags + RidgeRegression.hp("lambda") = lmb + + val mod = ARX_D.rescale (xe, y, hh, tFormT = Log1p) // create model for time series data + reportBest (mod, hh, "reportBestTest", true, false, SelectionTech.Backward) + +end reportBestTest + diff --git a/src/main/scala/scalation/modeling/forecasting_old/SARIMA.scala b/src/main/scala/scalation/modeling/forecasting/SARIMA.scala similarity index 99% rename from src/main/scala/scalation/modeling/forecasting_old/SARIMA.scala rename to src/main/scala/scalation/modeling/forecasting/SARIMA.scala index d756e1565..73d0c87b4 100644 --- a/src/main/scala/scalation/modeling/forecasting_old/SARIMA.scala +++ b/src/main/scala/scalation/modeling/forecasting/SARIMA.scala @@ -15,7 +15,7 @@ package scalation package modeling -package forecasting_old +package forecasting //import scala.math.sqrt @@ -232,8 +232,8 @@ class SARIMA (y: VectorD, dd: Int = 0, period: Int = 2, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the model name including its current hyper-parameter. 
*/ - modelName = if dd > 0 then s"SARIMA ($p, $d, $q) x ($pp, $dd, $qq)_${period}" - else s"SARIMA ($p, $d, $q)" + _modelName = if dd > 0 then s"SARIMA_${p}_${d}_${q}_x_${pp}_${dd}_${qq}_$period" + else s"SARIMA_${p}_${d}_$q" //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the maximum lag used by this model (its capacity to look into the past). diff --git a/src/main/scala/scalation/modeling/forecasting_old/SARIMAX.scalaa b/src/main/scala/scalation/modeling/forecasting/SARIMAX.scalaa similarity index 100% rename from src/main/scala/scalation/modeling/forecasting_old/SARIMAX.scalaa rename to src/main/scala/scalation/modeling/forecasting/SARIMAX.scalaa diff --git a/src/main/scala/scalation/modeling/forecasting/SARY.scala b/src/main/scala/scalation/modeling/forecasting/SARY.scala index e87d8f626..4b9693db4 100644 --- a/src/main/scala/scalation/modeling/forecasting/SARY.scala +++ b/src/main/scala/scalation/modeling/forecasting/SARY.scala @@ -1,13 +1,14 @@ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller +/** @author John Miller, Hruthi Muggalla * @version 2.0 * @date Sun Jun 30 13:27:00 EDT 2024 * @see LICENSE (MIT style license file). * - * @note Model: Auto-Regressive on lagged y (SARY) using OLS + * @note Model: Seasonal Auto-Regressive on lagged y (SARY) using OLS * * @see `scalation.modeling.Regression` + * @see `scalation.modeling.RidgeRegression` * @see `scalation.modeling.forecasting.ARX` when exogenous variable are needed */ @@ -21,13 +22,15 @@ import MakeMatrix4TS._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `SARY` class provides basic time series analysis capabilities for SARY models. + * These models extend `ARY` models by including seasonal (periodic) lags. * SARY models utilize multiple linear regression based on lagged values of y. 
- * Given time series data stored in vector y, its next value y_t = combination of last - * p values of y. + * Given time series data stored in vector y, its next value y_t = combination of + * the last p lagged values of y and the last ps seasonally lagged values. * * y_t = b dot x_t + e_t * - * where y_t is the value of y at time t and e_t is the residual/error term. + * where y_t is the value of y at time t, b is the parameter vector, x_t collects past + * lagged (both regular and seasonal) values, and e_t is the residual/error term. * @param x the data/input matrix (lagged columns of y) @see `SARY.apply` * @param y the response/output vector (time series data) * @param hh the maximum forecasting horizon (h = 1 to hh) @@ -37,26 +40,51 @@ import MakeMatrix4TS._ * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) */ class SARY (x: MatrixD, y: VectorD, hh: Int, fname: Array [String], - tRng: Range = null, hparam: HyperParameter = hp, - bakcast: Boolean = false) // backcast value used only `MakeMatrix4TS` + tRng: Range = null, hparam: HyperParameter = hp, + bakcast: Boolean = false) // backcast value used only `MakeMatrix4TS` extends ARY (x, y, hh, fname, tRng, hparam, bakcast): // no automatic backcasting, @see `SARY.apply` private val debug = debugf ("SARY", true) // debug function private val sp = hparam("sp").toInt // the seasonal period private val ps = hparam("ps").toInt // use the last ps seasonal values (ps seasonal lags) - modelName = s"SARY($p, $ps @ $sp)" + // Store the actual lag structure used in matrix construction + private val (actualSeasonalLags, actualRegularLags) = getLagStructure () + + _modelName = s"SARY_${p}_${ps}_$sp" debug ("init", s"$modelName with additional term spec = $spec") debug ("init", s"[ x | y ] = ${x :^+ y}") + debug ("init", s"Seasonal lags used: $actualSeasonalLags") + debug ("init", s"Regular lags used: $actualRegularLags") + + 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Extract the actual lag structure from the feature names. + * FIX -- this is a brittle solution that is likely to break. + */ + private def getLagStructure (): (Set [Int], Set [Int]) = + var seasonalLags, regularLags = Set [Int] () + + for i <- spec until fname.length do // use fname to detect the real lag structure + val name = fname(i) + if name.startsWith ("yl") then + val lag = name.substring(2).toInt // e.g., yl3 => lag 3 + if lag > p && lag % sp == 0 then seasonalLags += lag // FIX -- how can lag > p and not be a seasonal lag? + else regularLags += lag + end if + end for + + (seasonalLags, regularLags) + end getLagStructure //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Forge a new vector from the first spec values of x, the last p-h+1 values * of x (past values) and recent values 1 to h-1 from the forecasts. + * FIX -- need past seasonal values as well -- OLD VERSION * @param xx the t-th row of the input matrix (lagged actual values) * @param yy the t-th row of the forecast matrix (forecasted future values) * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ + * override def forge (xx: VectorD, yy: VectorD, h: Int): VectorD = val n_endo = spec + ps + p // number of trend + seasonal + endogenous values val x_trend = xx(0 until spec) // get trend values @@ -71,6 +99,80 @@ class SARY (x: MatrixD, y: VectorD, hh: Int, fname: Array [String], // Issue: if p >= sp then some seasonal values are redundant, so would not be in built matrix // Issue: if h >= sp then some seasonal values would be future values (data leakage) + */ + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forge a new vector from the first spec values of x, the available actual values, + * and recent values from the forecasts. 
+ * @param xx the t-th row of the input matrix (lagged actual values) + * @param yy the t-th row of the forecast matrix (forecasted future values) + * @param h the forecasting horizon, number of steps ahead to produce forecasts + */ + override def forge (xx: VectorD, yy: VectorD, h: Int): VectorD = + val n_trend = spec // number of trend components + + // Get trend values (always available) + val x_trend = xx(0 until n_trend) + + // Handle seasonal components + val xs_forged = forgeSeasonalComponents (xx, yy, h, n_trend) + + // Handle regular lag components + val xr_forged = forgeRegularComponents (xx, yy, h, n_trend + actualSeasonalLags.size) + + x_trend ++ xs_forged ++ xr_forged + end forge + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forge the seasonal components of the feature vector. + * @param xx the current row of input matrix + * @param yy the forecast vector + * @param h the forecasting horizon + * @param startIdx starting index for seasonal components in xx + */ + private def forgeSeasonalComponents (xx: VectorD, yy: VectorD, h: Int, startIdx: Int): VectorD = + val seasonalLagsSorted = actualSeasonalLags.toSeq.sorted + val xs_forged = new VectorD(seasonalLagsSorted.size) + + for (sl, idx) <- seasonalLagsSorted.zipWithIndex do + if h < sl then + // Seasonal lag refers to past actual data + xs_forged(idx) = xx(startIdx + idx) + else + // Seasonal lag requires forecasted value + // Calculate which forecast step corresponds to this seasonal lag + val forecastStep = h - sl + 1 + if forecastStep > 0 && forecastStep <= h then + xs_forged(idx) = yy(forecastStep - 1) + else + xs_forged(idx) = 0.0 // Fallback - should not happen + xs_forged + end forgeSeasonalComponents + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forge the regular lag components of the feature vector. 
+ * @param xx the current row of input matrix + * @param yy the forecast vector + * @param h the forecasting horizon + * @param startIdx starting index for regular components in xx + */ + private def forgeRegularComponents (xx: VectorD, yy: VectorD, h: Int, startIdx: Int): VectorD = + val regularLagsSorted = actualRegularLags.toSeq.sorted + val xr_forged = new VectorD(regularLagsSorted.size) + + for (rl, idx) <- regularLagsSorted.zipWithIndex do + if h < rl then + // Regular lag refers to past actual data + xr_forged(idx) = xx(startIdx + idx) + else + // Regular lag requires forecasted value + val forecastStep = h - rl + 1 + if forecastStep > 0 && forecastStep <= h then + xr_forged(idx) = yy(forecastStep - 1) + else + xr_forged(idx) = 0.0 // Fallback - should not happen + xr_forged + end forgeRegularComponents end SARY @@ -82,14 +184,14 @@ object SARY: //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create an `SARY` object by making/building an input matrix x and then calling the - * `SARY` constructor. + * `SARY` constructor. 
OLD VERSION * @param y the response vector (time series data) * @param hh the maximum forecasting horizon (h = 1 to hh) * @param fname_ the feature/variable names * @param tRng the time range, if relevant (time index may suffice) * @param hparam the hyper-parameters * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ + * def apply (y: VectorD, hh: Int, fname_ : Array [String] = null, tRng: Range = null, hparam: HyperParameter = hp, bakcast: Boolean = false): SARY = @@ -102,11 +204,89 @@ object SARY: val xs = makeMatrix4S (y, p, sp, ps, bakcast) // seasonal lags terms val xl = makeMatrix4L (y, p, bakcast) // regular lag terms val start = if xs.dim2 == ps then 1 else 2 // first seasonal lag to use (not subsumed) - val fname = if fname_ == null then formNames (spec, p, false, sp, start, ps) + val fname = if fname_ == null then formNames (spec, p, 0.0, sp, start, ps) + else fname_ + new SARY (xt ++^ xs ++^ xl, y, hh, fname, tRng, hparam, bakcast) + end apply + */ + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create an `SARY` object by making/building an input matrix x and then calling the + * `SARY` constructor. 
+ * @param y the response vector (time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ + def apply (y: VectorD, hh: Int, fname_ : Array [String] = null, + tRng: Range = null, hparam: HyperParameter = hp, + bakcast: Boolean = false): SARY = + val p = hparam("p").toInt // use the last p values + val sp = hparam("sp").toInt // the seasonal period (time units until repetitive behavior) + val ps = hparam("ps").toInt // use the last ps seasonal values + val spec = hparam("spec").toInt // 0 - none, 1 - constant, 2 - linear, 3 -quadratic, 4 - sin, 5 = cos + val lwave = hparam("lwave").toDouble // wavelength (distance between peaks) + + val xt = makeMatrix4T (y, spec, lwave, bakcast) // trend terms + val (xs, seasonalLags) = makeMatrix4S_NoDupes (y, p, sp, ps, bakcast) + val xl = makeMatrix4L_NoDupes (y, p, seasonalLags, bakcast) + + val start = if xs.dim2 == ps then 1 else 2 // first seasonal lag to use (not subsumed) + val fname = if fname_ == null then formNames (spec, p, 0.0, sp, start, ps) else fname_ new SARY (xt ++^ xs ++^ xl, y, hh, fname, tRng, hparam, bakcast) end apply + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create seasonal matrix without duplicates with regular lags. 
+ * @param y the time series data + * @param p number of regular lags + * @param sp seasonal period + * @param ps number of seasonal lags + * @param bakcast whether to use backcasting + */ + private def makeMatrix4S_NoDupes (y: VectorD, p: Int, sp: Int, ps: Int, bakcast: Boolean): (MatrixD, Set[Int]) = + val n = y.dim + val seasonalLags = (1 to ps).map(i => i * sp).filter(_ > p).toSet + val actualPs = seasonalLags.size + + if actualPs == 0 then + (new MatrixD (n, 0), Set.empty) + else + val xs = new MatrixD (n, actualPs) + for t <- 0 until n; (sl, j) <- seasonalLags.toSeq.sorted.zipWithIndex do + xs(t, j) = if t - sl < 0 then + if bakcast then y(0) else 0.0 + else y(t - sl) + (xs, seasonalLags) + end makeMatrix4S_NoDupes + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create regular lag matrix without duplicates with seasonal lags. + * @param y the time series data + * @param p number of regular lags + * @param seasonalLags set of seasonal lags already used + * @param bakcast whether to use backcasting + */ + private def makeMatrix4L_NoDupes (y: VectorD, p: Int, seasonalLags: Set[Int], bakcast: Boolean): MatrixD = + val n = y.dim + // Filter out regular lags that are already included as seasonal lags + val regularLags = (1 to p).filterNot(seasonalLags.contains) + val actualP = regularLags.size + + if actualP == 0 then + new MatrixD (n, 0) + else + val xl = new MatrixD (n, actualP) + for t <- 0 until n; (rl, j) <- regularLags.zipWithIndex do + xl(t, j) = if t - rl < 0 then + if bakcast then y(0) else 0.0 + else y(t - rl) + xl + end makeMatrix4L_NoDupes + end SARY import Example_Covid.loadData_y @@ -168,8 +348,10 @@ end sARYTest2 val y = yy(0 until 116) // clip the flat end val hh = 6 // maximum forecasting horizon hp("lwave") = 20 // wavelength (distance between peaks) + hp("ps") = 2 // seasonal AR-lags (P) + hp("sp") = 3 // seasonal period - for p <- 2 to 5; s <- 1 to 2 do // number of lags; trend + for p <- 3 to 6; s <- 1 
to 1 do // number of lags; trend hp("p") = p // endo lags hp("spec") = s // trend specification: 0, 1, 2, 3, 5 val mod = SARY (y, hh) // create model for time series data @@ -211,7 +393,7 @@ end sARYTest3 mod.setSkip (0) mod.rollValidate () // TnT with Rolling Validation println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set + mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set // println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") end for @@ -249,8 +431,7 @@ end sARYTest4 // val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, sMAPE, R^2 cv val k = cols.size println (s"k = $k") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "R^2 cv"), - s"R^2 vs n for ${mod.modelName}", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for ${mod.modelName}", lines = true) println (s"rSq = $rSq") end sARYTest5 @@ -273,7 +454,7 @@ end sARYTest5 for p <- 1 to 5; s <- 1 to 1 do // number of lags; trend hp("p") = p // endo lags hp("spec") = s // trend specification: 0, 1, 2, 3, 5 - val mod = SARY.quadratic (y, hh) // create model for time series data + val mod = SARY.quadratic (y, hh) // create model for time series data banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") mod.trainNtest_x ()() // train and test on full dataset println (mod.summary ()) // statistical summary of fit diff --git a/src/main/scala/scalation/modeling/forecasting/SARY.scala.bak b/src/main/scala/scalation/modeling/forecasting/SARY.scala.bak new file mode 100644 index 000000000..6ab417c53 --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/SARY.scala.bak @@ -0,0 +1,293 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Sun Jun 30 13:27:00 EDT 2024 + * @see LICENSE (MIT style license file). 
+ * + * @note Model: Seasonal Auto-Regressive on lagged y (SARY) using OLS + * + * @see `scalation.modeling.Regression` + * @see `scalation.modeling.RidgeRegression` + * @see `scalation.modeling.forecasting.ARX` when exogenous variable are needed + */ + +package scalation +package modeling +package forecasting + +import scalation.mathstat._ + +import MakeMatrix4TS._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SARY` class provides basic time series analysis capabilities for SARY models. + * These models extend `ARY` models by including seasonal (periodic) lags. + * SARY models utilize multiple linear regression based on lagged values of y. + * Given time series data stored in vector y, its next value y_t = combination of + * the last p lagged values of y and the last ps seasonally lagged values. + * + * y_t = b dot x_t + e_t + * + * where y_t is the value of y at time t, b is the parameter vector, x_t collects past + * lagged (both regular and seasonal) values, and e_t is the residual/error term. 
+ * @param x the data/input matrix (lagged columns of y) @see `SARY.apply` + * @param y the response/output vector (time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ +class SARY (x: MatrixD, y: VectorD, hh: Int, fname: Array [String], + tRng: Range = null, hparam: HyperParameter = hp, + bakcast: Boolean = false) // backcast value used only `MakeMatrix4TS` + extends ARY (x, y, hh, fname, tRng, hparam, bakcast): // no automatic backcasting, @see `SARY.apply` + + private val debug = debugf ("SARY", true) // debug function + private val sp = hparam("sp").toInt // the seasonal period + private val ps = hparam("ps").toInt // use the last ps seasonal values (ps seasonal lags) + + _modelName = s"SARY_${p}_${ps}_$sp" + + debug ("init", s"$modelName with additional term spec = $spec") + debug ("init", s"[ x | y ] = ${x :^+ y}") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forge a new vector from the first spec values of x, the last p-h+1 values + * of x (past values) and recent values 1 to h-1 from the forecasts. 
+ * FIX - need past seasonal values as well + * @param xx the t-th row of the input matrix (lagged actual values) + * @param yy the t-th row of the forecast matrix (forecasted future values) + * @param h the forecasting horizon, number of steps ahead to produce forecasts + */ + override def forge (xx: VectorD, yy: VectorD, h: Int): VectorD = + val n_endo = spec + ps + p // number of trend + seasonal + endogenous values + val x_trend = xx(0 until spec) // get trend values + val xs_act = xx(spec until spec + ps) // get actual seasonally lagged y-values + val x_act = xx(n_endo-(p+1-h) until n_endo) // get actual lagged y-values (endogenous) + val nyy = p - x_act.dim // number of forecasted values needed +// println (s"forge: h = $h, n_nedo = $n_endo, [ ${x_trend.dim}, ${x_act.dim} ], nyy = $nyy") + val x_fcast = yy(h-nyy until h) // get forecasted y-values +// val xs_fcast = getYS (yy, p, sp, ps, xs_act.dim) // FIX get forecasted seasonal y-values + x_trend ++ xs_act ++ x_act ++ x_fcast // FIX assumes all actual seasonal values are used + end forge + +// Issue: if p >= sp then some seasonal values are redundant, so would not be in built matrix +// Issue: if h >= sp then some seasonal values would be future values (data leakage) + +end SARY + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SARY` companion object provides factory methods for the `SARY` class. + */ +object SARY: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create an `SARY` object by making/building an input matrix x and then calling the + * `SARY` constructor. 
+ * @param y the response vector (time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ + def apply (y: VectorD, hh: Int, fname_ : Array [String] = null, + tRng: Range = null, hparam: HyperParameter = hp, + bakcast: Boolean = false): SARY = + val p = hparam("p").toInt // use the last p values + val sp = hparam("sp").toInt // the seasonal period (time units until repetitive behavior) + val ps = hparam("ps").toInt // use the last ps seasonal values + val spec = hparam("spec").toInt // 0 - none, 1 - constant, 2 - linear, 3 -quadratic, 4 - sin, 5 = cos + val lwave = hparam("lwave").toDouble // wavelength (distance between peaks) + val xt = makeMatrix4T (y, spec, lwave, bakcast) // trend terms + val xs = makeMatrix4S (y, p, sp, ps, bakcast) // seasonal lags terms + val xl = makeMatrix4L (y, p, bakcast) // regular lag terms + val start = if xs.dim2 == ps then 1 else 2 // first seasonal lag to use (not subsumed) + val fname = if fname_ == null then formNames (spec, p, 0.0, sp, start, ps) + else fname_ + new SARY (xt ++^ xs ++^ xl, y, hh, fname, tRng, hparam, bakcast) + end apply + +end SARY + +import Example_Covid.loadData_y +import Example_LakeLevels.y + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RYTest` main function tests the `SARY` class on real data: + * Forecasting Lake Levels using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.sARYTest + */ +@main def sARYTest (): Unit = + + val hh = 3 // maximum forecasting horizon + + val mod = SARY (y, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") + mod.trainNtest_x ()() // train and test on full dataset + + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + mod.diagnoseAll (y, mod.getYf) + println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") + +end sARYTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `sARYTest2` main function tests the `SARY` class on real data: + * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. + * Test forecasts (h = 1 to hh steps ahead forecasts). + * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.sARYTest2 + */ +@main def sARYTest2 (): Unit = + + val hh = 3 // maximum forecasting horizon + + val mod = SARY (y, hh) // create model for time series data + banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") + mod.trainNtest_x ()() // train and test on full dataset + + mod.rollValidate () // TnT with Rolling Validation + println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + +end sARYTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `sARYTest3` main function tests the `SARY` class on real data: + * Forecasting COVID-19 using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.sARYTest3 + */ +@main def sARYTest3 (): Unit = + + val yy = loadData_y () +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + hp("lwave") = 20 // wavelength (distance between peaks) + + for p <- 2 to 5; s <- 1 to 2 do // number of lags; trend + hp("p") = p // endo lags + hp("spec") = s // trend specification: 0, 1, 2, 3, 5 + val mod = SARY (y, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest_x ()() // train and test on full dataset + println (mod.summary ()) // statistical summary of fit + +// mod.setSkip (p) // full AR-formula available when t >= p + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + mod.diagnoseAll (y, mod.getYf) +// println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") +// println (s"Final In-ST Forecast Matrix yf = ${mod.getYf.shiftDiag}") + end for + +end sARYTest3 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `sARYTest4` main function tests the `SARY` class on real data: + * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.sARYTest4 + */ +@main def sARYTest4 (): Unit = + + val yy = loadData_y () +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + hp("lwave") = 20 // wavelength (distance between peaks) + + for p <- 1 to 10; s <- 1 to 5 do // number of lags; trend + hp("p") = p // endo lags + hp("spec") = s // trend specification: 0, 1, 2, 3, 5 + val mod = SARY (y, hh) // create model for time series data + banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest_x ()() // use customized trainNtest_x + + mod.setSkip (0) + mod.rollValidate () // TnT with Rolling Validation + println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") + mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set +// println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + end for + +end sARYTest4 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `sARYTest5` main function tests the `SARY` class on real data: + * Forecasting COVID-19 using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). + * This version performs feature selection. 
+ * > runMain scalation.modeling.forecasting.sARYTest5 + */ +@main def sARYTest5 (): Unit = + + val yy = loadData_y () +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + hp("p") = 10 // endo lags + hp("spec") = 5 // trend specification: 0, 1, 2, 3, 5 + hp("lwave") = 20 // wavelength (distance between peaks) + + val mod = SARY (y, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest_x ()() // train and test on full dataset + println (mod.summary ()) // statistical summary of fit + + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + mod.diagnoseAll (y, mod.getYf) + println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") + + banner ("Feature Selection Technique: Forward") + val (cols, rSq) = mod.forwardSelAll () // R^2, R^2 bar, sMAPE, R^2 cv +// val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, sMAPE, R^2 cv + val k = cols.size + println (s"k = $k") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for ${mod.modelName}", lines = true) + println (s"rSq = $rSq") + +end sARYTest5 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `sARYTest6` main function tests the `SARY` class on real data: + * Forecasting COVID-19 using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.sARYTest6 + * +@main def sARYTest6 (): Unit = + + val yy = loadData_y () +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + hp("lwave") = 20 // wavelength (distance between peaks) + + for p <- 1 to 5; s <- 1 to 1 do // number of lags; trend + hp("p") = p // endo lags + hp("spec") = s // trend specification: 0, 1, 2, 3, 5 + val mod = SARY.quadratic (y, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest_x ()() // train and test on full dataset + println (mod.summary ()) // statistical summary of fit + +// mod.setSkip (p) // full AR-formula available when t >= p + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + mod.diagnoseAll (y, mod.getYf) +// println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") +// println (s"Final In-ST Forecast Matrix yf = ${mod.getYf.shiftDiag}") + end for + +end sARYTest6 + */ + diff --git a/src/main/scala/scalation/modeling/forecasting/SGFilter.scala b/src/main/scala/scalation/modeling/forecasting/SGFilter.scala new file mode 100644 index 000000000..a4dbd0836 --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/SGFilter.scala @@ -0,0 +1,88 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Thu May 22 01:21:46 EDT 2025 + * @see LICENSE (MIT style license file). 
+ * + * @note Smoother: Savitzky–Golay Filter + * + * @see en.wikipedia.org/wiki/Savitzky%E2%80%93Golay_filter + */ + +package scalation +package modeling +package forecasting + +import scalation.mathstat._ + +import Example_Covid.loadData_y +import Example_LakeLevels.y + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SGFilter` class provides basic time series capabilities for Savitzky–Golay + * filters that are used to smooth data. + * Note, would need to be adapted for use as a forecaster as it uses future data. + * @see `WeightedMovingAverage` + * @param y the response vector (time series data) + */ +class SGFilter (y: VectorD) + extends Filter (y): + + val c = VectorD (-3, 12, 17, 12, -3) / 35.0 // Convolution coefficients for + // 5 point quadratic + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return a smoothed version of the given time series vector. + * + * z(t) = c dot y(t-2 until t+3) = c dot [ y(t-2), y(t-1), y(t), y(t+1), y(t+2) ] + * + * @param y_ the actual time series values to be smoothed + * @param a the smoothing parameter + */ + def smooth (y_ : VectorD = y, a: Double = 0.0): VectorD = + val n = y_.dim + val z = new VectorD (n) + z(0) = (c(2 until 5) dot y(0 until 3)) * (35.0/26.0) // use 3 points for z(0) the first point + z(1) = (c(1 until 5) dot y(0 until 4)) * (35.0/38.0) // use 4 points for z(1) + for t <- 2 until n-2 do z(t) = c dot y(t-2 until t+3) // use 5 points, except first and last 2 + z(n-2) = (c(0 until 4) dot y(n-4 until n)) * (35.0/38.0) // use 4 points for z(n-2) + z(n-1) = (c(0 until 3) dot y(n-3 until n)) * (35.0/26.0) // use 3 points for z(n-1) the last point + z + end smooth + +end SGFilter + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `sGFilterTest` main function tests the `SGFilter` class on real data: + * Smoothing Lake Levels data. 
+ * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.sGFilterTest + */ +@main def sGFilterTest (): Unit = + + val filter = new SGFilter (y) // create smoother for time series data + val ys = filter.smooth () + new Plot (null, y, ys, "Plot y (data) and ys (smoothed data) vs. time", lines = true) + +end sGFilterTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `sGFilterTest2` main function tests the `SGFilter` class on real data: + * Smoothing COVID-19 data. + * > runMain scalation.modeling.forecasting.sGFilterTest2 + */ +@main def sGFilterTest2 (): Unit = + + val yy = loadData_y () +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + + val filter = new SGFilter (y) // create smoother for time series data + val ys = filter.smooth () + new Plot (null, y, ys, "Plot y (data) and ys (smoothed data) vs. time", lines = true) + +end sGFilterTest2 + diff --git a/src/main/scala/scalation/modeling/forecasting/SimpleExpSmoothing.scala b/src/main/scala/scalation/modeling/forecasting/SimpleExpSmoothing.scala index 47413e9b0..197e981a9 100644 --- a/src/main/scala/scalation/modeling/forecasting/SimpleExpSmoothing.scala +++ b/src/main/scala/scalation/modeling/forecasting/SimpleExpSmoothing.scala @@ -41,17 +41,19 @@ import Example_LakeLevels.y class SimpleExpSmoothing (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = SimpleExpSmoothing.hp, bakcast: Boolean = false) - extends Forecaster (y, hh, tRng, hparam, bakcast): + extends Forecaster (y, hh, tRng, hparam, bakcast) + with Filter (y) + with NoSubModels: private val TOL = 1E-4 // tolerance private val lo_up = makeBounds (1, 0.0, 1.05) // lower & upper bounds on α for optimizer (1.0 + slack) - private var α = hparam ("α").toDouble // default value for the smoothing parameter + private var α = hparam("α").toDouble // default value for the smoothing parameter private var s = VectorD.nullv // vector of 
smoothed/leveled values (state) // private val sf = new VectorD (y.dim) // to hold smooth values for a forecast horizon - private var opt = true // whehther to optimize the smoothing parameter + private var opt = true // whether to optimize the smoothing parameter - modelName = "SimpleExpSmoothing" + _modelName = "SimpleExpSmoothing" //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Reset the smoothing parameter α. @@ -68,11 +70,11 @@ class SimpleExpSmoothing (y: VectorD, hh: Int, tRng: Range = null, /** Smooth the time-series data y, returning the leveled/smoothed data s. * May be viewed as unoptimized training. * @see Smoothing Equation in section 7.1. - * s_t+1 = α y_t + (1 - α) s_t // smoothing equation - * @param a the smoothing parameter (decay rate for older values) + * s_t+1 = α y_t + (1 - α) s_t // smoothing equation * @param y_ the response/output vector (training/full) + * @param a the smoothing parameter (decay rate for older values) */ - def smooth (a: Double = α, y_ : VectorD = y): VectorD = + def smooth (y_ : VectorD = y, a: Double = α): VectorD = s = new VectorD (y_.dim) s(0) = y(0) for t <- 0 until y_.dim-1 do s(t+1) = a * y_(t) + (1 - a) * s(t) @@ -87,15 +89,14 @@ class SimpleExpSmoothing (y: VectorD, hh: Int, tRng: Range = null, */ override def train (x_null: MatrixD, y_ : VectorD): Unit = - def f_obj (x: VectorD): Double = (y_ - smooth (x(0), y_)).normSq // only one parameter + def f_obj (x: VectorD): Double = (y_ - smooth (y_, x(0))).normSq // only one parameter if opt then val optimizer = new Optimizer (f_obj, l_u = lo_up) // Bounded Quasi-Newton optimizer // val optimizer = new Optimizer (f_obj) // Quasi-Newton optimizer val opt = optimizer.solve (VectorD (α), toler = TOL) // optimize value for α α = (opt._2)(0) // pull α from vector result - end if - s = smooth (α) + s = smooth (y_, α) end train //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git 
a/src/main/scala/scalation/modeling/forecasting/SimpleMovingAverage.scala b/src/main/scala/scalation/modeling/forecasting/SimpleMovingAverage.scala index 1221e7f85..4bbdc0512 100644 --- a/src/main/scala/scalation/modeling/forecasting/SimpleMovingAverage.scala +++ b/src/main/scala/scalation/modeling/forecasting/SimpleMovingAverage.scala @@ -12,11 +12,9 @@ package scalation package modeling package forecasting -import scalation.mathstat._ +import scala.math.max -import Forecaster.rdot -import Example_Covid.loadData_y -import Example_LakeLevels.y +import scalation.mathstat._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `SimpleMovingAverage` class provides basic time series analysis capabilities for @@ -35,13 +33,14 @@ import Example_LakeLevels.y class SimpleMovingAverage (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = SimpleMovingAverage.hp, bakcast: Boolean = false) - extends Forecaster (y, hh, tRng, hparam, bakcast): + extends Forecaster (y, hh, tRng, hparam, bakcast) + with NoSubModels: - private val flaw = flawf ("SimpleMovingAverage") // flaw function - private val q = hparam("q").toInt // take mean of last q values + private val debug = debugf ("SimpleMovingAverage", true) // debug function + private val flaw = flawf ("SimpleMovingAverage") // flaw function + private val q = hparam("q").toInt // take mean of last q values - b = VectorD.one (q) / q // equal weight - modelName = s"SimpleMovingAverage($q)" + _modelName = s"SimpleMovingAverage_$q" //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Predict a value for y_t using the 1-step ahead forecast. 
@@ -51,7 +50,10 @@ class SimpleMovingAverage (y: VectorD, hh: Int, tRng: Range = null, * @param t the time point being predicted * @param y_ the actual values to use in making predictions (mean (inclusive, exclusice)) */ - override def predict (t: Int, y_ : VectorD): Double = y_.mean (max0 (t-q), t) + override def predict (t: Int, y_ : VectorD): Double = + if t < 1 then -0.0 // not enough prior data + else y_.mean (max0 (t-q), t) // mean of prior q actual values + end predict //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, @@ -63,7 +65,8 @@ class SimpleMovingAverage (y: VectorD, hh: Int, tRng: Range = null, override def forecast (t: Int, y_ : VectorD = yb): VectorD = val yh = new VectorD (hh) // hold forecasts for each horizon for h <- 1 to hh do - val pred = rdot (b, yf, t, h-1) // slide in prior forecasted values + val pred = if t < 1 then -0.0 // not enough prior data + else forge (t, h).mean // record in forecast matrix yf(t, h) = pred // record in forecast matrix yh(h-1) = pred // record forecasts for each horizon yh // return forecasts for all horizons @@ -81,10 +84,26 @@ class SimpleMovingAverage (y: VectorD, hh: Int, tRng: Range = null, if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") for t <- y_.indices do // make forecasts over all time points for horizon h - yf(t, h) = rdot (b, yf, t, h-1) // record in forecast matrix + yf(t, h) = if t < 1 then -0.0 // not enough prior data + else forge (t, h).mean // record in forecast matrix yf(?, h) // return the h-step ahead forecast vector end forecastAt + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forge a vector from actual (move up column 0 in yf) and prior forecasted values + * (move right from column h in yf) to be used in the moving average calculation. 
+ * @param t the time point from which to make forecasts + * @param h the forecasting horizon, number of steps ahead to produce forecasts + */ + def forge (t: Int, h: Int): VectorD = + var yft = yf(t, max (1, h-q) until h) // @t: get prior forecasts h-q .. h-1 + if yft.dim < q then + val gap = q - yft.dim // still need gap values + yft = yft ++ yf(max0 (t-gap) until t, 0) // get remaining values from actuals (column 0) + debug ("forge", s"($t, $h) = $yft") + yft + end forge + end SimpleMovingAverage @@ -113,6 +132,8 @@ object SimpleMovingAverage: end SimpleMovingAverage +import Example_Covid.loadData_y +import Example_LakeLevels.y //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `simpleMovingAverageTest` main function tests the `SimpleMovingAverage` class on real data: @@ -203,3 +224,30 @@ end simpleMovingAverageTest3 end simpleMovingAverageTest4 + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleMovingAverageTest5` main function tests the `SimpleMovingAverage` class + * a simple data using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.simpleMovingAverageTest5 + */ +@main def simpleMovingAverageTest5 (): Unit = + + import SimpleMovingAverage.hp + + val y_ = VectorD (1, 3, 5, 7, 9, 11, 13, 15, 17, 19) + + hp("q") = 3 // size of moving average window: test 2 and 3 + val hh = 5 // maximum forecasting horizon + + val mod = new SimpleMovingAverage (y_, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on Simple Dataset") + mod.trainNtest ()() // train and test on full dataset + + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + val yf_ = mod.getYf + mod.diagnoseAll (y_, yf_) + println (s"Final In-ST Forecast Matrix yf = $yf_") + +end simpleMovingAverageTest5 + diff --git a/src/main/scala/scalation/modeling/forecasting/SimpleMovingAverage2.scala b/src/main/scala/scalation/modeling/forecasting/SimpleMovingAverage2.scala new file mode 100644 index 000000000..4fd494aae --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/SimpleMovingAverage2.scala @@ -0,0 +1,258 @@ +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller, Ruthvik Mankari + * @version 2.0 + * @date Sun Jun 30 13:27:00 EDT 2024 + * @see LICENSE (MIT style license file). + * + * @note Model: Simple Moving Average (not the same as MA in ARMA) + */ + +package scalation +package modeling +package forecasting + +import scalation.mathstat._ +import Example_Covid.loadData_y +import Example_LakeLevels.y + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SimpleMovingAverage2` class provides basic time series analysis capabilities for + * SimpleMovingAverage2 models. SimpleMovingAverage2 models are often used for forecasting. + * Given time series data stored in vector y, its next value y_t = mean of last q values. 
+ * + * y_t = mean (y_t-1, ..., y_t-q) + e_t + * + * where y_t is the value of y at time t and e_t is the residual/error term. + * @param y the response vector (time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters (defaults to SimpleMovingAverage2.hp) + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ +class SimpleMovingAverage2 (y: VectorD, hh: Int, tRng: Range = null, + hparam: HyperParameter = SimpleMovingAverage2.hp, + bakcast: Boolean = false) + extends Forecaster (y, hh, tRng, hparam, bakcast) + with NoSubModels: + + private val flaw = flawf ("SimpleMovingAverage2") // flaw function + private val q = hparam("q").toInt // take mean of last q values + + b = VectorD.one (q) / q // equal weight + _modelName = s"SimpleMovingAverage2_$q" + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict a value for y_t using the 1-step ahead forecast. + * + * y_t = f (y_t-1, ...) = mean of last q values (simple moving average model) + * + * @param t the time point being predicted + * @param y_ the actual values to use in making predictions (mean (inclusive, exclusive)) + */ + override def predict (t: Int, y_ : VectorD): Double = y_.mean (max0 (t-q), t) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, + * i.e., forecast the following time points: t+1, ..., t+h. + * Intended to work with rolling validation (analog of predict method). 
+ * @param t the time point from which to make forecasts + * @param y_ the actual values to use in making predictions + * @author Ruthvik Mankari + */ + override def forecast(t: Int, y_ : VectorD = yb): VectorD = + val yh = new VectorD(hh) + + for h <- 1 to hh do + // Check if enough history exists + if t < q + (h - 1) then + yf(t, h) = -0.0 + yh(h - 1) = -0.0 + else + // Copy last q points from actuals (up to a = t - h) + var window = VectorD((t - h - q + 1 to t - h).map(y_(_)).toArray) + var pred = 0.0 + + // Perform recursive forecasting + for _ <- 0 until h do + pred = window.mean + window = window(1 until q) :+ pred // slide window forward + + yf(t, h) = pred + yh(h - 1) = pred + end for + + yh + end forecast + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). + * Assign into FORECAST MATRIX and return the h-steps ahead forecast. + * Note, `predictAll` provides predictions for h = 1. + * @see `forecastAll` method in `Forecaster` trait. + * @param h the forecasting horizon, number of steps ahead to produce forecasts + * @param y_ the actual values to use in making forecasts + * @author Ruthvik Mankari + */ + override def forecastAt(h: Int, y_ : VectorD = yb): VectorD = + if h < 1 then flaw("forecastAt", s"horizon h = $h must be ≥ 1") + + for t <- y_.indices do + if t < q + (h - 1) then + yf(t, h) = -0.0 + else + var window = VectorD((t - h - q + 1 to t - h).map(y_(_)).toArray) + var pred = 0.0 + for _ <- 0 until h do + pred = window.mean + window = window(1 until q) :+ pred + yf(t, h) = pred + end for + + yf(?, h) + end forecastAt +end SimpleMovingAverage2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SimpleMovingAverage2` companion object provides factory methods for the + * `SimpleMovingAverage2` class. 
+ */ +object SimpleMovingAverage2: + + /** Base hyper-parameter specification for `SimpleMovingAverage2` and `WeightedMovingAverage2` classes + */ + val hp = new HyperParameter + hp += ("q", 2, 2) // number of prior values for mean + hp += ("u", 1.0, 1.0) // slider from flat (0.0) to linear (1.0) weights + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `SimpleMovingAverage2` object. + * @param y the response vector (time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + */ + def apply (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = hp): SimpleMovingAverage2 = + new SimpleMovingAverage2 (y, hh, tRng, hparam) + end apply + +end SimpleMovingAverage2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleMovingAverage2Test` main function tests the `SimpleMovingAverage2` class on real data: + * Forecasting Lake Levels using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.simpleMovingAverage2Test + */ +@main def simpleMovingAverage2Test (): Unit = + + val hh = 3 // maximum forecasting horizon + + val mod = new SimpleMovingAverage2 (y, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") + mod.trainNtest ()() // train and test on full dataset + + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + mod.diagnoseAll (y, mod.getYf) + println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") + +end simpleMovingAverage2Test + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleMovingAverage2Test2` main function tests the `SimpleMovingAverage2` class on real data: + * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. + * Test forecasts (h = 1 to hh steps ahead forecasts). + * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.simpleMovingAverage2Test2 + */ +@main def simpleMovingAverage2Test2 (): Unit = + + val hh = 3 // maximum forecasting horizon + + val mod = new SimpleMovingAverage2 (y, hh) // create model for time series data + banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") + mod.trainNtest ()() // train and test on full dataset + + mod.rollValidate () // TnT with Rolling Validation + println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + +end simpleMovingAverage2Test2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleMovingAverage2Test3` main function tests the `SimpleMovingAverage2` class on real data: + * Forecasting COVID-19 using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.simpleMovingAverage2Test3 + */ +@main def simpleMovingAverage2Test3 (): Unit = + + val yy = loadData_y () +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon +// SimpleMovingAverage2.hp("q") = 3.0 + + val mod = new SimpleMovingAverage2 (y, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest ()() // train and test on full dataset + + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + mod.diagnoseAll (y, mod.getYf) + println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") + +end simpleMovingAverage2Test3 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleMovingAverage2Test4` main function tests the `SimpleMovingAverage2` class on real data: + * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. + * Test forecasts (h = 1 to hh steps ahead forecasts). + * > runMain scalation.modeling.forecasting.simpleMovingAverage2Test4 + */ +@main def simpleMovingAverage2Test4 (): Unit = + + val yy = loadData_y () +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + + val mod = new SimpleMovingAverage2 (y, hh) // create model for time series data + banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest ()() + + mod.rollValidate () // TnT with Rolling Validation + println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + +end simpleMovingAverage2Test4 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleMovingAverage2Test5` main function tests the `SimpleMovingAverage2` class + * on a simple dataset using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.simpleMovingAverage2Test5 + */ +@main def simpleMovingAverage2Test5 (): Unit = + + import SimpleMovingAverage2.hp + + val y_ = VectorD (1, 3, 5, 7, 9, 11, 13, 15, 17, 19) + + hp("q") = 3 // size of moving average window: test 2 and 3 + val hh = 5 // maximum forecasting horizon + + val mod = new SimpleMovingAverage2 (y_, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on Simple Dataset") + mod.trainNtest ()() // train and test on full dataset + + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + val yf_ = mod.getYf + mod.diagnoseAll (y_, yf_) + println (s"Final In-ST Forecast Matrix yf = $yf_") + +end simpleMovingAverage2Test5 + diff --git a/src/main/scala/scalation/modeling/forecasting/Stationarity.scala b/src/main/scala/scalation/modeling/forecasting/Stationarity.scala index 5606718b2..fd2e2148a 100644 --- a/src/main/scala/scalation/modeling/forecasting/Stationarity.scala +++ b/src/main/scala/scalation/modeling/forecasting/Stationarity.scala @@ -92,7 +92,6 @@ trait UnitRoot (protected val testName: String, protected val nobs: Int, computeCV () // computing critical values computePval (stat) // computing p-value newTest = false - end if pval end pvalue @@ -133,7 +132,6 @@ trait UnitRoot (protected val testName: String, protected val nobs: Int, println (s" 1% ${criticalVals(idx(0))}") println (s" 5% ${criticalVals(idx(1))}") println (s" 10% ${criticalVals(idx(2))}") - end if println (" ---------------") println (" Test Conclusion") // outputting test conclusion @@ -154,13 +152,11 @@ trait UnitRoot (protected val testName: String, protected val nobs: Int, if lags < 0 then // number of lags cannot be strictly negative lags = 0 println ("\n WARNING: number of lags cannot be negative, it has been set to 0 by default.\n") - end if if ! 
lagsType.isEmpty && lags != prevLags then lagsType = "" // if user has switched from a default lags value // to a value of his choice (for all tests) else if maxLags < 0 then // maxLags cannot be strictly negative maxLags = 0 println ("\n WARNING: maximum number of lags cannot be negative, it has been set to a default value (L12-rule).\n") - end if // updating lags only for PP and KPSS tests, for ADF and DFGLS tests lags will be updated at the next optimization or // set back to prevLags if maxLags, trend, method and level are the same as before @@ -176,7 +172,6 @@ trait UnitRoot (protected val testName: String, protected val nobs: Int, newTest = true newLags = true prevLags = lags - end if end setLags //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -188,7 +183,6 @@ trait UnitRoot (protected val testName: String, protected val nobs: Int, if lagsType != "long" && lagsType != "short" then println("\n WARNING: unknown default type of lags, long has been selected by default.\n") lagsType = "long" // default lags type is long - end if prevLagsType = lagsType end setLagsType @@ -211,13 +205,12 @@ trait UnitRoot (protected val testName: String, protected val nobs: Int, trendType = "constant trend"; npar = 3 else if trend == "ctt" then trendType = "quadratic trend"; npar = 4 - end if + println (s"setTrend: trend = $trend, npar = $npar") if trend != prevTrend then newTest = true newTrend = true prevTrend = trend - end if end setTrend //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/modeling/forecasting/Stationarity_KPSS.scala b/src/main/scala/scalation/modeling/forecasting/Stationarity_KPSS.scala index 9292ce1a4..9adb15fe1 100644 --- a/src/main/scala/scalation/modeling/forecasting/Stationarity_KPSS.scala +++ b/src/main/scala/scalation/modeling/forecasting/Stationarity_KPSS.scala @@ -130,7 +130,6 @@ class Stationarity_KPSS (yy : VectorD, lags_ : Int, lagsType_ : String, trend_ : 
else if trend == "nc" then throw new IllegalArgumentException ("\n ERROR: olsDetrend: no detrending possible when regression trend is no constant.\n") - end if if trend == "c" then y = y - y.mean // use mean centered @@ -175,13 +174,11 @@ class Stationarity_KPSS (yy : VectorD, lags_ : Int, lagsType_ : String, trend_ : if newTrend then // if new trend setData () // set back to original data for new detrending olsDetrend () // detrending data by OLS - end if if newTrend || newLags then // if new trend or new lags computeStat () // computing statistic newTrend = false newLags = false - end if stat end statistic @@ -222,7 +219,7 @@ end Stationarity_KPSS val nobs = 1000 val noise = new Normal () // generating stationary random data - val y = VectorD (for i <- 0 until nobs yield noise.gen) + val y = VectorD (for _ <- 0 until nobs yield noise.gen) val test = new Stationarity_KPSS (y, 0, "short", "c") // init KPSS test with lags of type short and constant trend test.show () // outputting test results diff --git a/src/main/scala/scalation/modeling/forecasting/TranARY.scala b/src/main/scala/scalation/modeling/forecasting/TranARY.scala deleted file mode 100644 index 53862ca51..000000000 --- a/src/main/scala/scalation/modeling/forecasting/TranARY.scala +++ /dev/null @@ -1,329 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Jun 30 13:27:00 EDT 2024 - * @see LICENSE (MIT style license file). 
- * - * @note Model: Transformed Auto-Regressive on lagged y (ARY) using OLS - * - * @see `scalation.modeling.TranRegression` - */ - -package scalation -package modeling -package forecasting - -import scala.math._ - -import scalation.mathstat._ -import scalation.modeling.{Regression => REGRESSION} -//import scalation.modeling.{RidgeRegression => REGRESSION} -//import scalation.modeling.{LassoRegression => REGRESSION} - -import MakeMatrix4TS._ -import Example_Covid.loadData_y -import Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TranARY` class provides basic time series analysis capabilities for - * TranARY models. TranARY models are often used for forecasting. - * Given time series data stored in vector y, its next value y_t = combination of last p values. - * - * tran (y_t) = b dot x_t + e_t - * - * where y_t is the value of y at time t and e_t is the residual/error term. - * @see `TranARY.apply` for applying transformations (tran, itran) - * @param x the data/input matrix (lagged columns of y) @see `ARY.apply` - * @param y the response/output vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) - * @param itran the inverse transformation to return to the original scale (defaults to expm1) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ -class TranARY (x: MatrixD, y: VectorD, hh: Int, tRng: Range = null, - hparam: HyperParameter = hp, val itran: FunctionS2S = expm1, - bakcast: Boolean = false) - extends Forecaster (y, hh, tRng, hparam, bakcast): // no automatic backcasting, @see `ARY.apply` - - private val debug = debugf ("TranARY", true) // debug function - private val flaw = flawf ("TranARY") // flaw function - private val p = hparam("p").toInt // use the last p 
values (p lags) - private val spec = hparam("spec").toInt // additional terms: 0 => none, 1 => constant, 2 => linear - private val reg = new REGRESSION (x, y, null, hparam) // delegate training to regression - - modelName = s"TranARY($p)" - - debug ("init", s"$modelName with additional term spec = $spec") - debug ("init", s"[ x | y ] = ${x :^+ y}") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the data/input matrix built from lagged y values. - */ - override def getX: MatrixD = x - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `TranARY` model to the times-series data in vector y_. - * Estimate the coefficient vector b for a p-th order Auto-Regressive TranARY(p) model. - * Uses OLS Matrix Fatorization to determine the coefficients, i.e., the b (φ) vector. - * @param x_ the data/input matrix (e.g., full x) - * @param y_ the training/full response vector (e.g., full y) - */ - override def train (x_ : MatrixD, y_ : VectorD): Unit = - debug ("train", s"$modelName, x_.dims = ${x_.dims}, y_.dim = ${y_.dim}") - reg.train (x_, y_) // train the regression model - b = reg.parameter // coefficients from regression - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train and test the forecasting model y_ = f(y-past) + e and report its QoF - * and plot its predictions. Return the predictions and QoF. - * NOTE: must use `trainNtest_x` when an x matrix is used, such as in `TranARY`. 
- * @param x_ the training/full data/input matrix (defaults to full x) - * @param y_ the training/full response/output vector (defaults to full y) - * @param xx the testing/full data/input matrix (defaults to full x) - * @param yy the testing/full response/output vector (defaults to full y) - */ - def trainNtest_x (x_ : MatrixD = x, y_ : VectorD = y)(xx: MatrixD = x, yy: VectorD = y): (VectorD, VectorD) = - train (x_, y_) // train the model on training set - val (yp, qof) = test (xx, yy) // test the model on testing set - println (report (qof)) // report on Quality of Fit (QoF) - (yp, qof) - end trainNtest_x - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a forecasting model y_ = f(lags (y_)) + e - * and RETURN (1) aligned actual values, (2) its forecasts and (3) QoF vector. - * Testing may be in-sample (on the training set) or out-of-sample (on the testing set) - * as determined by the parameters passed in. Note: must call train and forecastAll - * before testF. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - */ - override def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = - val h_ = h - 1 - val yy = y_(h_ until y_.dim) // align the actual values - val yfh = yf(?, h)(0 until y_.dim-h_) // column h of the forecast matrix - println (s"yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// Forecaster.differ (yy, yfh) // uncomment for debugging - assert (yy.dim == yfh.dim) // make sure the vector sizes agree - - new Plot (null, yy, yfh, s"testF: yy, yfh vs. t for $modelName @h = $h", lines = true) - mod_resetDF (yy.dim) // reset the degrees of freedom - (yy, yfh, diagnose (yy, yfh)) // return actual, forecasted and QoF vectors - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Models need to provide a means for updating the Degrees of Freedom (DF). 
- * @param size the size of dataset (full, train, or test) - */ - override def mod_resetDF (size: Int): Unit = - val dfm = max (1, parameter.size - 1) // degrees of freedom for model - debug ("mod_resetDF", s"dfm = $dfm, df = ${size-dfm}") - resetDF (dfm, size - dfm) - end mod_resetDF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t using the 1-step ahead forecast. - * - * y_t = b_0 + b_1 y_t-1 + b_2 y_t-2 + ... + b_p y_t-p = b dot x_t - * - * FIX - parameter order is in conflict with AR models. - * @param t the time point being predicted - * @param y_ the actual values to use in making predictions (ignored) - */ - override def predict (t: Int, y_ : VectorD): Double = - val yp = reg.predict (x(t)) -// debug ("predict", s"@t = $t, b = $b dot x(t) = ${x(t)} = yp = $yp vs. y_ = ${y_(t)}") - yp - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, - * i.e., forecast the following time points: t+1, ..., t+h. - * Intended to work with rolling validation (analog of predict method). - * @param t the time point from which to make forecasts - * @param y_ the actual values to use in making predictions - */ - override def forecast (t: Int, y_ : VectorD = y): VectorD = - val yh = new VectorD (hh) // hold forecasts for each horizon - for h <- 1 to hh do - val xy = forge (x(min (t+1, x.dim-1)), yf(t), h) // FIX - why t+1 - val pred = reg.predict (xy) // slide in prior forecasted values -// debug ("forecast", s"h = $h, @t = $t, xy = $xy, yp = $pred, y_ = ${y_(t)}") - yf(t, h) = pred // record in forecast matrix - yh(h-1) = pred // record forecasts for each horizon - yh // return forecasts for all horizons - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). 
- * Assign into FORECAST MATRIX and return the h-steps ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - */ - override def forecastAt (h: Int, y_ : VectorD = y): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - - for t <- y_.indices do // make forecasts over all time points for horizon h - val xy = forge (x(t), yf(t), h) - val pred = reg.predict (xy) -// debug ("forecastAt", s"h = $h, @t = $t, xy = $xy, yp = $pred, y_ = ${y_(t)}") - yf(t, h) = pred // record in forecast matrix - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forge a new vector from the first spec values of x, the last p-h+1 values - * of x (past values) and values 1 to h-1 from the forecasts. - * @param xx the t-th row of the input matrix (lagged actual values) - * @param yy the t-th row of the forecast matrix (forecasted future values) - */ - def forge (xx: VectorD, yy: VectorD, h: Int): VectorD = - val pp = p + 1 - var xy = xx(0 until spec) ++ xx (xx.dim+h-pp until xx.dim) - val nyy = pp - xy.dim - if nyy > 0 then xy = xy ++ yy(h-nyy until h) - xy - end forge - -end TranARY - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TranARY` companion object provides factory methods for the `TranARY` class. - */ -object TranARY: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `TranARY` object by building an input matrix x and then calling the - * constructor. 
- * @see `scalation.modeling.TranRegression` for several options for (tran, itran) pairs - * @param y the response vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param tran the transformation function (defaults to log1p) - * @param itran the inverse transformation function to rescale predictions to original y scale (defaults to expm1) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ - def apply (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = hp, - tran: FunctionS2S = log1p, itran: FunctionS2S = expm1, - bakcast: Boolean = false): TranARY = - val p = hparam("p").toInt // use the last p values - val spec = hparam("spec").toInt // trend terms to include - val lwave = hparam("lwave").toDouble // wavelength (distance between peaks) - val yt = y.map (tran) // y transformed - val xt = makeMatrix4T (yt, spec, lwave, bakcast) // trend terms - val xl = makeMatrix4L (yt, p, bakcast) // regular lag terms - new TranARY (xt ++^ xl, yt, hh, tRng, hparam, itran, bakcast) // hook for user to transform back - end apply - -end TranARY - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `tranARYTest` main function tests the `TranARY` class on real data: - * Forecasting Lake Levels using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.traARYTest - */ -@main def tranARYTest (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = TranARY (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (y, mod.getYf) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - -end tranARYTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `tranARYTest2` main function tests the `TranARY` class on real data: - * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.tranARYTest2 - */ -@main def tranARYTest2 (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = TranARY (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.rollValidate () // TnT with Rolling Validation - println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") - mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set - tran - mod.diagnoseAll (y, mod.getYf.map_ (mod.itran), Forecaster.teRng (y.dim), 0) // only diagnose on the testing set - orig - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - -end tranARYTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `tranARYTest3` main function tests the `TranARY` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * > runMain scalation.modeling.forecasting.tranARYTest3 - */ -@main def tranARYTest3 (): Unit = - - val yy = loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - for p <- 1 to 5 do // number of lags - hp("p") = p - val mod = TranARY (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (y, mod.getYf) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - end for - -end tranARYTest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `tranARYTest4` main function tests the `TranARY` class on real data: - * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). - * > runMain scalation.modeling.forecasting.tranARYTest4 - */ -@main def tranARYTest4 (): Unit = - - val yy = loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - for p <- 2 to 2 do // number of lags - hp("p") = p - val mod = TranARY (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // use customized trainNtest_ - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") - mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set - tran - mod.diagnoseAll (y, mod.getYf.map_ (mod.itran), Forecaster.teRng (y.dim), 0) // only diagnose on the testing set - orig - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - end for - -end tranARYTest4 - diff --git a/src/main/scala/scalation/modeling/forecasting/TranARY.scalaa 
b/src/main/scala/scalation/modeling/forecasting/TranARY.scalaa new file mode 100644 index 000000000..0572bfed1 --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/TranARY.scalaa @@ -0,0 +1,330 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Sun Jun 30 13:27:00 EDT 2024 + * @see LICENSE (MIT style license file). + * + * @note Model: Transformed Auto-Regressive on lagged y (ARY) using OLS + * + * @see `scalation.modeling.TranRegression` + */ + +package scalation +package modeling +package forecasting + +import scala.math._ + +import scalation.mathstat._ +import scalation.modeling.{Regression => REGRESSION} +//import scalation.modeling.{RidgeRegression => REGRESSION} +//import scalation.modeling.{LassoRegression => REGRESSION} + +import MakeMatrix4TS._ +import Example_Covid.loadData_y +import Example_LakeLevels.y + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `TranARY` class provides basic time series analysis capabilities for + * TranARY models. TranARY models are often used for forecasting. + * Given time series data stored in vector y, its next value y_t = combination of last p values. + * + * tran (y_t) = b dot x_t + e_t + * + * where y_t is the value of y at time t and e_t is the residual/error term. 
+ * @see `TranARY.apply` for applying transformations (tran, itran) + * @param x the data/input matrix (lagged columns of y) @see `ARY.apply` + * @param y the response/output vector (time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) + * @param itran the inverse transformation to return to the original scale (defaults to expm1) + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ +class TranARY (x: MatrixD, y: VectorD, hh: Int, tRng: Range = null, + hparam: HyperParameter = hp, val itran: FunctionS2S = expm1, + bakcast: Boolean = false) + extends Forecaster (y, hh, tRng, hparam, bakcast): // no automatic backcasting, @see `ARY.apply` + + private val debug = debugf ("TranARY", true) // debug function + private val flaw = flawf ("TranARY") // flaw function + private val p = hparam("p").toInt // use the last p values (p lags) + private val spec = hparam("spec").toInt // additional terms: 0 => none, 1 => constant, 2 => linear + private val reg = new REGRESSION (x, y, null, hparam) // delegate training to regression + + _modelName = s"TranARY_$p" + + debug ("init", s"$modelName with additional term spec = $spec") + debug ("init", s"[ x | y ] = ${x :^+ y}") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Get the data/input matrix built from lagged y values. + */ + override def getX: MatrixD = x + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Train/fit an `TranARY` model to the times-series data in vector y_. + * Estimate the coefficient vector b for a p-th order Auto-Regressive TranARY(p) model. + * Uses OLS Matrix Fatorization to determine the coefficients, i.e., the b (φ) vector. 
+ * @param x_ the data/input matrix (e.g., full x) + * @param y_ the training/full response vector (e.g., full y) + */ + override def train (x_ : MatrixD, y_ : VectorD): Unit = + debug ("train", s"$modelName, x_.dims = ${x_.dims}, y_.dim = ${y_.dim}") + reg.train (x_, y_) // train the regression model + b = reg.parameter // coefficients from regression + end train + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Train and test the forecasting model y_ = f(y-past) + e and report its QoF + * and plot its predictions. Return the predictions and QoF. + * NOTE: must use `trainNtest_x` when an x matrix is used, such as in `TranARY`. + * @param x_ the training/full data/input matrix (defaults to full x) + * @param y_ the training/full response/output vector (defaults to full y) + * @param xx the testing/full data/input matrix (defaults to full x) + * @param yy the testing/full response/output vector (defaults to full y) + */ + def trainNtest_x (x_ : MatrixD = x, y_ : VectorD = y)(xx: MatrixD = x, yy: VectorD = y): (VectorD, VectorD) = + train (x_, y_) // train the model on training set + val (yp, qof) = test (xx, yy) // test the model on testing set + println (report (qof)) // report on Quality of Fit (QoF) + (yp, qof) + end trainNtest_x + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Test FORECASTS of a forecasting model y_ = f(lags (y_)) + e + * and RETURN (1) aligned actual values, (2) its forecasts and (3) QoF vector. + * Testing may be in-sample (on the training set) or out-of-sample (on the testing set) + * as determined by the parameters passed in. Note: must call train and forecastAll + * before testF. 
+ * @param h the forecasting horizon, number of steps ahead to produce forecasts + * @param y_ the testing/full response/output vector + * + override def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = + val h_ = h - 1 + val yy = y_(h_ until y_.dim) // align the actual values + val yfh = yf(?, h)(0 until y_.dim-h_) // column h of the forecast matrix + println (s"yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") +// Forecaster.differ (yy, yfh) // uncomment for debugging + assert (yy.dim == yfh.dim) // make sure the vector sizes agree + + new Plot (null, yy, yfh, s"testF: yy, yfh vs. t for $modelName @h = $h", lines = true) + mod_resetDF (yy.dim) // reset the degrees of freedom + (yy, yfh, diagnose (yy, yfh)) // return actual, forecasted and QoF vectors + end testF + */ + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Models need to provide a means for updating the Degrees of Freedom (DF). + * @param size the size of dataset (full, train, or test) + */ + override def mod_resetDF (size: Int): Unit = + val dfr = max (1, parameter.size - 1) // degrees of freedom for regression/model + debug ("mod_resetDF", s"dfr = $dfr, df = ${size-dfr}") + resetDF (dfr, size - dfr) + end mod_resetDF + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict a value for y_t using the 1-step ahead forecast. + * + * y_t = b_0 + b_1 y_t-1 + b_2 y_t-2 + ... + b_p y_t-p = b dot x_t + * + * FIX - parameter order is in conflict with AR models. + * @param t the time point being predicted + * @param y_ the actual values to use in making predictions (ignored) + */ + override def predict (t: Int, y_ : VectorD): Double = + val yp = reg.predict (x(t)) +// debug ("predict", s"@t = $t, b = $b dot x(t) = ${x(t)} = yp = $yp vs. 
y_ = ${y_(t)}") + yp + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, + * i.e., forecast the following time points: t+1, ..., t+h. + * Intended to work with rolling validation (analog of predict method). + * @param t the time point from which to make forecasts + * @param y_ the actual values to use in making predictions + */ + override def forecast (t: Int, y_ : VectorD = y): VectorD = + val yh = new VectorD (hh) // hold forecasts for each horizon + for h <- 1 to hh do + val xy = forge (x(min (t+1, x.dim-1)), yf(t), h) // FIX - why t+1 + val pred = reg.predict (xy) // slide in prior forecasted values +// debug ("forecast", s"h = $h, @t = $t, xy = $xy, yp = $pred, y_ = ${y_(t)}") + yf(t, h) = pred // record in forecast matrix + yh(h-1) = pred // record forecasts for each horizon + yh // return forecasts for all horizons + end forecast + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). + * Assign into FORECAST MATRIX and return the h-steps ahead forecast. + * Note, `predictAll` provides predictions for h = 1. + * @see `forecastAll` method in `Forecaster` trait. 
+ * @param h the forecasting horizon, number of steps ahead to produce forecasts + * @param y_ the actual values to use in making forecasts + */ + override def forecastAt (h: Int, y_ : VectorD = y): VectorD = + if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") + + for t <- y_.indices do // make forecasts over all time points for horizon h + val xy = forge (x(t), yf(t), h) + val pred = reg.predict (xy) +// debug ("forecastAt", s"h = $h, @t = $t, xy = $xy, yp = $pred, y_ = ${y_(t)}") + yf(t, h) = pred // record in forecast matrix + yf(?, h) // return the h-step ahead forecast vector + end forecastAt + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forge a new vector from the first spec values of x, the last p-h+1 values + * of x (past values) and values 1 to h-1 from the forecasts. + * @param xx the t-th row of the input matrix (lagged actual values) + * @param yy the t-th row of the forecast matrix (forecasted future values) + */ + def forge (xx: VectorD, yy: VectorD, h: Int): VectorD = + val pp = p + 1 + var xy = xx(0 until spec) ++ xx (xx.dim+h-pp until xx.dim) + val nyy = pp - xy.dim + if nyy > 0 then xy = xy ++ yy(h-nyy until h) + xy + end forge + +end TranARY + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `TranARY` companion object provides factory methods for the `TranARY` class. + */ +object TranARY: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create an `TranARY` object by building an input matrix x and then calling the + * constructor. 
+ * @see `scalation.modeling.TranRegression` for several options for (tran, itran) pairs + * @param y the response vector (time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param tran the transformation function (defaults to log1p) + * @param itran the inverse transformation function to rescale predictions to original y scale (defaults to expm1) + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ + def apply (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = hp, + tran: FunctionS2S = log1p, itran: FunctionS2S = expm1, + bakcast: Boolean = false): TranARY = + val p = hparam("p").toInt // use the last p values + val spec = hparam("spec").toInt // trend terms to include + val lwave = hparam("lwave").toDouble // wavelength (distance between peaks) + val yt = y.map (tran) // y transformed + val xt = makeMatrix4T (yt, spec, lwave, bakcast) // trend terms + val xl = makeMatrix4L (yt, p, bakcast) // regular lag terms + new TranARY (xt ++^ xl, yt, hh, tRng, hparam, itran, bakcast) // hook for user to transform back + end apply + +end TranARY + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tranARYTest` main function tests the `TranARY` class on real data: + * Forecasting Lake Levels using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.traARYTest + */ +@main def tranARYTest (): Unit = + + val hh = 3 // maximum forecasting horizon + + val mod = TranARY (y, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") + mod.trainNtest_x ()() // train and test on full dataset + + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + mod.diagnoseAll (y, mod.getYf) + println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") + +end tranARYTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tranARYTest2` main function tests the `TranARY` class on real data: + * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. + * Test forecasts (h = 1 to hh steps ahead forecasts). + * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.tranARYTest2 + */ +@main def tranARYTest2 (): Unit = + + val hh = 3 // maximum forecasting horizon + + val mod = TranARY (y, hh) // create model for time series data + banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") + mod.trainNtest_x ()() // train and test on full dataset + + mod.rollValidate () // TnT with Rolling Validation + println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") + mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - tran + mod.diagnoseAll (y, mod.getYf.map_ (mod.itran), Forecaster.teRng (y.dim)) // only diagnose on the testing set - orig + println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + +end tranARYTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tranARYTest3` main function tests the `TranARY` class on real data: + * Forecasting COVID-19 using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.tranARYTest3 + */ +@main def tranARYTest3 (): Unit = + + val yy = loadData_y () +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + + for p <- 1 to 5 do // number of lags + hp("p") = p + val mod = TranARY (y, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest_x ()() // train and test on full dataset + + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + mod.diagnoseAll (y, mod.getYf) + println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") + end for + +end tranARYTest3 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `tranARYTest4` main function tests the `TranARY` class on real data: + * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. + * Test forecasts (h = 1 to hh steps ahead forecasts). + * > runMain scalation.modeling.forecasting.tranARYTest4 + */ +@main def tranARYTest4 (): Unit = + + val yy = loadData_y () +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + + for p <- 2 to 2 do // number of lags + hp("p") = p + val mod = TranARY (y, hh) // create model for time series data + banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest_x ()() // use customized trainNtest_ + + mod.setSkip (0) + mod.rollValidate () // TnT with Rolling Validation + println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") + mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - tran + mod.diagnoseAll (y, mod.getYf.map_ (mod.itran), Forecaster.teRng (y.dim)) // only diagnose on the testing set - orig + println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + end for + +end tranARYTest4 + diff --git a/src/main/scala/scalation/modeling/forecasting/TrendModel.scala 
b/src/main/scala/scalation/modeling/forecasting/TrendModel.scala index 85a5d12ef..331504d3b 100644 --- a/src/main/scala/scalation/modeling/forecasting/TrendModel.scala +++ b/src/main/scala/scalation/modeling/forecasting/TrendModel.scala @@ -38,11 +38,12 @@ import Example_LakeLevels.y class TrendModel (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = null, bakcast: Boolean = false) - extends Forecaster (y, hh, tRng, hparam, bakcast): + extends Forecaster (y, hh, tRng, hparam, bakcast) + with NoSubModels: private val flaw = flawf ("TrendModel") // flaw function - modelName = s"TrendModel" + _modelName = "TrendModel" //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Given a time series y_, train the forecasting function y_ = f(lags (y_)) + e, diff --git a/src/main/scala/scalation/modeling/forecasting/WeightedMovingAverage.scala b/src/main/scala/scalation/modeling/forecasting/WeightedMovingAverage.scala index 6c2a1761e..c2234ac69 100644 --- a/src/main/scala/scalation/modeling/forecasting/WeightedMovingAverage.scala +++ b/src/main/scala/scalation/modeling/forecasting/WeightedMovingAverage.scala @@ -14,10 +14,6 @@ package forecasting import scalation.mathstat._ -import Forecaster.rdot -import Example_Covid.loadData_y -import Example_LakeLevels.y - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `WeightedMovingAverage` class provides basic time series analysis capabilities for * WeightedMovingAverage models. WeightedMovingAverage models are often used for forecasting. 
@@ -35,14 +31,14 @@ import Example_LakeLevels.y class WeightedMovingAverage (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = SimpleMovingAverage.hp, bakcast: Boolean = false) - extends Forecaster (y, hh, tRng, hparam, bakcast): + extends SimpleMovingAverage (y, hh, tRng, hparam, bakcast): private val flaw = flawf ("WeightedMovingAverage") // flaw function private val q = hparam("q").toInt // take mean of last q values private val u = hparam("u").toDouble // u = 0 => flat, 1 => linear weights - b = WeightedMovingAverage.weights (q, u) // combination of linear/flat weights - modelName = s"WeightedMovingAverage($q)" + b = WeightedMovingAverage.weights (q, u) // combination of linear/flat weights + _modelName = s"WeightedMovingAverage_$q" //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Predict a value for y_t using the 1-step ahead forecast. @@ -50,9 +46,23 @@ class WeightedMovingAverage (y: VectorD, hh: Int, tRng: Range = null, * y_t = f (y_t-1, ...) = weighted mean of last q values (weighted moving average model) * * @param t the time point being predicted - * @param y_ the actual values to use in making predictions (mean (inclusive, exclusice)) + * @param y_ the actual values to use in making predictions (mean (inclusive, exclusive)) + */ + override def predict (t: Int, y_ : VectorD): Double = + if t < 1 then -0.0 // not enough prior data + else adjDot (y_(max0 (t-q) until t), b) // prior q actual values * b + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the dot product of vectors v1 and v2, unless v1 has fewer elements, + * in which the v2 weights must be rescaled up (adjusted dot product). 
+ * @param v1 the first vector (data) + * @param v2 the second vector (weights) */ - override def predict (t: Int, y_ : VectorD): Double = rdot (b, y_, t-1) + private inline def adjDot (v1: VectorD, v2: VectorD): Double = + if v1.dim == v2.dim then v1 dot v2 // no difference => dot product + else (v1 dot v2) / v2(0 until v1.dim).sum // otherwise => adjusted dot product + end adjDot //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, @@ -64,7 +74,8 @@ class WeightedMovingAverage (y: VectorD, hh: Int, tRng: Range = null, override def forecast (t: Int, y_ : VectorD = yb): VectorD = val yh = new VectorD (hh) // hold forecasts for each horizon for h <- 1 to hh do - val pred = rdot (b, yf, t, h-1) // slide in prior forecasted values + val pred = if t < 1 then -0.0 // not enough prior data + else adjDot (forge (t, h), b) // record in forecast matrix yf(t, h) = pred // record in forecast matrix yh(h-1) = pred // record forecasts for each horizon yh // return forecasts for all horizons @@ -82,7 +93,8 @@ class WeightedMovingAverage (y: VectorD, hh: Int, tRng: Range = null, if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") for t <- y_.indices do // make forecasts over all time points for horizon h - yf(t, h) = rdot (b, yf, t, h-1) // record in forecast matrix + yf(t, h) = if t < 1 then -0.0 // not enough prior data + else adjDot (forge (t, h), b) // record in forecast matrix yf(?, h) // return the h-step ahead forecast vector end forecastAt @@ -134,10 +146,12 @@ object WeightedMovingAverage: end WeightedMovingAverage +import Example_Covid.loadData_y +import Example_LakeLevels.y //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `weightedMovingAverageTest` main function tests the `WeightedMovingAverage` class on real data: - * Forecasting Lake Levels using In-Sample Testing (In-ST). 
+/** The `weightedMovingAverageTest` main function tests the `WeightedMovingAverage` class + * on real data: Forecasting Lake Levels using In-Sample Testing (In-ST). * Test forecasts (h = 1 to hh steps ahead forecasts). * @see cran.r-project.org/web/packages/fpp/fpp.pdf * > runMain scalation.modeling.forecasting.weightedMovingAverageTest @@ -158,8 +172,8 @@ end weightedMovingAverageTest //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `weightedMovingAverageTest2` main function tests the `WeightedMovingAverage` class on real data: - * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. +/** The `weightedMovingAverageTest2` main function tests the `WeightedMovingAverage` class + * on real data: Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. * Test forecasts (h = 1 to hh steps ahead forecasts). * @see cran.r-project.org/web/packages/fpp/fpp.pdf * > runMain scalation.modeling.forecasting.weightedMovingAverageTest2 @@ -179,8 +193,8 @@ end weightedMovingAverageTest2 //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `weightedMovingAverageTest3` main function tests the `WeightedMovingAverage` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). +/** The `weightedMovingAverageTest3` main function tests the `WeightedMovingAverage` class + * on real data: Forecasting COVID-19 using In-Sample Testing (In-ST). * Test forecasts (h = 1 to hh steps ahead forecasts). * > runMain scalation.modeling.forecasting.weightedMovingAverageTest3 */ @@ -203,8 +217,8 @@ end weightedMovingAverageTest3 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `weightedMovingAverageTest4` main function tests the `WeightedMovingAverage` class on real data: - * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. 
+/** The `weightedMovingAverageTest4` main function tests the `WeightedMovingAverage` class + * on real data: Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. * Test forecasts (h = 1 to hh steps ahead forecasts). * > runMain scalation.modeling.forecasting.weightedMovingAverageTest4 */ @@ -224,3 +238,32 @@ end weightedMovingAverageTest3 end weightedMovingAverageTest4 + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `weightedMovingAverageTest5` main function tests the `WeightedMovingAverage` class + * on a simple dataset using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). + * > runMain scalation.modeling.forecasting.weightedMovingAverageTest5 + */ +@main def weightedMovingAverageTest5 (): Unit = + + import SimpleMovingAverage.hp + + val y_ = VectorD (1, 3, 5, 7, 9, 11, 13, 15, 17, 19) + + hp("q") = 3 // size of moving average window: test 2 and 3 + hp("u") = 1.0 // try both flat (0.) 
and linear (1.0) + // flat should give the same results as `SimpleMovingAverage` + val hh = 5 // maximum forecasting horizon + + val mod = new WeightedMovingAverage (y_, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on Simple Dataset") + mod.trainNtest ()() // train and test on full dataset + + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + val yf_ = mod.getYf + mod.diagnoseAll (y_, yf_) + println (s"Final In-ST Forecast Matrix yf = $yf_") + +end weightedMovingAverageTest5 + diff --git a/src/main/scala/scalation/modeling/forecasting/WeightedMovingAverage.scala.bak b/src/main/scala/scalation/modeling/forecasting/WeightedMovingAverage.scala.bak new file mode 100644 index 000000000..b4ecc597c --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/WeightedMovingAverage.scala.bak @@ -0,0 +1,227 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Sun Jun 30 13:27:00 EDT 2024 + * @see LICENSE (MIT style license file). + * + * @note Model: Weighted Moving Average (not the same as MA in ARMA) + */ + +package scalation +package modeling +package forecasting + +import scalation.mathstat._ + +import Forecaster.rdot +import Example_Covid.loadData_y +import Example_LakeLevels.y + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `WeightedMovingAverage` class provides basic time series analysis capabilities for + * WeightedMovingAverage models. WeightedMovingAverage models are often used for forecasting. + * Given time series data stored in vector y, its next value y_t = weighted mean of last q values. + * + * y_t = weighted-mean (y_t-1, ..., y_t-q) + e_t + * + * where y_t is the value of y at time t and e_t is the residual/error term. 
+ * @param y the response vector (time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters (defaults to SimpleMovingAverage.hp) + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ +class WeightedMovingAverage (y: VectorD, hh: Int, tRng: Range = null, + hparam: HyperParameter = SimpleMovingAverage.hp, + bakcast: Boolean = false) + extends Forecaster (y, hh, tRng, hparam, bakcast) + with NoSubModels: + + private val flaw = flawf ("WeightedMovingAverage") // flaw function + private val q = hparam("q").toInt // take mean of last q values + private val u = hparam("u").toDouble // u = 0 => flat, 1 => linear weights + + b = WeightedMovingAverage.weights (q, u) // combination of linear/flat weights + _modelName = s"WeightedMovingAverage_$q" + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict a value for y_t using the 1-step ahead forecast. + * + * y_t = f (y_t-1, ...) = weighted mean of last q values (weighted moving average model) + * + * @param t the time point being predicted + * @param y_ the actual values to use in making predictions (mean (inclusive, exclusice)) + */ + override def predict (t: Int, y_ : VectorD): Double = rdot (b, y_, t-1) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, + * i.e., forecast the following time points: t+1, ..., t+h. + * Intended to work with rolling validation (analog of predict method). 
+ * @param t the time point from which to make forecasts + * @param y_ the actual values to use in making predictions + */ + override def forecast (t: Int, y_ : VectorD = yb): VectorD = + val yh = new VectorD (hh) // hold forecasts for each horizon + for h <- 1 to hh do + val pred = rdot (b, yf, t, h-1) // slide in prior forecasted values + yf(t, h) = pred // record in forecast matrix + yh(h-1) = pred // record forecasts for each horizon + yh // return forecasts for all horizons + end forecast + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). + * Assign into FORECAST MATRIX and return the h-steps ahead forecast. + * Note, `predictAll` provides predictions for h = 1. + * @see `forecastAll` method in `Forecaster` trait. + * @param h the forecasting horizon, number of steps ahead to produce forecasts + * @param y_ the actual values to use in making forecasts + */ + override def forecastAt (h: Int, y_ : VectorD = yb): VectorD = + if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") + + for t <- y_.indices do // make forecasts over all time points for horizon h + yf(t, h) = rdot (b, yf, t, h-1) // record in forecast matrix + yf(?, h) // return the h-step ahead forecast vector + end forecastAt + +end WeightedMovingAverage + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `WeightedMovingAverage` companion object provides factory methods for the + * `WeightedMovingAverage` class. + */ +object WeightedMovingAverage: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `WeightedMovingAverage` object. 
+ * @param y the response vector (time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + */ + def apply (y: VectorD, hh: Int, tRng: Range = null, + hparam: HyperParameter = SimpleMovingAverage.hp): WeightedMovingAverage = + new WeightedMovingAverage (y, hh, tRng, hparam) + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Calculate the weight vector used for computing the weighted average. + * param q the number weights to compute + * param u factor indicating how much to have linear vs. flat weights + */ + def weights (q: Int, u: Double): VectorD = + val ww = VectorD.range (1, q+1) + val w1 = ww / ww.sum // linear weights + val w2 = VectorD.one (q) / q // flat weights + w1 * u + w2 * (1 - u) // combination of weights + end weights + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backcast to predict the value prior to the start (or offset i) of the time series. + * @param y the response vector (time series data), a prefix suffices + * @param i the index offset (defaults to 0) + */ + def backcast (y_ : VectorD, i: Int = 0): Double = + val q = SimpleMovingAverage.hp ("q").toInt + val u = SimpleMovingAverage.hp ("u").toInt + val yy = y_(i until q+i).reverse // first q (offset by i) values reversed + val b = weights (q, u) // coefficients/weights + b dot yy // weighted average + end backcast + +end WeightedMovingAverage + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `weightedMovingAverageTest` main function tests the `WeightedMovingAverage` class on real data: + * Forecasting Lake Levels using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.weightedMovingAverageTest + */ +@main def weightedMovingAverageTest (): Unit = + + val hh = 3 // maximum forecasting horizon + + val mod = new WeightedMovingAverage (y, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") + mod.trainNtest ()() // train and test on full dataset + + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + mod.diagnoseAll (y, mod.getYf) + println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") + +end weightedMovingAverageTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `weightedMovingAverageTest2` main function tests the `WeightedMovingAverage` class on real data: + * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. + * Test forecasts (h = 1 to hh steps ahead forecasts). + * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.weightedMovingAverageTest2 + */ +@main def weightedMovingAverageTest2 (): Unit = + + val hh = 3 // maximum forecasting horizon + + val mod = new WeightedMovingAverage (y, hh) // create model for time series data + banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") + mod.trainNtest ()() // train and test on full dataset + + mod.rollValidate () // TnT with Rolling Validation + println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + +end weightedMovingAverageTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `weightedMovingAverageTest3` main function tests the `WeightedMovingAverage` class on real data: + * Forecasting COVID-19 using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.weightedMovingAverageTest3 + */ +@main def weightedMovingAverageTest3 (): Unit = + + val yy = loadData_y () +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + + val mod = new WeightedMovingAverage (y, hh) // create model for time series data + banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest ()() // train and test on full dataset + + mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y + mod.diagnoseAll (y, mod.getYf) + println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") + +end weightedMovingAverageTest3 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `weightedMovingAverageTest4` main function tests the `WeightedMovingAverage` class on real data: + * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.weightedMovingAverageTest4 + */ +@main def weightedMovingAverageTest4 (): Unit = + + val yy = loadData_y () +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + + val mod = new WeightedMovingAverage (y, hh) // create model for time series data + banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest ()() + + mod.rollValidate () // TnT with Rolling Validation + println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + +end weightedMovingAverageTest4 + diff --git a/src/main/scala/scalation/modeling/forecasting/multivar/AR_Star.scala b/src/main/scala/scalation/modeling/forecasting/multivar/AR_Star.scala index e67b85854..6b7577e19 100644 --- a/src/main/scala/scalation/modeling/forecasting/multivar/AR_Star.scala +++ b/src/main/scala/scalation/modeling/forecasting/multivar/AR_Star.scala @@ -13,6 +13,8 @@ package modeling package forecasting package multivar +import scala.collection.mutable.IndexedSeq + import scalation.mathstat._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -26,16 +28,31 @@ import scalation.mathstat._ */ class AR_Star (y: MatrixD, hh: Int, fname: Array [String] = null, tRng: Range = null, hparam: HyperParameter = AR.hp) - extends Diagnoser (dfm = 1, df = y.dim - 1) - with ForecastTensor (y, hh, tRng): + extends Diagnoser (dfr = 1, df = y.dim - 1) + with ForecastTensor (y, hh, tRng) + with Forecast + with NoSubModels: private val debug = debugf ("AR_Star", true) // debug function private val yf = makeForecastTensor (y, hh) // make the forecast tensor - val modelName = s"AR_Star${y.dim2} on $fname" + _modelName = s"AR_Star_${y.dim2}" private val mod = (for j <- y.indices2 yield new AR (y(?, j), hh, tRng, hparam)).toArray + def getY: VectorD = y(?, 0) + def hparameter: HyperParameter = hparam + def inSample_Test (skip: Int, showYp: Boolean): Unit = ??? 
+ def parameter: VectorD | MatrixD = ??? + def test (x_ : MatrixD, y_ : VectorD): (VectorD, VectorD) = ??? + def validate (rando: Boolean, ratio: Double) (idx: IndexedSeq [Int]): + (VectorD | MatrixD, VectorD | MatrixD) = ??? + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the feature/variable names. Overrides definition in `Forecast` trait. + */ + override def getFname: Array [String] = fname + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Train and test each forecasting model y_ = f(y-past) + e and report its QoF * and plot its predictions. @@ -93,6 +110,7 @@ object AR_Star: */ def apply (y: MatrixD, hh: Int, fname: Array [String], tRng: Range = null, hparam: HyperParameter = AR.hp): Array [AR] = + println (s"apply: create a AR model for each $fname") (for j <- y.indices2 yield new AR (y(?, j), hh, tRng, hparam)).toArray end apply diff --git a/src/main/scala/scalation/modeling/forecasting/multivar/ForecastTensor.scala b/src/main/scala/scalation/modeling/forecasting/multivar/ForecastTensor.scala index b416a8c53..397c8a4f6 100644 --- a/src/main/scala/scalation/modeling/forecasting/multivar/ForecastTensor.scala +++ b/src/main/scala/scalation/modeling/forecasting/multivar/ForecastTensor.scala @@ -61,15 +61,15 @@ trait ForecastTensor (y: MatrixD, hh: Int, tRng: Range = null): * @param hh the maximum forecasting horizon, number of steps ahead to produce forecasts */ def makeForecastTensor (y_ : MatrixD = y, hh_ : Int = hh): TensorD = - val yf_ = new TensorD (y_.dim, hh + 2, y_.dim2) // forecasts for all time points t & horizons to h + val yf_ = new TensorD (y_.dim, hh_ + 2, y_.dim2) // forecasts for all time points t & horizons to h debug ("makeForecastTensor", s"forecast tensor: y_.dim = ${y_.dim} --> yf_.dims = ${yf_.dims}") for j <- y_.indices2 do // for each variable for t <- y_.indices do yf_(t, 0, j) = y_(t, j) // first column (0) holds the actual time series values if tRng == null 
then - for t <- yf_.indices do yf_(t, hh+1, j) = t // last column (h+1) holds time (logical day) + for t <- yf_.indices do yf_(t, hh_ + 1, j) = t // last column (h+1) holds time (logical day) else - for t <- tRng do yf_(t, hh+1, j) = t // last column (h+1) holds time (logical day) + for t <- tRng do yf_(t, hh_ + 1, j) = t // last column (h+1) holds time (logical day) end for yf_ end makeForecastTensor @@ -133,7 +133,7 @@ trait ForecastTensor (y: MatrixD, hh: Int, tRng: Range = null): end for for j <- yf.indices3 do println (s"fitMap QoF for variable $j = ") - println (FitM.showFitMap (ftMat (?, ?, j).transpose, QoF.values.map (_.toString))) + println (Fit.showFitMap (ftMat (?, ?, j).ᵀ)) end for end diagnoseAll @@ -153,7 +153,7 @@ trait ForecastTensor (y: MatrixD, hh: Int, tRng: Range = null): // println (FitM.fitMap (qof, qoF_names)) end for println ("fitMap QoF = ") - println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) + println (Fit.showFitMap (ftMat.ᵀ)) end diagnoseAll end ForecastTensor diff --git a/src/main/scala/scalation/modeling/forecasting/multivar/Forecaster_RegV.scala b/src/main/scala/scalation/modeling/forecasting/multivar/Forecaster_RegV.scala new file mode 100644 index 000000000..796e3e923 --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/multivar/Forecaster_RegV.scala @@ -0,0 +1,526 @@ + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Mon Sep 2 14:37:55 EDT 2024 + * @see LICENSE (MIT style license file). + * + * @note Model Framework: Abstract Class for Vector Forecasters that utilize Regression + * Extending classes include VAR, VARX, ... 
+ * + * @see phdinds-aim.github.io/time_series_handbook/03_VectorAutoregressiveModels/03_VectorAutoregressiveMethods.html + * www.lem.sssup.it/phd/documents/Lesson17.pdf + * Parameter/coefficient estimation: Multi-variate Ordinary Least Squares (OLS) or + * Generalized Least Squares (GLS) + */ + +package scalation +package modeling +package forecasting +package multivar + +import scala.annotation.unused +import scala.collection.mutable.{IndexedSeq, LinkedHashSet => LSET} + +import scalation.mathstat._ +import scalation.modeling.neuralnet.{RegressionMV => REGRESSION} +//import scalation.modeling.neuralnet.{RidgeRegressionMV => REGRESSION} + +import MakeMatrix4TS.hp + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Forecaster_RegV` abstract class provides multi-variate time series analysis capabilities + * for Forecaster_RegV models. Forecaster_RegV models are similar to `ARX` models, except + * that some exogenous variables are treated as endogenous variables and are themselves forecasted. + * Potentially having more up-to-date forecasted values feeding into multi-horizon forecasting + * can improve accuracy, but may also lead to compounding of forecast errors. 
+ * @param y the response/output matrix (multi-variate time series data) + * @param x the input lagged time series data + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ +abstract class Forecaster_RegV (x: MatrixD, y: MatrixD, hh: Int, fname: Array [String] = null, + tRng: Range = null, hparam: HyperParameter = hp, + bakcast: Boolean = false) // backcasted values only used in `buildMatrix4TS` +// extends Forecaster_D (x, y, hh, tRng, hparam, bakcast): // no automatic backcasting, @see `Forecaster_RegV.apply` + extends Diagnoser (dfr = hparam("p").toInt, df = y.dim - hparam("p").toInt) + with ForecastTensor (y, hh, tRng) + with FeatureSelection // FIX -- add feature selection + with Model: + + private val debug = debugf ("Forecaster_RegV", true) // debug function + private val flaw = flawf ("Forecaster_RegV") // flaw function + + protected val nneg = hparam("nneg").toInt == 1 // 0 => unrestricted, 1 => predictions must be non-negative + protected var bb: MatrixD = null // matrix of parameter values + protected val yf = makeForecastTensor (y, hh) // make the forecast tensor + protected val reg = new REGRESSION (x, y, fname, hparam) // delegate training to multi-variate regression + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Get the data/input matrix built from lagged y vector (and optionally xe) values. + */ + def getX: MatrixD = x + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the used response vector y (first colum in matrix). + */ + def getY: VectorD = y(?, 0) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the used response matrix y. 
Mainly for derived classes where y is + * transformed. + */ + override def getYY: MatrixD = y + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the used FORECAST TENSOR yf. + */ + def getYf: TensorD = yf + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the feature/variable names. + */ + def getFname: Array [String] = fname + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Train/fit an `Forecaster_RegV` model to the times-series data y_ = f(x). + * Estimate the coefficient matrix bb for a `Forecaster_RegV` model. + * Uses OLS Matrix Factorization to determine the coefficients, i.e., the bb matrix. + * @param x_ the data/input matrix (e.g., full x) + * @param y_ the training/full response matrix (e.g., full y) + */ + def train (x_ : MatrixD, y_ : MatrixD): Unit = + debug ("train", s"$modelName, x_.dim = ${x_.dim}, y_.dim = ${y_.dim}") + reg.train (x_, y_) // train the multi-variate regression model + bb = reg.parameter // coefficients from regression + debug ("train", s"parameter matrix bb = $bb") + end train + + def train (x_ : MatrixD, y_ : VectorD): Unit = + throw new UnsupportedOperationException ("train (MatrixD, VectorD) use the alternative train") + end train + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Train and test the forecasting model y_ = f(x_) + e and report its QoF + * and plot its predictions. Return the predictions and QoF. + * NOTE: must use `trainNtest_x` when an x matrix is used, such as in `VAR`. 
+ * @param x_ the training/full data/input matrix (defaults to full x) + * @param y_ the training/full response/output vector (defaults to full y) + * @param xx the testing/full data/input matrix (defaults to full x) + * @param yy the testing/full response/output vector (defaults to full y) + */ + def trainNtest_x (x_ : MatrixD = x, y_ : MatrixD = y) + (xx: MatrixD = x, yy: MatrixD = y): (MatrixD, MatrixD) = + train (x_, y_) // train the model on training set + val (yp, qof) = test (xx, yy) // test the model on testing set + for j <- qof.indices do + banner (s"report for feature ${fname(j)}") + println (report (qof(j))) // report on Quality of Fit (QoF) + (yp, qof) + end trainNtest_x + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Test PREDICTIONS of a forecasting model y_ = f(lags (y_)) + e + * and return its predictions and QoF vector. Testing may be in-sample + * (on the training set) or out-of-sample (on the testing set) as determined + * by the parameters passed in. Note, must call train before test. + * Must override to get Quality of Fit (QoF). 
+ * @param x_ the data/input matrix (ignored, pass null) + * @param y_ the actual testing/full response/output matrix + */ + def test (@unused x_ : MatrixD, y_ : MatrixD): (MatrixD, MatrixD) = + val yp = predictAll (y_) // make all predictions + val yy = if bakcast then y_(1 until y_.dim) // align the actual values + else y_ + println (s"yy.dim = ${yy.dim}, yp.dim = ${yp.dim}") +// Forecaster.differ (yy, yfh) // uncomment for debugging + assert (yy.dim == yp.dim) // make sure the vector sizes agree + + Forecaster_RegV.plotAll (yy, yp, s"test: $modelName") + mod_resetDF (yy.dim) // reset the degrees of freedom + (yp, diagnose (yy, yp)) // return predicted and QoF vectors + end test + + def test (x_ : MatrixD, y_ : VectorD): (VectorD, VectorD) = + throw new UnsupportedOperationException ("test (MatrixD, VectorD): use the alternative test") + end test + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the parameters. + */ + def parameter: VectorD | MatrixD = bb + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the hyper-parameters. + */ + def hparameter: HyperParameter = hparam + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Diagnose the quality of the model for each variable. + * @param yy the matrix of actual values + * @param yp the matrix of predicted values + */ + def diagnose (yy: MatrixD, yp: MatrixD): MatrixD = + MatrixD (for j <- yy.indices2 yield diagnose (yy(?, j), yp(?, j))) + end diagnose + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict a value for y_t using the 1-step ahead forecast. 
+ * @see `modeling.rectify` define in `Predictor.scala` + * @param t the time point being predicted + * @param y_ the actual values to use in making predictions + * FIX -- `Forecaster_Reg` uses x(t) while x(t-1) is used here + */ + def predict (t: Int, y_ : MatrixD): VectorD = + val yp = rectify (reg.predict (x(t-1)), nneg) + if t < y_.dim then + debug ("predict", s"@t = $t, x(t-1) = ${x(t-1)}, yp = $yp vs. y_ = ${y_(t)}") + yp + end predict + + def predict (z: VectorD): Double | VectorD = + throw new UnsupportedOperationException ("predict (VectorD): use the alternative predict") + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict all values corresponding to the given time series vector y_. + * Update FORECAST TENSOR yf and return PREDICTION MATRIX yp as second (1) column + * of yf with last value removed. + * Note, yf(t, h, j) if the forecast to time t, horizon h, variable j + * @see `forecastAll` to forecast beyond horizon h = 1. + * @see `Forecaster.predictAll` for template implementation for vectors + * @param y_ the actual time series values to use in making predictions + */ + def predictAll (y_ : MatrixD): MatrixD = + if bakcast then + for t <- 1 until y_.dim do yf(t-1, 1) = predict (t, y_) // use model to make predictions + yf(?, 1)(0 until y_.dim-1) // return yp: first horizon only + else +// debug ("predictAll", s"y_.dim = ${y_.dim}, yf.dims = ${yf.dims}") + for t <- 1 until yf.dim+1 do yf(t-1, 1) = predict (t, y_) // skip t = 0 + yf(?, 1) + end predictAll + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forge a new vector from the first spec values of x, the last p-h+1 values + * of x (past values), values 1 to h-1 from the forecasts, and available values + * from exogenous variables. 
+ * @param xx the t-th row of the input matrix (lagged actual values) + * @param yy the t-th row of the forecast tensor (forecasted future values) + * @param h the forecasting horizon, number of steps ahead to produce forecasts + */ + def forge (xx: VectorD, yy: MatrixD, h: Int): VectorD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, + * i.e., forecast the following time points: t+1, ..., t+h. + * Intended to work with rolling validation (analog of predict method). + * @param t the time point from which to make forecasts + * @param y_ the actual values to use in making predictions + */ + def forecast (t: Int, y_ : MatrixD): MatrixD = ??? + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). + * Assign into FORECAST TENSOR and return the h-steps ahead forecast. + * Note, yf(t, h, j) if the forecast to time t, horizon h, variable j + * Note, `predictAll` provides predictions for h = 1. + * @see `forecastAll` method in `Forecaster` trait. 
+ * @param h the forecasting horizon, number of steps ahead to produce forecasts + * @param y_ the actual values to use in making forecasts + */ + def forecastAt (h: Int, y_ : MatrixD = y): MatrixD = + if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") + + for t <- y_.indices do // make forecasts over all time points for horizon h + val xy = forge (x(t), yf(t), h) // yf(t) = time t, all horizons, all variables + val pred = rectify (reg.predict (xy), nneg) // slide in prior forecasted values +// debug ("forecastAt", s"h = $h, @t = $t, xy = $xy, yp = $pred, y_ = ${y_(t)}") + yf(t, h) = pred // record in forecast tensor + yf(?, h) // return the h-step ahead forecast vector + end forecastAt + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forecast values for all y_.dim time points and all horizons (1 through hh-steps ahead). + * Record these in the FORECAST TENSOR yf, where + * + * yf(t, h) = h-steps ahead forecast for y_t + * + * @param y_ the actual values to use in making forecasts + */ + def forecastAll (y_ : MatrixD): TensorD = + for h <- 2 to hh do forecastAt (h, y_) // forecast k-steps into the future + yf // return tensor of forecasted values + end forecastAll + +// F E A T U R E S E L E C T I O N + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a sub-model that is restricted to the given columns of the data matrix. + * Must be implemented for models that support feature selection. + * Otherwise, use @see `NoBuildModel` + * @note: Forecasting models should use this method to build their own sub-models. FIX.
+ * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) + */ + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): REGRESSION = + reg.buildModel (x_cols, fname2) + end buildModel + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert the underlying Regression Model to a subtype of `Forecaster_Reg` Forecasting Model. + * @param mod the model to convert, e.g., the best model after feature selection + */ + def convertReg2Forc (mod: Model_FS = getBest.mod): Forecaster_RegV = ??? + + private var theBest = BestStep ()() // record the best model from feature selection + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Reset the best-step to default + */ + def resetBest (): Unit = theBest = BestStep ()() + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the best model found from feature selection. + */ + def getBest: BestStep = theBest + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** When the new best-step is better than theBest, replace theBest. + * Note: for QoF where smaller if better, must switch to '<'. 
+ * @param best new best-step found during feature selection + * @param qk index of Quality of Fit (QoF) to use for comparing quality + * defaults to smapeC, could try rSqBar could work better + private def updateBest (best: BestStep) (using qk: Int): Unit = + if best.qof != null then + if theBest.qof == null || (best gt theBest.qof(qk)) then theBest = best + end updateBest + */ + +// FIX -- implement the next four methods + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform FORWARD SELECTION to find the MOST predictive features/variables + * to ADD into the model, returning the features/variables added and the new + * Quality of Fit (QoF) measures/metrics for all steps. + * @see `Fit` for index of QoF measures/metrics. + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") + * @param qk index of Quality of Fit (QoF) to use for comparing quality + */ + def forwardSelAll (cross: String)(using qk: Int): (LSET [Int], MatrixD) = ??? + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform BACKWARD ELIMINATION to find the LEAST predictive features/variables + * to REMOVE from the full model, returning the features/variables left and the + * new Quality of Fit (QoF) measures/metrics for all steps. + * @see `Fit` for index of QoF measures/metrics. + * @param first first variable to consider for elimination + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") + * @param qk index of Quality of Fit (QoF) to use for comparing quality + */ + def backwardElimAll (first: Int, cross: String) (using qk: Int): (LSET [Int], MatrixD) = ??? 
+ + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform STEPWISE SELECTION to find a GOOD COMBINATION of predictive features/variables + * to have in the model, returning the features/variables selected and the new Quality of Fit + * (QoF) measures/metrics for all steps. At each step, it calls forward and backward + * and takes the best of the two actions. Stops when neither action yields improvement. + * @see `Fit` for index of QoF measures/metrics. + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") + * @param swap whether to allow a swap step (swap out a feature for a new feature in one step) + * @param qk index of Quality of Fit (QoF) to use for comparing quality + */ + def stepwiseSelAll (cross: String, swap: Boolean)(using qk: Int): (LSET [Int], MatrixD) = ??? + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform BEAM SEARCH SELECTION to find a GOOD COMBINATION of predictive features/variables to + * have in the model, returning the top k sets of features/variables selected and the new Quality of + * Fit (QoF) measures/metrics for all steps. At each step, iterate over the models in the beam + * (top k) and create candidates by adding features (phase 1) and then removing (phase 2). + * From all the candidates, keep the best k and start a new iteration. Stops when there is + * no improvement in any of top k (or the maximum number of features is reached). + * @see `Fit` for index of QoF measures/metrics.
+ * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") + * @param bk the beam width holding the top k models (defaults to 3) + * @param qk index of Quality of Fit (QoF) to use for comparing quality + */ + def beamSelAll (cross: String = "many", bk: Int = 3)(using qk: Int): (LSET [Int], MatrixD) = + + // FIX -- to be implemented + + null + end beamSelAll + +// T E S T I N G S C E N A R I O S + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Align the actual response matrix for comparison with the predicted/forecasted + * response matrix, returning a time vector and sliced response matrix. + * @param tr_size the size of the initial training set + * @param y the actual response for the full dataset (to be sliced) + */ + def align (tr_size: Int, y: MatrixD): (VectorD, MatrixD) = + (VectorD.range (tr_size, y.dim), y(tr_size until y.dim)) + end align + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /* Use validation to compute test Quality of Fit (QoF) measures by dividing + * the full dataset into a TESTING-set and a TRAINING-set. + * Delegates to `rollValidate` with no retraining and only diagnoses the first horizon. + * Must call the `set_TE_RATIO` method to change the default TE ratio.
+ * @param rando flag indicating whether to use randomized or simple validation (false) + * @param ratio the ratio of the TESTING-set to the full dataset (most common 70-30 (.3), 80-20 (.2)) + * @param idx the prescribed TESTING-set indices (default => null) + * FIX - copied from `Forecaster` change it to work for VAR, VARX + */ + def validate (rando: Boolean = false, ratio: Double = Model.TE_RATIO) + (@unused idx: IndexedSeq [Int] = null): + (VectorD, VectorD) = + debug ("validate", s"rando = $rando (requires false), ratio = $ratio (requires ${Model.TE_RATIO})") + val te_size = Model.teSize (y.dim) // size of testing set + val yf1 = rollValidate (y.dim, false)(1)(0) // get column 1 returned from `rollValidate` + val y_ = y(y.dim - te_size until y.dim)(0) // trim the actual values to testing-set + val yf_ = yf1(y.dim - te_size until y.dim) // trim the forecast at h = 1 to testing-set + val qof = diagnose (y_, yf_) + (yf1, qof) + end validate + + def crossValidate (k: Int, rando: Boolean): Array [Statistic] = + throw new UnsupportedOperationException ("Use `rollValidate` instead of `crossValidate`") + end crossValidate + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Use rolling-validation to compute test Quality of Fit (QoF) measures + * by dividing the dataset into a TRAINING SET (tr) and a TESTING SET (te). + * as follows: [ <-- tr_size --> | <-- te_size --> ] + * Calls forecast for h-steps ahead out-of-sample forecasts. + * Return the FORECAST TENSOR. 
+ * @param rc the retraining cycle (number of forecasts until retraining occurs) + * @param growing whether the training grows as it roll or kepps a fixed size + * FIX - copied from `Forecaster` change it to work for VAR, VARX + */ + def rollValidate (rc: Int = 2, growing: Boolean = false): TensorD = + val ftMat = new MatrixD (hh, Fit.N_QoF) + banner (s"rollValidate: Evaluate ${modelName}'s QoF for horizons 1 to $hh:") + + val x = getX // get internal/expanded data/input matrix + val y = getYY // get internal/expanded response/output matrix + val yf = getYf // get the full in-sample forecast tensor + val te_size = Model.teSize (y.dim) // size of testing set + val tr_size = Model.trSize (y.dim) // size of initial training set + debug ("rollValidate", s"y.dim = ${y.dim}, train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") + + val yp = new MatrixD (te_size, y.dim2) // y-predicted over testing set (only for h=1) + for i <- 0 until te_size do // iterate through testing set + val is = if growing then 0 else i + val t = tr_size + i // next time point to forecast + if i % rc == 0 then + val x_ = if x != null then x(is until t) else null + train (x_, y(is until t)) // retrain on sliding training set +// yp(i) = predict (min (t+1, y.dim-1), y) // predict the next value (only for h=1) + yp(i) = predict (t, y) // predict the next value (only for h=1) + val yd = forecast (t, y) // forecast the next hh-values, yf is updated + println (s"yf(t, 0) = ${yf(t, 0)}, yp(i) = ${yp(i)}, yd = $yd") +// assert (yp(i) =~ yd(0)) // make sure h=1 forecasts agree with predictions + end for + + val (t, yy) = align (tr_size, y) // align vectors + new Plot (t, yy(0), yp(0), s"rollValidate: Plot yy(0), yp(0) vs. 
t for $modelName", lines = true) + + val yf_ = yf(tr_size until y.dim) // forecast tensor for test-set + for h <- 1 to hh do + val yy_ = yy(h-1 until yy.dim) // trim the actual values + val yfh = yf_(?, h)(0 until yy.dim-h+1) // column h of the forecast tensor + + new Plot (t, yy_(0), yfh(0), s"rollValidate: Plot yy_(0), yfh(0) vs. t for $modelName @h = $h", lines = true) + mod_resetDF (te_size - h) // reset degrees of freedom + val qof = diagnose (yy_, yfh) + ftMat(h-1) = qof(0) // FIX -- need for all endo, not just the first +// println (FitM.fitMap (qof, qoF_names)) + end for + println ("fitMap qof = ") + println (Fit.showFitMap (ftMat.ᵀ)) + yf + end rollValidate + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform In-Sample Testing, i.e., train and test on the full data set. + * @param skip the number of initial time points to skip (due to insufficient past) + * @param showYf whether to show the forecast matrix + * FIX - copied from `Forecaster` change it to work for VAR, VARX + */ + override def inSample_Test (skip: Int = 2, showYf: Boolean = false): Unit = + banner (s"In-Sample Test: $modelName") + trainNtest_x ()() // train on full and test on full + forecastAll (getYY) // forecast over all horizons + setSkip (skip) // diagnose: skip the first 'skip' rows + diagnoseAll (getYY, getYf) // compute metrics for all horizons + if showYf then + println (s"Final In-Sample Forecast Matrix yf = ${getYf}") +// println (s"Final In-Sample Forecast Matrix yf = ${getYf.shiftDiag}") + end inSample_Test + +end Forecaster_RegV + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Forecaster_RegV` object supports regression for Multivariate Time Series data. + * Given a response matrix y, a predictor matrix x is built that consists of + * lagged y vectors. Additional future response vectors are built for training. + * y_t = b dot x + * where x = [y_{t-1}, y_{t-2}, ... y_{t-lag}]. 
+ */ +object Forecaster_RegV: + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Use rolling-validation to compute test Quality of Fit (QoF) measures + * by dividing the dataset into a TRAINING SET (tr) and a TESTING SET (te) + * as follows: [ <-- tr_size --> | <-- te_size --> ] + * This version calls predict for one-step ahead out-of-sample forecasts. + * @see `RollingValidation` + * @param mod the forecasting model being used (e.g., `Forecaster_RegV`) + * @param rc the retraining cycle (number of forecasts until retraining occurs) + * + def rollValidate (mod: PredictorMV & Fit, rc: Int): Unit = + val x = mod.getX // get data/input matrix + val y = mod.getY // get response/output vector + val te_size = RollingValidation.teSize (y.dim) // size of testing set + val tr_size = y.dim - te_size // size of initial training set + debug ("rollValidate", s"train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") + + val yp = new MatrixD (te_size, y.dim2) // y-predicted over testing set + for i <- 0 until te_size do // iterate through testing set + val t = tr_size + i // next time point to forecast +// if i % rc == 0 then mod.train (x(0 until t), y(0 until t)) // retrain on sliding training set (growing set) + if i % rc == 0 then mod.train (x(i until t), y(i until t)) // retrain on sliding training set (fixed size set) + yp(i) = mod.predict (x(t-1)) // predict the next value + end for + + val df = max (0, mod.parameter(0).dim - 1) // degrees of freedom for model + mod.resetDF (df, te_size - df) // reset degrees of freedom + for k <- y.indices2 do + val (t, yk) = RollingValidation.align (tr_size, y(?, k)) // align vectors + val ypk = yp(?, k) + banner (s"QoF for horizon ${k+1} with yk.dim = ${yk.dim}, ypk.dim = ${ypk.dim}") + new Plot (t, yk, ypk, s"Plot yy, yp vs. 
t for horizon ${k+1}", lines = true) + println (FitM.fitMap (mod.diagnose (yk, ypk), qoF_names)) + end for + end rollValidate + */ + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Plot actual vs. predicted values for all variables (columns of the matrices). + * @param y the original un-expanded output/response matrix + * @param yp the predicted values (one-step ahead forecasts) matrix + * @param name the name of the model run to produce yp + */ + def plotAll (y: MatrixD, yp: MatrixD, name: String): Unit = + for j <- y.indices2 do + new Plot (null, y(?, j).drop (1), yp(?, j), s"$name, y vs. yp @ var j = $j", lines = true) + end plotAll + +end Forecaster_RegV + diff --git a/src/main/scala/scalation/modeling/forecasting/multivar/RandomWalk_Star.scala b/src/main/scala/scalation/modeling/forecasting/multivar/RandomWalk_Star.scala index 9141c4f1e..8acb92c5e 100644 --- a/src/main/scala/scalation/modeling/forecasting/multivar/RandomWalk_Star.scala +++ b/src/main/scala/scalation/modeling/forecasting/multivar/RandomWalk_Star.scala @@ -13,6 +13,8 @@ package modeling package forecasting package multivar +import scala.collection.mutable.IndexedSeq + import scalation.mathstat._ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -26,16 +28,31 @@ import scalation.mathstat._ */ class RandomWalk_Star (y: MatrixD, hh: Int, fname: Array [String] = null, tRng: Range = null, hparam: HyperParameter = null) - extends Diagnoser (dfm = 1, df = y.dim - 1) - with ForecastTensor (y, hh, tRng): + extends Diagnoser (dfr = 1, df = y.dim - 1) + with ForecastTensor (y, hh, tRng) + with Forecast + with NoSubModels: private val debug = debugf ("RandomWalk_Star", true) // debug function private val yf = makeForecastTensor (y, hh) // make the forecast tensor - val modelName = s"RandomWalk_Star${y.dim2} on $fname" + _modelName = s"RandomWalk_Star_${y.dim2}" private val mod = (for j <- y.indices2 yield new RandomWalk (y(?, j), 
hh, tRng, hparam)).toArray + def getY: VectorD = y(?, 0) + def hparameter: HyperParameter = hparam + def inSample_Test (skip: Int, showYp: Boolean): Unit = ??? + def parameter: VectorD | MatrixD = ??? + def test (x_ : MatrixD, y_ : VectorD): (VectorD, VectorD) = ??? + def validate (rando: Boolean, ratio: Double) (idx: IndexedSeq [Int]): + (VectorD | MatrixD, VectorD | MatrixD) = ??? + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the feature/variable names. Overrides definition in `Forecast` trait. + */ + override def getFname: Array [String] = fname + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Train and test each forecasting model y_ = f(y-past) + e and report its QoF * and plot its predictions. @@ -93,6 +110,7 @@ object RandomWalk_Star: */ def apply (y: MatrixD, hh: Int, fname: Array [String] = null, tRng: Range = null, hparam: HyperParameter = null): Array [RandomWalk] = + println (s"apply: create a RandomWalk model for each $fname") (for j <- y.indices2 yield new RandomWalk (y(?, j), hh, tRng, hparam)).toArray end apply diff --git a/src/main/scala/scalation/modeling/forecasting/multivar/VAR.scala b/src/main/scala/scalation/modeling/forecasting/multivar/VAR.scala index d83b09e4f..44363750f 100644 --- a/src/main/scala/scalation/modeling/forecasting/multivar/VAR.scala +++ b/src/main/scala/scalation/modeling/forecasting/multivar/VAR.scala @@ -18,11 +18,7 @@ package modeling package forecasting package multivar -import scala.runtime.ScalaRunTime.stringOf - import scalation.mathstat._ -import scalation.modeling.neuralnet.{RegressionMV => REGRESSION} -//import scalation.modeling.neuralnet.{RidgeRegressionMV => REGRESSION} import MakeMatrix4TS.hp @@ -32,12 +28,13 @@ import MakeMatrix4TS.hp * as endogenous variables and are themselves forecasted. 
Potentially having more * up-to-date forecasted values feeding into multi-horizon forecasting can improve * accuracy, but may also lead to compounding of forecast errors. - * Given multi-variate time series data stored in matrix y, its next value y_t = combination - * of last p vector values of y. + * Given multi-variate time series data where matrix x holds the input and matrix y holds + * the output, the next vector value y_t = combination of last p vector values in x. * - * y_t = b dot x_t + e_t + * y_t = bb dot x_t + e_t * - * where y_t is the value of y at time t and e_t is the residual/error term. + * where y_t is the value of y at time t, bb is the parameter matrix and e_t is the + * residual/error term. * @param y the response/output matrix (multi-variate time series data) * @param x the input lagged time series data * @param hh the maximum forecasting horizon (h = 1 to hh) @@ -50,199 +47,19 @@ class VAR (x: MatrixD, y: MatrixD, hh: Int, fname: Array [String] = null, tRng: Range = null, hparam: HyperParameter = hp, bakcast: Boolean = false) // backcasted values only used in `buildMatrix4TS` // extends Forecaster_D (x, y, hh, tRng, hparam, bakcast): // no automatic backcasting, @see `VAR.apply` - extends Diagnoser (dfm = hparam("p").toInt, df = y.dim - hparam("p").toInt) - with ForecastTensor (y, hh, tRng) - with Model: + extends Forecaster_RegV (x, y, hh, fname, tRng, hparam, bakcast): private val debug = debugf ("VAR", true) // debug function - private val flaw = flawf ("VAR") // flaw function private val p = hparam("p").toInt // use the last p values for each variable private val spec = hparam("spec").toInt // trend terms: 0 - none, 1 - constant, 2 - linear, 3 - quadratic // 4 - sine, 5 cosine - private val nneg = hparam("nneg").toInt == 1 // 0 => unrestricted, 1 => predictions must be non-negative private val n = y.dim2 // the number of variables - private var bb: MatrixD = null // matrix of parameter values - private val yf = makeForecastTensor (y, hh) 
// make the forecast tensor - - private val reg = new REGRESSION (x, y, fname, hparam) // delegate training to multi-variate regression - modelName = s"VAR($p, $n) on ${stringOf(fname)}" + _modelName = s"VAR_${p}_$n" debug ("init", s"$modelName with additional term spec = $spec") debug ("init", s"x = $x") - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the data/input matrix built from lagged y vector values. - */ - def getX: MatrixD = x - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the used response vector y (first colum in matrix). - */ - def getY: VectorD = y(?, 0) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the used response matrix y. Mainly for derived classes where y is - * transformed. - */ - override def getYY: MatrixD = y - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the feature/variable names. - */ - def getFname: Array [String] = fname - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `VAR` model to the times-series data in vector y_. - * Estimate the coefficient matrix bb for a p,q-th order VAR(p, q) model. - * Uses OLS Matrix Factorization to determine the coefficients, i.e., the bb matrix. 
- * @param x_ the data/input matrix (e.g., full x) - * @param y_ the training/full response matrix (e.g., full y) - */ - def train (x_ : MatrixD, y_ : MatrixD): Unit = - debug ("train", s"$modelName, x_.dim = ${x_.dim}, y_.dim = ${y_.dim}") - reg.train (x_, y_) // train the multi-variate regression model - bb = reg.parameter // coefficients from regression - debug ("train", s"parameter matrix bb = $bb") - end train - - def train (x_ : MatrixD, y_ : VectorD): Unit = - throw new UnsupportedOperationException ("train (MatrixD, VectorD) use the alternative train") - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train and test the forecasting model y_ = f(y-past) + e and report its QoF - * and plot its predictions. Return the predictions and QoF. - * NOTE: must use `trainNtest_x` when an x matrix is used, such as in `ARY`. - * @param x_ the training/full data/input matrix (defaults to full x) - * @param y_ the training/full response/output vector (defaults to full y) - * @param xx the testing/full data/input matrix (defaults to full x) - * @param yy the testing/full response/output vector (defaults to full y) - */ - def trainNtest_x (x_ : MatrixD = x, y_ : MatrixD = y)(xx: MatrixD = x, yy: MatrixD = y): (MatrixD, MatrixD) = - train (x_, y_) // train the model on training set - val (yp, qof) = test (xx, yy) // test the model on testing set - for j <- qof.indices do println (report (qof(j))) // report on Quality of Fit (QoF) - (yp, qof) - end trainNtest_x - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of a forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note, must call train before test. - * Must override to get Quality of Fit (QoF). 
- * @param x_ the data/input matrix (ignored, pass null) - * @param y_ the actual testing/full response/output matrix - */ - def test (x_ : MatrixD, y_ : MatrixD): (MatrixD, MatrixD) = - val yp = predictAll (y_) // make all predictions - val yy = if bakcast then y_(1 until y_.dim) // align the actual values - else y_ - println (s"yy.dim = ${yy.dim}, yp.dim = ${yp.dim}") -// Forecaster.differ (yy, yfh) // uncomment for debugging - assert (yy.dim == yp.dim) // make sure the vector sizes agree - - VAR.plotAll (yy, yp, s"test: $modelName") - mod_resetDF (yy.dim) // reset the degrees of freedom - (yp, diagnose (yy, yp)) // return predicted and QoF vectors - end test - - def test (x_ : MatrixD, y_ : VectorD): (VectorD, VectorD) = - throw new UnsupportedOperationException ("test (MatrixD, VectorD) use the alternative test") - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameters. - */ - def parameter: VectorD | MatrixD = bb - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the hyper-parameters. - */ - def hparameter: HyperParameter = hparam - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Diagnose the quality of the model for each variable. - * @param yy the matrix of actual values - * @param yp the matrix of predicted values - */ - def diagnose (yy: MatrixD, yp: MatrixD): MatrixD = - MatrixD (for j <- yy.indices2 yield diagnose (yy(?, j), yp(?, j))) - end diagnose - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t using the 1-step ahead forecast. - * - * y_t = b_0 + b_1 y_t-1 + b_2 y_t-2 + ... + b_p y_t-p = b dot x_t - * - * FIX - parameter order is in conflict with AR models. 
- * @param t the time point being predicted - * @param y_ the actual values to use in making predictions (ignored) - */ - def predict (t: Int, y_ : MatrixD): VectorD = - val yp = rectify (reg.predict (x(t-1)), nneg) - if t < y_.dim then - debug ("predict", s"@t = $t, x(t-1) = ${x(t-1)}, yp = $yp vs. y_ = ${y_(t)}") - yp - end predict - - def predict (z: VectorD): Double | VectorD = - throw new UnsupportedOperationException ("predict (VectorD) use the alternative predict") - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict all values corresponding to the given time series vector y_. - * Update FORECAST TENSOR yf and return PREDICTION MATRIX yp as second (1) column - * of yf with last value removed. - * Note, yf(t, h, j) if the forecast to time t, horizon h, variable j - * @see `forecastAll` to forecast beyond horizon h = 1. - * @see `Forecaster.predictAll` for template implementation for vectors - * @param y_ the actual time series values to use in making predictions - */ - def predictAll (y_ : MatrixD): MatrixD = - if bakcast then - for t <- 1 until y_.dim do yf(t-1, 1) = predict (t, y_) // use model to make predictions - yf(?, 1)(0 until y_.dim-1) // return yp: first horizon only - else -// debug ("predictAll", s"y_.dim = ${y_.dim}, yf.dims = ${yf.dims}") - for t <- 1 until yf.dim+1 do yf(t-1, 1) = predict (t, y_) // skip t = 0 - yf(?, 1) - end predictAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through hh-steps ahead). 
- * Record these in the FORECAST TENSOR yf, where - * - * yf(t, h) = h-steps ahead forecast for y_t - * - * @param y_ the actual values to use in making forecasts - */ - def forecastAll (y_ : MatrixD): TensorD = - for h <- 2 to hh do forecastAt (h, y_) // forecast k-steps into the future - yf // return tensor of forecasted values - end forecastAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign into FORECAST TENSOR and return the h-steps ahead forecast. - * Note, yf(t, h, j) if the forecast to time t, horizon h, variable j - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - */ - def forecastAt (h: Int, y_ : MatrixD = y): MatrixD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - - for t <- y_.indices do // make forecasts over all time points for horizon h - val xy = forge (x(t), yf(t), h) // yf(t) = time t, all horizons, all variables - val pred = rectify (reg.predict (xy), nneg) // slide in prior forecasted values -// debug ("forecastAt", s"h = $h, @t = $t, xy = $xy, yp = $pred, y_ = ${y_(t)}") - yf(t, h) = pred // record in forecast matrix - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Forge a new vector from the first spec values of x, the last p-h+1 values * of x (past values), values 1 to h-1 from the forecasts, and available values @@ -266,10 +83,6 @@ class VAR (x: MatrixD, y: MatrixD, hh: Int, fname: Array [String] = null, xy end forge - def crossValidate (k: Int, rando: Boolean): Array [Statistic] = - throw new UnsupportedOperationException ("Use `rollValidate` instead of `crossValidate`") - end 
crossValidate - end VAR @@ -292,12 +105,13 @@ object VAR: * @param fname the feature/variable names * @param tRng the time range, if relevant (time index may suffice) * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) */ def apply (y: MatrixD, hh: Int, fname: Array [String] = null, tRng: Range = null, hparam: HyperParameter = hp, bakcast: Boolean = false): VAR = // backcasted values only used in `buildMatrix4TS` - val y_0 = y(?, 0) // the main endogenous variable (column zero) - val yy = y(?, 1 until y.dim2) // the other endogenous variables (rest of the columns) - val x = ARX.buildMatrix (yy, y_0, hparam, bakcast) // add spec trend columns and p|q lags for each column of y + val y_0 = y(?, 0) // the main endogenous variable (column zero) + val yy = y(?, 1 until y.dim2) // the other endogenous variables (rest of the columns) + val x = ARX.buildMatrix (yy, y_0, hparam, bakcast) // add spec trend columns and p|q lags for each column of y new VAR (x, y, hh, fname, tRng, hparam) end apply @@ -351,19 +165,9 @@ object VAR: end rollValidate */ - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Plot actual vs. predicted values for all variables (columns of the matrices). - * @param y the original un-expanded output/response matrix - * @param yp the predicted values (one-step ahead forecasts) matrix - * @param name the name of the model run to produce yp - */ - def plotAll (y: MatrixD, yp: MatrixD, name: String): Unit = - for j <- y.indices2 do - new Plot (null, y(?, j).drop (1), yp(?, j), s"$name, y vs. yp @ var j = $j", lines = true) - end plotAll - end VAR +import Forecaster_RegV.plotAll //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `vARTest` main function tests the `VAR` class. 
@@ -375,7 +179,7 @@ end VAR val m = 30 val z = VectorD.range (1, m) - val y = MatrixD (z, -z + m) + val y = MatrixD (z, -z + m).ᵀ val hh = 3 // the forecasting horizon hp("q") = 2 @@ -388,7 +192,7 @@ end VAR // val yy = mod.getY // val yp = mod.predict (mod.getX) -// VAR.plotAll (yy, yp, mod.modelName) +// plotAll (yy, yp, mod.modelName) // for k <- yp.indices2 do // new Plot (null, yy(?, k), yp(?, k), s"yy_$k vs. yp_$k for ${mod.modelName} (h=${k+1}) with $p lags", lines = true) end for @@ -415,10 +219,10 @@ end vARTest banner ("Test In-Sample VAR on GasFurnace Data") val mod = VAR (y, hh, header) // create model for time series data - val (yp, qof) = mod.trainNtest_x ()() // train on full and test on full + val yp = mod.trainNtest_x ()()._1 // train on full and test on full println (mod.summary) val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 - VAR.plotAll (yy_, yp, mod.modelName) + plotAll (yy_, yp, mod.modelName) end vARTest2 @@ -446,19 +250,18 @@ end vARTest2 new Plot (null, y(?, j), null, s"y_$j (${vars(j)}) vs. 
t", lines = true) banner ("Test In-Sample VAR on COVID-19 Weekly Data") - val mod = VAR (y, hh) // create model for time series data - val (yp, qof) = mod.trainNtest_x ()() // train on full and test on full + val mod = VAR (y, hh, vars) // create model for time series data + mod.trainNtest_x ()() // train on full and test on full // println (mod.summary ()) // val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 -// VAR.plotAll (yy_, yp, mod.modelName) +// plotAll (yy_, yp, mod.modelName) /* banner (s"Feature Selection Technique: Stepwise") val (cols, rSq) = mod.stepRegressionAll (cross = false) // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for VAR with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for VAR with tech", lines = true) banner ("Feature Importance") println (s"Stepwise: rSq = $rSq") @@ -491,10 +294,10 @@ end vARTest3 hp("p") = LAGS hp("q") = 2 val mod = VAR (y, hh) // create model for time series data - with exo - val (yp, qof) = mod.trainNtest_x ()() // train on full and test on full + mod.trainNtest_x ()() // train on full and test on full // println (mod.summary ()) // val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 -// VAR.plotAll (yy_, yp, mod.modelName) +// plotAll (yy_, yp, mod.modelName) // val tech = SelectionTech.Forward // pick one feature selection technique // val tech = SelectionTech.Backward @@ -505,8 +308,7 @@ end vARTest3 val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for VAR with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for VAR with tech", lines = true) banner ("Feature Importance") println (s"$tech: rSq = $rSq") @@ -539,10 
+341,10 @@ end vARTest4 hp("q") = 2 banner ("Test In-Sample VAR on COVID-19 Weekly Data") val mod = VAR (y, hh) // create model for time series data - with exo - val (yp, qof) = mod.trainNtest_x ()() // train on full and test on full + mod.trainNtest_x ()() // train on full and test on full // println (mod.summary ()) // val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 -// VAR.plotAll (yy_, yp, mod.modelName) +// plotAll (yy_, yp, mod.modelName) // val tech = SelectionTech.Forward // pick one feature selection technique // val tech = SelectionTech.Backward @@ -553,8 +355,7 @@ end vARTest4 val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for VAR with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for VAR with tech", lines = true) banner ("Feature Importance") println (s"$tech: rSq = $rSq") @@ -566,7 +367,7 @@ end vARTest4 banner ("Run TnT on Best model") val bmod = mod.getBest._3 // get the best model from feature selection val (x_, y_, xtest, ytest) = VAR.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest_x (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) + val yptest = bmod.trainNtest_x (x_, y_)(xtest, ytest)._1 // train on (x_, y_) and test on (xtest, ytest) new Plot (null, ytest(?, 0), yptest(?, 0), s"${mod.modelName}, ytest vs. 
yptest", lines = true) */ @@ -584,7 +385,8 @@ end vARTest5 val LAGS = 5 // number of lags val h = 6 // forecasting horizon - val vars = Array ("new_deaths", "icu_patients", "hosp_patients", "new_tests", "people_vaccinated") +// val vars = Array ("new_deaths", "icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val vars = Array ("new_deaths", "icu_patients") val yy = Example_Covid.loadData_yy (vars) val iskip = yy(?, 0).indexWhere (_ >= 6.0) // find day with at least 6 deaths println (s"iskip = $iskip is first day with at least 6 deaths") @@ -594,11 +396,14 @@ end vARTest5 hp("p") = LAGS hp("q") = 2 banner ("Test In-Sample VAR on COVID-19 Weekly Data") - val mod = VAR (y, h) // create model for time series data - with exo - val (yp, qof) = mod.trainNtest_x ()() // train on full and test on full + val mod = VAR (y, h, fname = vars) // create model for time series data - with exo + mod.trainNtest_x ()() // train on full and test on full + mod.rollValidate () // TnT with Rolling Validation default rc = 2 + mod.diagnoseAll (mod.getYY, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set + // println (mod.summary ()) // val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 -// VAR.plotAll (yy_, yp, mod.modelName) +// plotAll (yy_, yp, mod.modelName) // val tech = SelectionTech.Forward // pick one feature selection technique // val tech = SelectionTech.Backward @@ -609,8 +414,7 @@ end vARTest5 val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for VAR with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for VAR with tech", lines = true) banner ("Feature Importance") println (s"$tech: rSq = $rSq") diff --git a/src/main/scala/scalation/modeling/forecasting/multivar/VAR.scala.bak 
b/src/main/scala/scalation/modeling/forecasting/multivar/VAR.scala.bak new file mode 100644 index 000000000..669aa1fd9 --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/multivar/VAR.scala.bak @@ -0,0 +1,629 @@ + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Mon Sep 2 14:37:55 EDT 2024 + * @see LICENSE (MIT style license file). + * + * @note Model: Vector AutoRegressive (VAR) + * + * @see phdinds-aim.github.io/time_series_handbook/03_VectorAutoregressiveModels/03_VectorAutoregressiveMethods.html + * www.lem.sssup.it/phd/documents/Lesson17.pdf + * Parameter/coefficient estimation: Multi-variate Ordinary Least Squares (OLS) or + * Generalized Least Squares (GLS) + */ + +package scalation +package modeling +package forecasting +package multivar + +import scala.annotation.unused +import scala.runtime.ScalaRunTime.stringOf + +import scalation.mathstat._ +import scalation.modeling.neuralnet.{RegressionMV => REGRESSION} +//import scalation.modeling.neuralnet.{RidgeRegressionMV => REGRESSION} + +import MakeMatrix4TS.hp + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `VAR` class provides multi-variate time series analysis capabilities for VAR models. + * VAR models are similar to `ARX` models, except that the exogenous variables are treated + * as endogenous variables and are themselves forecasted. Potentially having more + * up-to-date forecasted values feeding into multi-horizon forecasting can improve + * accuracy, but may also lead to compounding of forecast errors. + * Given multi-variate time series data stored in matrix y, its next value y_t = combination + * of last p vector values of y. + * + * y_t = b dot x_t + e_t + * + * where y_t is the value of y at time t and e_t is the residual/error term. 
+ * @param y the response/output matrix (multi-variate time series data) + * @param x the input lagged time series data + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ +class VAR (x: MatrixD, y: MatrixD, hh: Int, fname: Array [String] = null, + tRng: Range = null, hparam: HyperParameter = hp, + bakcast: Boolean = false) // backcasted values only used in `buildMatrix4TS` +// extends Forecaster_D (x, y, hh, tRng, hparam, bakcast): // no automatic backcasting, @see `VAR.apply` + extends Diagnoser (dfm = hparam("p").toInt, df = y.dim - hparam("p").toInt) + with ForecastTensor (y, hh, tRng) + with Model: + + private val debug = debugf ("VAR", true) // debug function + private val flaw = flawf ("VAR") // flaw function + private val p = hparam("p").toInt // use the last p values for each variable + private val spec = hparam("spec").toInt // trend terms: 0 - none, 1 - constant, 2 - linear, 3 - quadratic + // 4 - sine, 5 cosine + private val nneg = hparam("nneg").toInt == 1 // 0 => unrestricted, 1 => predictions must be non-negative + private val n = y.dim2 // the number of variables + private var bb: MatrixD = null // matrix of parameter values + private val yf = makeForecastTensor (y, hh) // make the forecast tensor + + private val reg = new REGRESSION (x, y, fname, hparam) // delegate training to multi-variate regression + + modelName = s"VAR($p, $n) on ${stringOf(fname)}" + + debug ("init", s"$modelName with additional term spec = $spec") + debug ("init", s"x = $x") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Get the data/input matrix built from lagged y vector values. 
+ */ + def getX: MatrixD = x + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the used response vector y (first colum in matrix). + */ + def getY: VectorD = y(?, 0) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the used response matrix y. Mainly for derived classes where y is + * transformed. + */ + override def getYY: MatrixD = y + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the feature/variable names. + */ + def getFname: Array [String] = fname + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Train/fit an `VAR` model to the times-series data in vector y_. + * Estimate the coefficient matrix bb for a p,q-th order VAR(p, q) model. + * Uses OLS Matrix Factorization to determine the coefficients, i.e., the bb matrix. + * @param x_ the data/input matrix (e.g., full x) + * @param y_ the training/full response matrix (e.g., full y) + */ + def train (x_ : MatrixD, y_ : MatrixD): Unit = + debug ("train", s"$modelName, x_.dim = ${x_.dim}, y_.dim = ${y_.dim}") + reg.train (x_, y_) // train the multi-variate regression model + bb = reg.parameter // coefficients from regression + debug ("train", s"parameter matrix bb = $bb") + end train + + def train (x_ : MatrixD, y_ : VectorD): Unit = + throw new UnsupportedOperationException ("train (MatrixD, VectorD) use the alternative train") + end train + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Train and test the forecasting model y_ = f(y-past) + e and report its QoF + * and plot its predictions. Return the predictions and QoF. + * NOTE: must use `trainNtest_x` when an x matrix is used, such as in `ARY`. 
+ * @param x_ the training/full data/input matrix (defaults to full x) + * @param y_ the training/full response/output vector (defaults to full y) + * @param xx the testing/full data/input matrix (defaults to full x) + * @param yy the testing/full response/output vector (defaults to full y) + */ + def trainNtest_x (x_ : MatrixD = x, y_ : MatrixD = y)(xx: MatrixD = x, yy: MatrixD = y): (MatrixD, MatrixD) = + train (x_, y_) // train the model on training set + val (yp, qof) = test (xx, yy) // test the model on testing set + for j <- qof.indices do + banner (s"report for feature ${fname(j)}") + println (report (qof(j))) // report on Quality of Fit (QoF) + (yp, qof) + end trainNtest_x + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Test PREDICTIONS of a forecasting model y_ = f(lags (y_)) + e + * and return its predictions and QoF vector. Testing may be in-sample + * (on the training set) or out-of-sample (on the testing set) as determined + * by the parameters passed in. Note, must call train before test. + * Must override to get Quality of Fit (QoF). 
+ * @param x_ the data/input matrix (ignored, pass null) + * @param y_ the actual testing/full response/output matrix + */ + def test (@unused x_ : MatrixD, y_ : MatrixD): (MatrixD, MatrixD) = + val yp = predictAll (y_) // make all predictions + val yy = if bakcast then y_(1 until y_.dim) // align the actual values + else y_ + println (s"yy.dim = ${yy.dim}, yp.dim = ${yp.dim}") +// Forecaster.differ (yy, yfh) // uncomment for debugging + assert (yy.dim == yp.dim) // make sure the vector sizes agree + + VAR.plotAll (yy, yp, s"test: $modelName") + mod_resetDF (yy.dim) // reset the degrees of freedom + (yp, diagnose (yy, yp)) // return predicted and QoF vectors + end test + + def test (x_ : MatrixD, y_ : VectorD): (VectorD, VectorD) = + throw new UnsupportedOperationException ("test (MatrixD, VectorD) use the alternative test") + end test + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the parameters. + */ + def parameter: VectorD | MatrixD = bb + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the hyper-parameters. + */ + def hparameter: HyperParameter = hparam + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Diagnose the quality of the model for each variable. + * @param yy the matrix of actual values + * @param yp the matrix of predicted values + */ + def diagnose (yy: MatrixD, yp: MatrixD): MatrixD = + MatrixD (for j <- yy.indices2 yield diagnose (yy(?, j), yp(?, j))) + end diagnose + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict a value for y_t using the 1-step ahead forecast. + * + * y_t = b_0 + b_1 y_t-1 + b_2 y_t-2 + ... + b_p y_t-p = b dot x_t + * + * FIX - parameter order is in conflict with AR models. 
+ * @param t the time point being predicted + * @param y_ the actual values to use in making predictions (ignored) + */ + def predict (t: Int, y_ : MatrixD): VectorD = + val yp = rectify (reg.predict (x(t-1)), nneg) + if t < y_.dim then + debug ("predict", s"@t = $t, x(t-1) = ${x(t-1)}, yp = $yp vs. y_ = ${y_(t)}") + yp + end predict + + def predict (z: VectorD): Double | VectorD = + throw new UnsupportedOperationException ("predict (VectorD) use the alternative predict") + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict all values corresponding to the given time series vector y_. + * Update FORECAST TENSOR yf and return PREDICTION MATRIX yp as second (1) column + * of yf with last value removed. + * Note, yf(t, h, j) if the forecast to time t, horizon h, variable j + * @see `forecastAll` to forecast beyond horizon h = 1. + * @see `Forecaster.predictAll` for template implementation for vectors + * @param y_ the actual time series values to use in making predictions + */ + def predictAll (y_ : MatrixD): MatrixD = + if bakcast then + for t <- 1 until y_.dim do yf(t-1, 1) = predict (t, y_) // use model to make predictions + yf(?, 1)(0 until y_.dim-1) // return yp: first horizon only + else +// debug ("predictAll", s"y_.dim = ${y_.dim}, yf.dims = ${yf.dims}") + for t <- 1 until yf.dim+1 do yf(t-1, 1) = predict (t, y_) // skip t = 0 + yf(?, 1) + end predictAll + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forecast values for all y_.dim time points and all horizons (1 through hh-steps ahead). 
+ * Record these in the FORECAST TENSOR yf, where + * + * yf(t, h) = h-steps ahead forecast for y_t + * + * @param y_ the actual values to use in making forecasts + */ + def forecastAll (y_ : MatrixD): TensorD = + for h <- 2 to hh do forecastAt (h, y_) // forecast k-steps into the future + yf // return tensor of forecasted values + end forecastAll + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). + * Assign into FORECAST TENSOR and return the h-steps ahead forecast. + * Note, yf(t, h, j) if the forecast to time t, horizon h, variable j + * Note, `predictAll` provides predictions for h = 1. + * @see `forecastAll` method in `Forecaster` trait. + * @param h the forecasting horizon, number of steps ahead to produce forecasts + * @param y_ the actual values to use in making forecasts + */ + def forecastAt (h: Int, y_ : MatrixD = y): MatrixD = + if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") + + for t <- y_.indices do // make forecasts over all time points for horizon h + val xy = forge (x(t), yf(t), h) // yf(t) = time t, all horizons, all variables + val pred = rectify (reg.predict (xy), nneg) // slide in prior forecasted values +// debug ("forecastAt", s"h = $h, @t = $t, xy = $xy, yp = $pred, y_ = ${y_(t)}") + yf(t, h) = pred // record in forecast matrix + yf(?, h) // return the h-step ahead forecast vector + end forecastAt + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forge a new vector from the first spec values of x, the last p-h+1 values + * of x (past values), values 1 to h-1 from the forecasts, and available values + * from exogenous variables. 
+ * @param xx the t-th row of the input matrix (lagged actual values) + * @param yy the t-th row of the forecast tensor (forecasted future values) + * @param h the forecasting horizon, number of steps ahead to produce forecasts + */ + def forge (xx: VectorD, yy: MatrixD, h: Int): VectorD = + val xy = new VectorD (spec + n * p) + xy(0 until spec) = xx(0 until spec) // get trend values + + var jend = spec + p // ending j index + for j <- 0 until n do // for each variable + val x_act = xx(jend-(p+1-h) until jend) // get actual lagged y-values + val nyy = p - x_act.dim // number of forecasted values needed + val x_fcast = yy(h-nyy until h, j) // get forecasted y-values + xy(jend-p until jend) = x_act ++ x_fcast + jend += p + end for + xy + end forge + + def crossValidate (k: Int, rando: Boolean): Array [Statistic] = + throw new UnsupportedOperationException ("Use `rollValidate` instead of `crossValidate`") + end crossValidate + +end VAR + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `VAR` object supports regression for Multivariate Time Series data. + * Given a response matrix y, a predictor matrix x is built that consists of + * lagged y vectors. Additional future response vectors are built for training. + * y_t = b dot x + * where x = [y_{t-1}, y_{t-2}, ... y_{t-lag}]. + */ +object VAR: + +// private val debug = debugf ("VAR", true) // debug function + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `VAR` object from a response matrix. The input/data matrix + * x is formed from the lagged y vectors as columns in matrix x. 
+ * @param y the response/output matrix (multi-variate time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) + */ + def apply (y: MatrixD, hh: Int, fname: Array [String] = null, tRng: Range = null, + hparam: HyperParameter = hp, bakcast: Boolean = false): VAR = // backcasted values only used in `buildMatrix4TS` + val y_0 = y(?, 0) // the main endogenous variable (column zero) + val yy = y(?, 1 until y.dim2) // the other endogenous variables (rest of the columns) + val x = ARX.buildMatrix (yy, y_0, hparam, bakcast) // add spec trend columns and p|q lags for each column of y + new VAR (x, y, hh, fname, tRng, hparam) + end apply + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Split the x matrix and y matrix into training and testing sets. + * @param x the x data/input matrix + * @param y the y response/output matrix + * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) + * + def split_TnT (x: MatrixD, y: MatrixD, ratio: Double = 0.30): (MatrixD, MatrixD, MatrixD, MatrixD) = + val n = x.dim + val tr_size = (n * (1.0 - ratio)).toInt + println (s"VAR.split_TnT: tr_size = $tr_size, te_size = ${n - tr_size}") + (x(0 until tr_size), y(0 until tr_size), x(tr_size until n), y(tr_size until n)) + end split_TnT + */ + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Use rolling-validation to compute test Quality of Fit (QoF) measures + * by dividing the dataset into a TRAINING SET (tr) and a TESTING SET (te) + * as follows: [ <-- tr_size --> | <-- te_size --> ] + * This version calls predict for one-step ahead out-of-sample forecasts. 
+ * @see `RollingValidation` + * @param mod the forecasting model being used (e.g., `VAR`) + * @param rc the retraining cycle (number of forecasts until retraining occurs) + * + def rollValidate (mod: PredictorMV & Fit, rc: Int): Unit = + val x = mod.getX // get data/input matrix + val y = mod.getY // get response/output vector + val te_size = RollingValidation.teSize (y.dim) // size of testing set + val tr_size = y.dim - te_size // size of initial training set + debug ("rollValidate", s"train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") + + val yp = new MatrixD (te_size, y.dim2) // y-predicted over testing set + for i <- 0 until te_size do // iterate through testing set + val t = tr_size + i // next time point to forecast +// if i % rc == 0 then mod.train (x(0 until t), y(0 until t)) // retrain on sliding training set (growing set) + if i % rc == 0 then mod.train (x(i until t), y(i until t)) // retrain on sliding training set (fixed size set) + yp(i) = mod.predict (x(t-1)) // predict the next value + end for + + val df = max (0, mod.parameter(0).dim - 1) // degrees of freedom for model + mod.resetDF (df, te_size - df) // reset degrees of freedom + for k <- y.indices2 do + val (t, yk) = RollingValidation.align (tr_size, y(?, k)) // align vectors + val ypk = yp(?, k) + banner (s"QoF for horizon ${k+1} with yk.dim = ${yk.dim}, ypk.dim = ${ypk.dim}") + new Plot (t, yk, ypk, s"Plot yy, yp vs. t for horizon ${k+1}", lines = true) + println (FitM.fitMap (mod.diagnose (yk, ypk), qoF_names)) + end for + end rollValidate + */ + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Plot actual vs. predicted values for all variables (columns of the matrices). 
+ * @param y the original un-expanded output/response matrix + * @param yp the predicted values (one-step ahead forecasts) matrix + * @param name the name of the model run to produce yp + */ + def plotAll (y: MatrixD, yp: MatrixD, name: String): Unit = + for j <- y.indices2 do + new Plot (null, y(?, j).drop (1), yp(?, j), s"$name, y vs. yp @ var j = $j", lines = true) + end plotAll + +end VAR + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `vARTest` main function tests the `VAR` class. + * This test is used to CHECK that the `buildMatrix4TS` method (@see `apply`) is working correctly. + * May get NaN for some maximum lags (p) due to multi-collinearity. + * > runMain scalation.modeling.forecasting.multivar.vARTest + */ +@main def vARTest (): Unit = + + val m = 30 + val z = VectorD.range (1, m) + val y = MatrixD (z, -z + m).transpose + val hh = 3 // the forecasting horizon + + hp("q") = 2 + for p <- 5 to 5 do // autoregressive hyper-parameter p + hp("p") = p + banner (s"Test: VAR with $p lags") + val mod = VAR (y, hh) // create model for time series data + mod.trainNtest_x ()() // train the model on full dataset + println (mod.summary) + +// val yy = mod.getY +// val yp = mod.predict (mod.getX) +// VAR.plotAll (yy, yp, mod.modelName) +// for k <- yp.indices2 do +// new Plot (null, yy(?, k), yp(?, k), s"yy_$k vs. yp_$k for ${mod.modelName} (h=${k+1}) with $p lags", lines = true) + end for + +end vARTest + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `vARTest2` main function tests the `VAR` class on real data: + * Forecasting Gas Furnace Data. Performs In-Sample Testing. 
+ * > runMain scalation.modeling.forecasting.multivar.vARTest2 + */ +@main def vARTest2 (): Unit = + + import Example_GasFurnace._ + + val hh = 4 // forecasting horizon + val LAGS = 5 // number of lags + hp("p") = LAGS + hp("q") = 2 + + val y = Example_GasFurnace.loadData_yy (header) + println (s"y.dims = ${y.dims}") + + banner ("Test In-Sample VAR on GasFurnace Data") + val mod = VAR (y, hh, header) // create model for time series data + val yp = mod.trainNtest_x ()()._1 // train on full and test on full + println (mod.summary) + val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 + VAR.plotAll (yy_, yp, mod.modelName) + +end vARTest2 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `vARTest3` main function tests the `VAR` class on real data: + * Forecasting COVID-19 Weekly Data. Performs In-Sample Testing. + * Goal: Find the variable that works best with "new_deaths" + * > runMain scalation.modeling.forecasting.multivar.vARTest3 + */ +@main def vARTest3 (): Unit = + + val hh = 6 // maximum forecasting horizon + val LAGS = 2 // number of lags + hp("p") = LAGS + hp("q") = 2 + + val vars = Array ("new_deaths", "icu_patients") + val yy = Example_Covid.loadData_yy (vars) +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + println (s"y.dims = ${y.dims}") + + for j <- vars.indices do + new Plot (null, y(?, j), null, s"y_$j (${vars(j)}) vs. 
t", lines = true) + + banner ("Test In-Sample VAR on COVID-19 Weekly Data") + val mod = VAR (y, hh, vars) // create model for time series data + mod.trainNtest_x ()() // train on full and test on full +// println (mod.summary ()) +// val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 +// VAR.plotAll (yy_, yp, mod.modelName) + +/* + banner (s"Feature Selection Technique: Stepwise") + val (cols, rSq) = mod.stepRegressionAll (cross = false) // R^2, R^2 bar, sMAPE, NA + val k = cols.size + println (s"k = $k, n = ${mod.getX.dim2}") + new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), + s"R^2 vs n for VAR with tech", lines = true) + + banner ("Feature Importance") + println (s"Stepwise: rSq = $rSq") +*/ +// val imp = mod.importance (cols.toArray, rSq) +// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") + +end vARTest3 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `vARTest4` main function tests the `VAR` class on real data: + * Forecasting COVID-19 Weekly Data. Performs In-Sample Testing. 
+ * Goal: Find the four variables that works best with "new_deaths" + * > runMain scalation.modeling.forecasting.multivar.vARTest4 + */ +@main def vARTest4 (): Unit = + + val LAGS = 5 // number of lags + val hh = 6 // forecasting horizon + + val vars = Array ("new_deaths", "icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val yy = Example_Covid.loadData_yy (vars) + val iskip = yy(?, 0).indexWhere (_ >= 6.0) // find day with at least 6 deaths + println (s"iskip = $iskip is first day with at least 6 deaths") + val y = yy(iskip until yy.dim) // trim away the first iskip rows + println (s"y.dims = ${y.dims}") + + banner ("Test In-Sample VAR on COVID-19 Weekly Data") + hp("p") = LAGS + hp("q") = 2 + val mod = VAR (y, hh) // create model for time series data - with exo + mod.trainNtest_x ()() // train on full and test on full +// println (mod.summary ()) +// val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 +// VAR.plotAll (yy_, yp, mod.modelName) + +// val tech = SelectionTech.Forward // pick one feature selection technique +// val tech = SelectionTech.Backward +// val tech = SelectionTech.Stepwise + +/* + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val k = cols.size + println (s"k = $k, n = ${mod.getX.dim2}") + new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), + s"R^2 vs n for VAR with tech", lines = true) + + banner ("Feature Importance") + println (s"$tech: rSq = $rSq") +*/ +// val imp = mod.importance (cols.toArray, rSq) +// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") + +end vARTest4 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `vARTest5` main function tests the `VAR` class on real data: + * Forecasting COVID-19 Weekly Data. Does TnT Testing on endogenous and exogenous variables. 
+ * Determine the terms to include in the model using Stepwise on In-Sample. + * > runMain scalation.modeling.forecasting.multivar.vARTest5 + */ +@main def vARTest5 (): Unit = + + val LAGS = 5 // number of lags + val hh = 6 // forecasting horizon + + val vars = Array ("new_deaths", "icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val yy = Example_Covid.loadData_yy (vars) + val iskip = yy(?, 0).indexWhere (_ >= 6.0) // find day with at least 6 deaths + println (s"iskip = $iskip is first day with at least 6 deaths") + val y = yy(iskip until yy.dim) // trim away the first iskip rows + println (s"y.dims = ${y.dims}") + + hp("p") = LAGS + hp("q") = 2 + banner ("Test In-Sample VAR on COVID-19 Weekly Data") + val mod = VAR (y, hh) // create model for time series data - with exo + mod.trainNtest_x ()() // train on full and test on full +// println (mod.summary ()) +// val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 +// VAR.plotAll (yy_, yp, mod.modelName) + +// val tech = SelectionTech.Forward // pick one feature selection technique +// val tech = SelectionTech.Backward +// val tech = SelectionTech.Stepwise + +/* + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val k = cols.size + println (s"k = $k, n = ${mod.getX.dim2}") + new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), + s"R^2 vs n for VAR with tech", lines = true) + + banner ("Feature Importance") + println (s"$tech: rSq = $rSq") +*/ +// val imp = mod.importance (cols.toArray, rSq) +// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") + +/* + banner ("Run TnT on Best model") + val bmod = mod.getBest._3 // get the best model from feature selection + val (x_, y_, xtest, ytest) = VAR.split_TnT (bmod.getX, bmod.getY) + val yptest = bmod.trainNtest_x (x_, y_)(xtest, ytest)._1 // train on (x_, y_) and test on (xtest, ytest) + new Plot (null, 
ytest(?, 0), yptest(?, 0), s"${mod.modelName}, ytest vs. yptest", lines = true) +*/ + +end vARTest5 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `vARTest6` main function tests the `VAR` class on real data: + * Forecasting COVID-19 Weekly Data. Does Rolling Validation on variables. + * Determine the terms to include in the model using Stepwise on In-Sample. + * > runMain scalation.modeling.forecasting.multivar.vARTest6 + */ +@main def vARTest6 (): Unit = + + val LAGS = 5 // number of lags + val h = 6 // forecasting horizon + + val vars = Array ("new_deaths", "icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val yy = Example_Covid.loadData_yy (vars) + val iskip = yy(?, 0).indexWhere (_ >= 6.0) // find day with at least 6 deaths + println (s"iskip = $iskip is first day with at least 6 deaths") + val y = yy(iskip until yy.dim) // trim away the first iskip rows + println (s"y.dims = ${y.dims}") + + hp("p") = LAGS + hp("q") = 2 + banner ("Test In-Sample VAR on COVID-19 Weekly Data") + val mod = VAR (y, h) // create model for time series data - with exo + mod.trainNtest_x ()() // train on full and test on full +// println (mod.summary ()) +// val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 +// VAR.plotAll (yy_, yp, mod.modelName) + +// val tech = SelectionTech.Forward // pick one feature selection technique +// val tech = SelectionTech.Backward +// val tech = SelectionTech.Stepwise + +/* + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val k = cols.size + println (s"k = $k, n = ${mod.getX.dim2}") + new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), + s"R^2 vs n for VAR with tech", lines = true) + + banner ("Feature Importance") + println (s"$tech: rSq = $rSq") +// val imp = mod.importance (cols.toArray, rSq) +// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t 
importance = $r") + + banner ("Run Rolling Validation on VAR Best model") + val bmod = mod.getBest._3 // get the best model from feature selection + VAR.rollValidate (bmod, 1) +*/ + +end vARTest6 + diff --git a/src/main/scala/scalation/modeling/forecasting/multivar/VARX.scala b/src/main/scala/scalation/modeling/forecasting/multivar/VARX.scala new file mode 100644 index 000000000..0c10db8ae --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/multivar/VARX.scala @@ -0,0 +1,466 @@ + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Mon Sep 2 14:37:55 EDT 2024 + * @see LICENSE (MIT style license file). + * + * @note Model: Vector AutoRegressive (VARX) + * + * @see phdinds-aim.github.io/time_series_handbook/03_VectorAutoregressiveModels/03_VectorAutoregressiveMethods.html + * www.lem.sssup.it/phd/documents/Lesson17.pdf + * Parameter/coefficient estimation: Multi-variate Ordinary Least Squares (OLS) or + * Generalized Least Squares (GLS) + */ + +package scalation +package modeling +package forecasting +package multivar + +import scala.collection.mutable.ArrayBuffer + +import scalation.mathstat._ + +import MakeMatrix4TS._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `VARX` class provides multi-variate time series analysis capabilities for VARX models. + * VARX models are related to `ARX` and `VAR` models, with some of the exogenous variables + * treated as endogenous variables and are themselves forecasted. Potentially having more + * up-to-date forecasted values feeding into multi-horizon forecasting can improve + * accuracy, but may also lead to compounding of forecast errors. + * Given multi-variate time series data where matrix x holds the input and matrix y holds + * the output, the next vector value y_t = combination of last p vector values in x. 
+ * + * y_t = bb dot x_t + e_t + * + * where y_t is the value of y at time t, bb is the parameter matrix and e_t is the + * residual/error term. + * @param y the response/output matrix (multi-variate time series data) + * @param x the input lagged time series data + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param n_exo the number of exogenous variables + * @param fname the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ +class VARX (x: MatrixD, y: MatrixD, hh: Int, n_exo: Int, fname: Array [String] = null, + tRng: Range = null, hparam: HyperParameter = hp, + bakcast: Boolean = false) // backcasted values only used in `buildMatrix4TS` +// extends Forecaster_D (x, y, hh, tRng, hparam, bakcast): // no automatic backcasting, @see `VARX.apply` + extends Forecaster_RegV (x, y, hh, fname, tRng, hparam, bakcast): + + private val debug = debugf ("VARX", true) // debug function + private val p = hparam("p").toInt // use the last p endogenous values for each endo variable (p lags) + private val q = hparam("q").toInt // use the last q exogenous values for each exo variable (q lags) + private val spec = hparam("spec").toInt // trend terms: 0 - none, 1 - constant, 2 - linear, 3 - quadratic + // 4 - sine, 5 cosine + private val n = y.dim2 // the total number of variables + private val n_endo = n - n_exo // the number of endogenous variables + + _modelName = s"VARX_${p}_${q}_$n" + + debug ("init", s"$modelName with $n_endo, $n_exo endo, exo variables and additional term spec = $spec") + debug ("init", s"[ x | y ] = ${x ++^ y}") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forge a new vector from the first spec values of x, the last p-h+1 values + * of x (past values), values 1 to h-1 from the 
forecasts, and available values + * from exogenous variables. + * @param xx the t-th row of the input matrix (lagged actual values) + * @param yy the t-th row of the forecast tensor (forecasted future values) + * @param h the forecasting horizon, number of steps ahead to produce forecasts + */ + def forge (xx: VectorD, yy: MatrixD, h: Int): VectorD = + val xy = new VectorD (spec + n * p) + xy(0 until spec) = xx(0 until spec) // get trend values + + var jend = spec + p // ending j index + for j <- 0 until n do // for each variable + val x_act = xx(jend-(p+1-h) until jend) // get actual lagged y-values + val nyy = p - x_act.dim // number of forecasted values needed + val x_fcast = yy(h-nyy until h, j) // get forecasted y-values + xy(jend-p until jend) = x_act ++ x_fcast + jend += p + end for + xy + end forge + +end VARX + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `VARX` object supports regression for Multivariate Time Series data. + * Given a response matrix y, a predictor matrix x is built that consists of + * lagged y vectors. Additional future response vectors are built for training. + */ +object VARX: + +// private val debug = debugf ("VARX", true) // debug function + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `VARX` object from a response matrix. The input/data matrix + * x is formed from the lagged y vectors as columns in matrix x. 
+ * @param xe the matrix of exogenous variable values + * @param y the response/output matrix (multi-variate time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ + def apply (xe: MatrixD, y: MatrixD, hh: Int, fname_ : Array [String] = null, + tRng: Range = null, hparam: HyperParameter = hp, + bakcast: Boolean = false): VARX = // backcasted values only used in `buildMatrix` + val xy = buildMatrix (xe, y, hparam, bakcast) + val fname = if fname_ == null then formNames (xe.dim2, hparam) else fname_ + new VARX (xy, y, hh, xe.dim2, fname, tRng, hparam, bakcast) + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build the input matrix by combining the p + spec columns for the trend and + * endogenous variable with the q * xe.dim2 columns for the exogenous variables. + * @param xe the matrix of exogenous variable values + * @param y the matrix vector (time series data) + * @param hp_ the hyper-parameters + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ + def buildMatrix (xe: MatrixD, y: MatrixD, hp_ : HyperParameter, bakcast: Boolean): MatrixD = + val (p, q, spec, lwave) = (hp_("p").toInt, hp_("q").toInt, hp_("spec").toInt, hp_("lwave").toDouble) + makeMatrix4T (y(?, 0), spec, lwave, bakcast) ++^ // trend terms + makeMatrix4L (y, p, bakcast) ++^ // regular lag terms + makeMatrix4EXO (xe, q, 1, bakcast) // add exogenous terms + end buildMatrix + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Form an array of names for the features included in the model. 
+ * @param n_exo the number of exogenous variable + * @param hp_ the hyper-parameters + */ + def formNames (n_exo: Int, hp_ : HyperParameter): Array [String] = + val (p, q, spec) = (hp_("p").toInt, hp_("q").toInt, hp_("spec").toInt) + val names = ArrayBuffer [String] () + for j <- 0 until n_exo; k <- q to 1 by -1 do names += s"xe${j}l$k" + MakeMatrix4TS.formNames (spec, p) ++ names.toArray + end formNames + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Split the x matrix and y matrix into training and testing sets. + * @param x the x data/input matrix + * @param y the y response/output matrix + * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) + * + def split_TnT (x: MatrixD, y: MatrixD, ratio: Double = 0.30): (MatrixD, MatrixD, MatrixD, MatrixD) = + val n = x.dim + val tr_size = (n * (1.0 - ratio)).toInt + println (s"VARX.split_TnT: tr_size = $tr_size, te_size = ${n - tr_size}") + (x(0 until tr_size), y(0 until tr_size), x(tr_size until n), y(tr_size until n)) + end split_TnT + */ + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Use rolling-validation to compute test Quality of Fit (QoF) measures + * by dividing the dataset into a TRAINING SET (tr) and a TESTING SET (te) + * as follows: [ <-- tr_size --> | <-- te_size --> ] + * This version calls predict for one-step ahead out-of-sample forecasts. 
+ * @see `RollingValidation` + * @param mod the forecasting model being used (e.g., `VARX`) + * @param rc the retraining cycle (number of forecasts until retraining occurs) + * + def rollValidate (mod: PredictorMV & Fit, rc: Int): Unit = + val x = mod.getX // get data/input matrix + val y = mod.getY // get response/output vector + val te_size = RollingValidation.teSize (y.dim) // size of testing set + val tr_size = y.dim - te_size // size of initial training set + debug ("rollValidate", s"train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") + + val yp = new MatrixD (te_size, y.dim2) // y-predicted over testing set + for i <- 0 until te_size do // iterate through testing set + val t = tr_size + i // next time point to forecast +// if i % rc == 0 then mod.train (x(0 until t), y(0 until t)) // retrain on sliding training set (growing set) + if i % rc == 0 then mod.train (x(i until t), y(i until t)) // retrain on sliding training set (fixed size set) + yp(i) = mod.predict (x(t-1)) // predict the next value + end for + + val df = max (0, mod.parameter(0).dim - 1) // degrees of freedom for model + mod.resetDF (df, te_size - df) // reset degrees of freedom + for k <- y.indices2 do + val (t, yk) = RollingValidation.align (tr_size, y(?, k)) // align vectors + val ypk = yp(?, k) + banner (s"QoF for horizon ${k+1} with yk.dim = ${yk.dim}, ypk.dim = ${ypk.dim}") + new Plot (t, yk, ypk, s"Plot yy, yp vs. t for horizon ${k+1}", lines = true) + println (FitM.fitMap (mod.diagnose (yk, ypk), qoF_names)) + end for + end rollValidate + */ + +end VARX + +// import Forecaster_RegV.plotAll + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `vARXTest` main function tests the `VARX` class. + * This test is used to CHECK that the `buildMatrix` method (@see `apply`) is working correctly. + * May get NaN for some maximum lags (p) due to multi-collinearity. 
+ * > runMain scalation.modeling.forecasting.multivar.vARXTest + * +@main def vARXTest (): Unit = + + val m = 30 + val z = VectorD.range (1, m) + val y = MatrixD (z, -z + m).ᵀ + val hh = 3 // the forecasting horizon + + hp("q") = 2 + for p <- 5 to 5 do // autoregressive hyper-parameter p + hp("p") = p + banner (s"Test: VARX with $p lags") + val mod = VARX (y, hh) // create model for time series data + mod.trainNtest_x ()() // train the model on full dataset + println (mod.summary) + +// val yy = mod.getY +// val yp = mod.predict (mod.getX) +// plotAll (yy, yp, mod.modelName) +// for k <- yp.indices2 do +// new Plot (null, yy(?, k), yp(?, k), s"yy_$k vs. yp_$k for ${mod.modelName} (h=${k+1}) with $p lags", lines = true) + end for + +end vARXTest + */ + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `vARXTest2` main function tests the `VARX` class on real data: + * Forecasting Gas Furnace Data. Performs In-Sample Testing. + * > runMain scalation.modeling.forecasting.multivar.vARXTest2 + * +@main def vARXTest2 (): Unit = + + import Example_GasFurnace._ + + val hh = 4 // forecasting horizon + val LAGS = 5 // number of lags + hp("p") = LAGS + hp("q") = 2 + + val y = Example_GasFurnace.loadData_yy (header) + println (s"y.dims = ${y.dims}") + + banner ("Test In-Sample VARX on GasFurnace Data") + val mod = VARX (y, hh, header) // create model for time series data + val yp = mod.trainNtest_x ()()._1 // train on full and test on full + println (mod.summary) + val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 + plotAll (yy_, yp, mod.modelName) + +end vARXTest2 + */ + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `vARXTest3` main function tests the `VARX` class on real data: + * Forecasting COVID-19 Weekly Data. Performs In-Sample Testing. 
+ * Goal: Find the variable that works best with "new_deaths" + * > runMain scalation.modeling.forecasting.multivar.vARXTest3 + */ +@main def vARXTest3 (): Unit = + + val hh = 6 // maximum forecasting horizon + val LAGS = 2 // number of lags + hp("p") = LAGS + hp("q") = 2 + + val endo_vars = Array ("new_deaths", "icu_patients") + val exo_vars = Array ("hosp_patients") + val fname = endo_vars ++ exo_vars + + val yy = Example_Covid.loadData_yy (fname) +// val y = yy // full + val y = yy(0 until 116, 0 until 2) // clip the flat end, col. 0, 1 (endo) + val xe = yy(0 until 116, 2 until 3) // clip the flat end, col. 2 (exo) + println (s"y.dims = ${y.dims}") + + for j <- fname.indices do + new Plot (null, y(?, j), null, s"y_$j (${fname(j)}) vs. t", lines = true) + + banner ("Test In-Sample VARX on COVID-19 Weekly Data") + val mod = VARX (xe, y, hh) // create model for time series data + mod.trainNtest_x ()() // train on full and test on full +// println (mod.summary ()) +// val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 +// plotAll (yy_, yp, mod.modelName) + +/* + banner (s"Feature Selection Technique: Stepwise") + val (cols, rSq) = mod.stepRegressionAll (cross = false) // R^2, R^2 bar, sMAPE, NA + val k = cols.size + println (s"k = $k, n = ${mod.getX.dim2}") + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for VARX with tech", lines = true) + + banner ("Feature Importance") + println (s"Stepwise: rSq = $rSq") +*/ +// val imp = mod.importance (cols.toArray, rSq) +// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") + +end vARXTest3 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `vARXTest4` main function tests the `VARX` class on real data: + * Forecasting COVID-19 Weekly Data. Performs In-Sample Testing. 
+ * Goal: Find the four variables that works best with "new_deaths" + * > runMain scalation.modeling.forecasting.multivar.vARXTest4 + * +@main def vARXTest4 (): Unit = + + val LAGS = 5 // number of lags + val hh = 6 // forecasting horizon + + val vars = Array ("new_deaths", "icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val yy = Example_Covid.loadData_yy (vars) + val iskip = yy(?, 0).indexWhere (_ >= 6.0) // find day with at least 6 deaths + println (s"iskip = $iskip is first day with at least 6 deaths") + val y = yy(iskip until yy.dim) // trim away the first iskip rows + println (s"y.dims = ${y.dims}") + + banner ("Test In-Sample VARX on COVID-19 Weekly Data") + hp("p") = LAGS + hp("q") = 2 + val mod = VARX (y, hh) // create model for time series data - with exo + mod.trainNtest_x ()() // train on full and test on full +// println (mod.summary ()) +// val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 +// plotAll (yy_, yp, mod.modelName) + +// val tech = SelectionTech.Forward // pick one feature selection technique +// val tech = SelectionTech.Backward +// val tech = SelectionTech.Stepwise + +/* + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val k = cols.size + println (s"k = $k, n = ${mod.getX.dim2}") + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for VARX with tech", lines = true) + + banner ("Feature Importance") + println (s"$tech: rSq = $rSq") +*/ +// val imp = mod.importance (cols.toArray, rSq) +// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") + +end vARXTest4 + */ + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `vARXTest5` main function tests the `VARX` class on real data: + * Forecasting COVID-19 Weekly Data. Does TnT Testing on endogenous and exogenous variables. 
+ * Determine the terms to include in the model using Stepwise on In-Sample. + * > runMain scalation.modeling.forecasting.multivar.vARXTest5 + * +@main def vARXTest5 (): Unit = + + val LAGS = 5 // number of lags + val hh = 6 // forecasting horizon + + val vars = Array ("new_deaths", "icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val yy = Example_Covid.loadData_yy (vars) + val iskip = yy(?, 0).indexWhere (_ >= 6.0) // find day with at least 6 deaths + println (s"iskip = $iskip is first day with at least 6 deaths") + val y = yy(iskip until yy.dim) // trim away the first iskip rows + println (s"y.dims = ${y.dims}") + + hp("p") = LAGS + hp("q") = 2 + banner ("Test In-Sample VARX on COVID-19 Weekly Data") + val mod = VARX (y, hh) // create model for time series data - with exo + mod.trainNtest_x ()() // train on full and test on full +// println (mod.summary ()) +// val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 +// plotAll (yy_, yp, mod.modelName) + +// val tech = SelectionTech.Forward // pick one feature selection technique +// val tech = SelectionTech.Backward +// val tech = SelectionTech.Stepwise + +/* + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val k = cols.size + println (s"k = $k, n = ${mod.getX.dim2}") + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for VARX with tech", lines = true) + + banner ("Feature Importance") + println (s"$tech: rSq = $rSq") +*/ +// val imp = mod.importance (cols.toArray, rSq) +// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") + +/* + banner ("Run TnT on Best model") + val bmod = mod.getBest._3 // get the best model from feature selection + val (x_, y_, xtest, ytest) = VARX.split_TnT (bmod.getX, bmod.getY) + val yptest = bmod.trainNtest_x (x_, y_)(xtest, ytest)._1 // train on (x_, y_) and test on (xtest, ytest) + new Plot (null, ytest(?, 0), yptest(?, 0), 
s"${mod.modelName}, ytest vs. yptest", lines = true) +*/ + +end vARXTest5 + */ + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `vARXTest6` main function tests the `VARX` class on real data: + * Forecasting COVID-19 Weekly Data. Does Rolling Validation on variables. + * Determine the terms to include in the model using Stepwise on In-Sample. + * > runMain scalation.modeling.forecasting.multivar.vARXTest6 + * +@main def vARXTest6 (): Unit = + + val LAGS = 5 // number of lags + val h = 6 // forecasting horizon + + val vars = Array ("new_deaths", "icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val yy = Example_Covid.loadData_yy (vars) + val iskip = yy(?, 0).indexWhere (_ >= 6.0) // find day with at least 6 deaths + println (s"iskip = $iskip is first day with at least 6 deaths") + val y = yy(iskip until yy.dim) // trim away the first iskip rows + println (s"y.dims = ${y.dims}") + + hp("p") = LAGS + hp("q") = 2 + banner ("Test In-Sample VARX on COVID-19 Weekly Data") + val mod = VARX (y, h) // create model for time series data - with exo + mod.trainNtest_x ()() // train on full and test on full +// println (mod.summary ()) +// val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 +// plotAll (yy_, yp, mod.modelName) + +// val tech = SelectionTech.Forward // pick one feature selection technique +// val tech = SelectionTech.Backward +// val tech = SelectionTech.Stepwise + +/* + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val k = cols.size + println (s"k = $k, n = ${mod.getX.dim2}") + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for VARX with tech", lines = true) + + banner ("Feature Importance") + println (s"$tech: rSq = $rSq") +// val imp = mod.importance (cols.toArray, rSq) +// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") + + banner ("Run Rolling 
Validation on VARX Best model") + val bmod = mod.getBest._3 // get the best model from feature selection + VARX.rollValidate (bmod, 1) +*/ + +end vARXTest6 + */ + diff --git a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/Attention.scala b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/Attention.scala index fd1a9ed96..d047e44da 100644 --- a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/Attention.scala +++ b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/Attention.scala @@ -50,7 +50,7 @@ trait Attention (n_var: Int, n_mod: Int = 512, heads: Int = 8, n_v: Int = -1): * @param w_v the weight matrix for value V */ def queryKeyValue (x: MatrixD, w_q: MatrixD, w_k: MatrixD, w_v: MatrixD): (MatrixD, MatrixD, MatrixD) = - (x * w_q.transpose, x * w_k.transpose, x * w_v.transpose) + (x * w_q.ᵀ, x * w_k.ᵀ, x * w_v.ᵀ) end queryKeyValue //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -61,7 +61,7 @@ trait Attention (n_var: Int, n_mod: Int = 512, heads: Int = 8, n_v: Int = -1): */ def context (q_t: VectorD, k: MatrixD, v: MatrixD): VectorD = val root_n = sqrt (q_t.dim) - v.transpose * f_softmax.f_ (k * (q_t / root_n)) + v.ᵀ * f_softmax.f_ (k * (q_t / root_n)) end context //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -72,7 +72,7 @@ trait Attention (n_var: Int, n_mod: Int = 512, heads: Int = 8, n_v: Int = -1): */ def attention (q: MatrixD, k: MatrixD, v: MatrixD): MatrixD = val root_n = sqrt (q.dim2) - f_softmax.fM (q * (k.transpose / root_n)) * v + f_softmax.fM (q * (k.ᵀ / root_n)) * v end attention //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -97,7 +97,7 @@ trait Attention (n_var: Int, n_mod: Int = 512, heads: Int = 8, n_v: Int = -1): debug ("attentionMH", s"q.dims = ${q.dims}, k.dims: ${k.dims}, v.dims: ${v.dims}") debug ("attentionMH", s"w_q.dims = ${w_q.dims}, w_k.dims = ${w_k.dims}, 
w_v.dims = ${w_v.dims}") - debug ("attensionMH", "w_o.dims = ${w_o.dims}") + debug ("attensionMH", s"w_o.dims = ${w_o.dims}") println (s"(q * w_q(0)).dims: ${(q * w_q(0)).dims}") println (s"(k * w_k(0)).dims: ${(k * w_k(0)).dims}") @@ -106,7 +106,7 @@ trait Attention (n_var: Int, n_mod: Int = 512, heads: Int = 8, n_v: Int = -1): var att = attention (q * w_q(0), k * w_k(0), v * w_v(0)) for i <- 1 until heads do att = att ++^ attention (q * w_q(i), k * w_k(i), v * w_v(i)) - debug ("attentionMH", s"att = $att") + debug (s"attentionMH", s"att = $att") att * w_o end attentionMH @@ -140,7 +140,7 @@ object Attention: val m = x.dim // number of time points val n = x.dim2 // size of input x_t - println (s"m = $m, n= $n") + println (s"m = $m, n = $n") end Attention import Attention._ @@ -194,9 +194,9 @@ end attentionTest val n_var = x.dim2 // number of variables in input vector x_t println (s"n_var = $n_var") - val n_mod = 72 // size of each query/key vector (q_t, k_t, v_t) + val n_mod = 72 // size of each query/key vector (q_t, k_t) val heads = 3 // number of attention heads - val n_val = 28 + val n_val = 28 // size of the value vector v_t object att extends Attention (n_var, n_mod, heads, n_val) val w_q = att.rmg.gen diff --git a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/GRU.scala b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/GRU.scala index 8b9b5401f..7be38215b 100644 --- a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/GRU.scala +++ b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/GRU.scala @@ -190,7 +190,6 @@ class GRU (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 8) else yp(t) = V * h(t) + b_V // activation: id for forecasting L(t) = (y(t) - yp(t)).normSq // sse loss function - end if end for end forward @@ -219,16 +218,16 @@ class GRU (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 8) dIn = dh * (_1 - z(t)) * tanhD (c(t)) // input to tanh for candidate 
mixin c c += (dIn, x(t), h(t-1) * r(t)) // update partials for c mixin - dhr = Wc.𝐓 * dIn // 𝐓 => matrix transpose + dhr = Wc.ᵀ * dIn // ᵀ => matrix transpose dh = dhr * r(t) dIn = dhr * h(t-1) * sigmoidD (r(t)) // input to sigmoid reset gate r r += (dIn, x(t), h(t-1)) // update partials for r gate - dh += Wr.𝐓 * dIn + dh_bk * z(t) + dh += Wr.ᵀ * dIn + dh_bk * z(t) dIn = dh_bk * (c(t) - h(t-1)) * sigmoidD (z(t)) // input to sigmoid update gate z z += (dIn, x(t), h(t-1)) // update partials for z gate - dh += Wz.𝐓 * dIn + dh += Wz.ᵀ * dIn end for // end case @ time t = 0 -> use h_m1 for hidden state @@ -237,16 +236,16 @@ class GRU (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 8) dIn = dh * (_1 - z(0)) * tanhD (c(0)) c += (dIn, x(0), h_m1 * r(0)) // update partials for c mixin @ t = 0 - dhr = Wc.𝐓 * dIn + dhr = Wc.ᵀ * dIn dh_m1 += dhr * r(0) dIn = dhr * h_m1 * sigmoidD (r(0)) r += (dIn, x(0), h_m1) // update partials for r gate @ t = 0 - dh_m1 += Wr.𝐓 * dIn + dh * z(0) + dh_m1 += Wr.ᵀ * dIn + dh * z(0) dIn = dh * (c(0) - h_m1) * sigmoidD (z(0)) z += (dIn, x(0), h_m1) // update partials for z gate @ t = 0 - dh_m1 += Wz.𝐓 * dIn + dh_m1 += Wz.ᵀ * dIn end backward //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/LSTM.scala b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/LSTM.scala index 7deabeae6..0a1fb07db 100644 --- a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/LSTM.scala +++ b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/LSTM.scala @@ -123,7 +123,6 @@ class LSTM (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 8 else yp(t) = V * h(t) + b_V // activation: id for forecasting L(t) = (y(t) - yp(t)).normSq // sse loss function - end if end for end forward @@ -152,16 +151,16 @@ class LSTM (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 8 dIn = dh * (_1 
- z(t)) * tanhD (c(t)) // input to tanh for candidate mixin c c += (dIn, x(t), h(t-1) * r(t)) // update partials for c mixin - dhr = Wc.𝐓 * dIn // 𝐓 => matrix transpose + dhr = Wc.ᵀ * dIn // ᵀ => matrix transpose dh = dhr * r(t) dIn = dhr * h(t-1) * sigmoidD (r(t)) // input to sigmoid reset gate r r += (dIn, x(t), h(t-1)) // update partials for r gate - dh += Wr.𝐓 * dIn + dh_bk * z(t) + dh += Wr.ᵀ * dIn + dh_bk * z(t) dIn = dh_bk * (c(t) - h(t-1)) * sigmoidD (z(t)) // input to sigmoid update gate z z += (dIn, x(t), h(t-1)) // update partials for z gate - dh += Wz.𝐓 * dIn + dh += Wz.ᵀ * dIn end for // end case @ time t = 0 -> use h_m1 for hidden state @@ -170,16 +169,16 @@ class LSTM (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 8 dIn = dh * (_1 - z(0)) * tanhD (c(0)) c += (dIn, x(0), h_m1 * r(0)) // update partials for c mixin @ t = 0 - dhr = Wc.𝐓 * dIn + dhr = Wc.ᵀ * dIn dh_m1 += dhr * r(0) dIn = dhr * h_m1 * sigmoidD (r(0)) r += (dIn, x(0), h_m1) // update partials for r gate @ t = 0 - dh_m1 += Wr.𝐓 * dIn + dh * z(0) + dh_m1 += Wr.ᵀ * dIn + dh * z(0) dIn = dh * (c(0) - h_m1) * sigmoidD (z(0)) z += (dIn, x(0), h_m1) // update partials for z gate @ t = 0 - dh_m1 += Wz.𝐓 * dIn + dh_m1 += Wz.ᵀ * dIn end backward //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS.scala b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS.scala index 92d3f631e..a57d05c55 100644 --- a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS.scala +++ b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS.scala @@ -13,6 +13,7 @@ package modeling package forecasting package neuralforecasting +import scala.collection.mutable.{LinkedHashSet => LSET} import scala.math.max import scalation.mathstat._ @@ -50,7 +51,7 @@ class NeuralNet_3L4TS (x: MatrixD, y: MatrixD, hh: Int, 
n_exo: Int, fname: Array hparam: HyperParameter = hp ++ Optimizer.hp, f: AFF = f_tanh, f1: AFF = f_id, val itran: FunctionV2V = null, bakcast: Boolean = false) - extends Forecaster_D (x, y, hh, tRng, hparam, bakcast): // no automatic backcasting, @see `NeuralNet_3L4TS.apply` + extends Forecaster_D (x, y, hh, fname, tRng, hparam, bakcast): // no automatic backcasting, @see `NeuralNet_3L4TS.apply` private val debug = debugf ("NeuralNet_3L4TS", true) // debug function // private val flaw = flawf ("NeuralNet_3L4TS") // flaw function @@ -62,7 +63,7 @@ class NeuralNet_3L4TS (x: MatrixD, y: MatrixD, hh: Int, n_exo: Int, fname: Array private val nnet = NeuralNet_3L.rescale (x, y, fname, nz, hparam, f, f1) // delegate training to neural network - modelName = s"NeuralNet_3L4TS_${p}_${q}_${f.name}_${f1.name}" + _modelName = s"NeuralNet_3L4TS_${p}_${q}_${f.name}_${f1.name}" debug ("init", s"$modelName with $n_exo exogenous variables and additional term spec = $spec with nneg = $nneg") // debug ("init", s"[ x | y ] = ${x ++^ y}") @@ -114,9 +115,9 @@ class NeuralNet_3L4TS (x: MatrixD, y: MatrixD, hh: Int, n_exo: Int, fname: Array * @param size the size of dataset (full, train, or test) */ override def mod_resetDF (size: Int): Unit = - val dfm = max (1, nnet.parameter.dim - 1) // degrees of freedom for model - debug ("mod_resetDF", s"dfm = $dfm, df = ${size-dfm}") - resetDF (dfm, size - dfm) + val dfr = max (1, nnet.parameter.dim - 1) // degrees of freedom for regression/model + debug ("mod_resetDF", s"dfr = $dfr, df = ${size-dfr}") + resetDF (dfr, size - dfr) end mod_resetDF //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -134,6 +135,33 @@ class NeuralNet_3L4TS (x: MatrixD, y: MatrixD, hh: Int, n_exo: Int, fname: Array yp end predict + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build an `NeuralNet_3L4TS` model using the cols with the selected features. 
+ * @param cols the cols of the input matrix with selected features + * @param h the number of the horizon + */ + def getModel (cols: LSET [Int] = LSET.range (0, x.dim2)): NeuralNet_3L4TS = + new NeuralNet_3L4TS (x(?, cols), y, hh, n_exo, cols.toArray.map (fname (_)), tRng, nz, hparam, f, f1, itran, bakcast) + end getModel + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a single-horizon `Forecaster_Reg` model using the cols with the selected features. + * @param cols the cols of the input matrix with selected features + * @param h the number of the horizon + */ + def getModel_h (cols: LSET [Int] = LSET.range (0, x.dim2), h: Int = 1): Forecaster_Reg = + throw new UnsupportedOperationException ("getModel_h is not supported by NeuralNet_3L4TS") + end getModel_h + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert the underlying Regression Model to an `NeuralNet_3L4TS` Forecasting Model. + * @param mod the regression model to convert, e.g., the best model after feature selection + */ + def convertReg2Forc (mod: Model_FS = getBest.mod): NeuralNet_3L4TS = + new NeuralNet_3L4TS (mod.getX, MatrixD.fromVector (mod.getY), 1, n_exo, fname, + tRng, nz, hparam, f, f1, itran, bakcast) //, tForms) + end convertReg2Forc + end NeuralNet_3L4TS @@ -280,7 +308,7 @@ end neuralNet_3L4TSTest2 for j <- exo_vars.indices do new Plot (null, xe(?, j), null, s"x_$j (${exo_vars(j)}) vs. 
t", lines = true) - val p = 5 // number of lags for endogenous variable + val p = 5 // number of lags for endogenous variable val q = 2 // number of lags for exogenous variables val spec = 1 // number of trend terms @@ -297,17 +325,16 @@ end neuralNet_3L4TSTest2 // val (x_, y_, xx, yy) = NeuralNet_3L4TS.split_TnT (mod.getX, mod.getYY) // val (yp, qof) = mod.trainNtest_xx (x_, y_)(xx, yy) // train on (x_, y_) and test on (xx, yy) - val (yp, qof) = mod.trainNtest_xx ()() // train on full and test on full + mod.trainNtest_xx ()() // train on full and test on full mod.forecastAll (mod.getYy) // forecast h-steps ahead (h = 1 to hh) for all y mod.diagnoseAll (y, mod.getYf) // diagnose for all horizons /* banner (s"Feature Selection Technique: Stepwise") - val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA + val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for NeuralNet_3L4TS with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for NeuralNet_3L4TS with tech", lines = true) // println (mod.summary ()) banner ("Feature Importance") diff --git a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS.scala b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS.scala deleted file mode 100644 index d78e02319..000000000 --- a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS.scala +++ /dev/null @@ -1,297 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Feb 13 16:22:21 EST 2022 - * @see LICENSE (MIT style license file). 
- * - * @note Model: NeuralNet_XL for Time Series - */ - -package scalation -package modeling -package forecasting -package neuralforecasting - -// FIX - recode to follow the NeuralNet_3L4TS pattern - -import scala.math.max - -import scalation.mathstat._ - -import ActivationFun._ -import neuralnet.{NeuralNet_XL, Optimizer} - -import MakeMatrix4TS._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `NeuralNet_XL4TS` object supports X-layer regression-like neural networks - * for Time Series data. Given a response vector y, a predictor matrix x is built - * that consists of lagged y vectors. - * y_t = f2 (b dot f(a dot x)) - * where x = [y_{t-1}, y_{t-2}, ... y_{t-lags}]. - */ -object NeuralNet_XL4TS: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `NeuralNet_XL` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @param h the forecasting horizon (1, 2, ... h) - * @param nz the number of nodes in hidden layer (-1 => use default formula) - * @param hparam the hyper-parameters (use Optimizer.hp for default) - * @param f the array of activation function family for layers k->k+1 - */ - def apply (y: VectorD, lags: Int, h: Int, nz: Int = -1, - hparam: HyperParameter = Optimizer.hp, - f: Array [AFF] = Array (f_eLU, f_eLU, f_tanh)): NeuralNet_XL = - val hh = 6 - val bakcast = false - val xy = ARX.buildMatrix (null, y, hparam, bakcast) - val yy = makeMatrix4Y (y, hh, bakcast) - - val mod = NeuralNet_XL.rescale (xy, yy, null, null, hparam, f) - mod.modelName = s"NeuralNet_XL4TS_$lags" - mod - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `NeuralNet_XL` object from a response vector. 
The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * In addition, lagged exogenous variables are added. - * ARX_D.buildMatrix4TS (xe: MatrixD, y: VectorD, p: Int, q: Int, spec: Int, lwave: Double, - * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @parax ex the input matrix for exogenous variables (one per column) - * @param h the forecasting horizon (1, 2, ... h) - * @param nz the number of nodes in hidden layer (-1 => use default formula) - * @param hparam the hyper-parameters (use Optimizer.hp for default) - * @param f the array of activation function family for layers k->k+1 - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def exo (y: VectorD, lags: Int, ex: MatrixD, h: Int, nz: Int = -1, - hparam: HyperParameter = Optimizer.hp, - f: Array [AFF] = Array (f_eLU, f_eLU, f_tanh)) - (elag1: Int = max (1, lags / 5), elag2: Int = max (1, lags)): NeuralNet_XL = - val hh = 6 - val bakcast = false - val xy = ARX.buildMatrix (ex, y, hparam, bakcast) - val yy = makeMatrix4Y (y, hh, bakcast) - - println (s"exo: xy.dims = ${xy.dims}, yy.dim = ${yy.dim}") -// println (s"exo: xy = $xy \n yy = $yy") - - val mod = NeuralNet_XL.rescale (xy, yy, null, null, hparam, f) - mod.modelName = s"NeuralNet_XL4TS_$lags" - mod - end exo - -end NeuralNet_XL4TS - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `neuralNet_XL4TSTest` main function tests the `NeuralNet_XL4TS` class. - * This test is used to CHECK that the buildMatrix4TS function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. 
- * > runMain scalation.modeling.forecasting.neuralforecasting.neuralNet_XL4TSTest - */ -@main def neuralNet_XL4TSTest (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - val h = 3 // the forecasting horizon - - for p <- 5 to 5 do // autoregressive hyper-parameter p - banner (s"Test: NeuralNet_XL4TS with $p lags") - val mod = NeuralNet_XL4TS (y, p, h) // create model for time series data - mod.trainNtest2 ()() // train the model on full dataset - println (mod.summary) - - val yy = mod.getYY - val yp = mod.predict (mod.getX) - for j <- yp.indices2 do - new Plot (null, yy(?, j), yp(?, j), s"yy_$j vs. yp_$j for ${mod.modelName} with $p lags", lines = true) - end for - end for - -end neuralNet_XL4TSTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `neuralNet_XL4TSTest2` main function tests the `NeuralNet_XL4TS` class on real data: - * Forecasting lake levels. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.neuralforecasting.neuralNet_XL4TSTest2 - */ -@main def neuralNet_XL4TSTest2 (): Unit = - - import Example_LakeLevels.y - val h = 3 // the forecasting horizon - - for p <- 1 to 10 do // autoregressive hyper-parameter p - banner (s"Test: NeuralNet_XL4TS with $p lags") - val mod = NeuralNet_XL4TS (y, p, h) // create model for time series data - mod.trainNtest2 ()() // train the model on full dataset - println (mod.summary) - - banner ("Predictions/Forecasts") // direct forecasting technique - val yy = mod.getYY - val yf = mod.predict (mod.getX) - for k <- yf.indices2 do - new Plot (null, yy(?, k), yf(?, k), s"yy_$k vs. 
yf_$k for ${mod.modelName} with $p lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - end for - -end neuralNet_XL4TSTest2 - -import Example_Covid.{loadData, response} - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `neuralNet_XL4TSTest3` main function tests the `NeuralNet_XL4TS` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous variable only. - * Does In-Sample Testing (In_ST). - * Determines the terms to include in the model using Feature Selection. - * > runMain scalation.modeling.forecasting.neuralforecasting.neuralNet_XL4TSTest3 - */ -@main def neuralNet_XL4TSTest3 (): Unit = - - val LAGS = 10 // number of lags - val h = 6 // forecasting horizon - - val (x, y) = loadData (Array ("new_cases", "hosp_patients", "icu_patients"), response) - - println (s"x.dims = ${x.dims}, y.dim = ${y.dim}") - -// val f_ : Array [AFF] = Array (f_eLU, f_eLU, f_tanh) - val f_ : Array [AFF] = Array (f_id, f_eLU, f_tanh) - Optimizer.hp ("eta") = 0.25 - - banner ("In-ST Test: NeuralNet_XL4TS on COVID-19 Weekly Data") - val mod = NeuralNet_XL4TS (y, LAGS, h, f = f_) // create model for time series data - -// val (x_, y_, xx, yy) = NeuralNet_XL4TS.split_TnT (mod.getX, mod.getYY) -// val (yp, qof) = mod.trainNtest2 (x_, y_)(xx, yy) // train on (x_, y_) and test on (xx, yy) - - val (yp, qof) = mod.trainNtest2 ()() // train on full and test on full - val yy = y(LAGS until y.dim) - new Plot (null, yy, yp(?, 0), s"${mod.modelName}, yy vs. 
yp", lines = true) - -/* - banner (s"Feature Selection Technique: Stepwise") - val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for NeuralNet_XL4TS with tech", lines = true) -// println (mod.summary ()) - - banner ("Feature Importance") - println (s"tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") -*/ - -end neuralNet_XL4TSTest3 - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `neuralNet_XL4TSTest4` main function tests the `NeuralNet_XL4TS` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Does In-Sample Testing (In-ST). - * Determines the terms to include in the model using Feature Selection. - * > runMain scalation.modeling.forecasting.neuralforecasting.neuralNet_XL4TSTest4 - */ -@main def neuralNet_XL4TSTest4 (): Unit = - - val LAGS = 10 // number of lags - val h = 6 // forecasting horizon - - val (_x, _y) = loadData (Array ("new_cases", "hosp_patients", "icu_patients"), response) -// val (x, y) = (_x, _y) // full - val (x, y) = (_x(0 until 116), _y(0 until 116)) // clip the flat end - - println (s"x.dims = ${x.dims}, y.dim = ${y.dim}") - -// val f_ : Array [AFF] = Array (f_eLU, f_eLU, f_tanh) - val f_ : Array [AFF] = Array (f_id, f_eLU, f_tanh) - Optimizer.hp ("eta") = 0.07 - - banner ("In-ST: Test NeuralNet_XL4TS on COVID-19 Weekly Data") - val mod = NeuralNet_XL4TS.exo (y, LAGS, x, h, f = f_)(1, LAGS+1) // create model for time series data - -// val (x_, y_, xx, yy) = NeuralNet_XL4TS.split_TnT (mod.getX, mod.getYY) -// val (yp, qof) = mod.trainNtest2 (x_, y_)(xx, yy) // train on (x_, y_) and test on (xx, yy) - - val (yp, qof) = mod.trainNtest2 ()() // train on full and test on full 
- val yy = y(LAGS until y.dim) - new Plot (null, yy, yp(?, 0), s"${mod.modelName}, yy vs. yp", lines = true) - -/* - banner (s"Feature Selection Technique: Stepwise") - val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for NeuralNet_XL4TS with tech", lines = true) -// println (mod.summary ()) - - banner ("Feature Importance") - println (s"tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") -*/ - -end neuralNet_XL4TSTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `neuralNet_XL4TSTest4` main function tests the `NeuralNet_XL4TS` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Does Train-n-Test Split (TnT) Testing. - * Determines the terms to include in the model using Feature Selection. 
- * > runMain scalation.modeling.forecasting.neuralforecasting.neuralNet_XL4TSTest4 - */ -@main def neuralNet_XL4TSTest5 (): Unit = - - val LAGS = 10 // number of lags - val h = 6 // forecasting horizon - - val (_x, _y) = loadData (Array ("new_cases", "hosp_patients", "icu_patients"), response) -// val (x, y) = (_x, _y) // full - val (x, y) = (_x(0 until 116), _y(0 until 116)) // clip the flat end - - println (s"x.dims = ${x.dims}, y.dim = ${y.dim}") - -// val f_ : Array [AFF] = Array (f_eLU, f_eLU, f_tanh) - val f_ : Array [AFF] = Array (f_id, f_eLU, f_tanh) - Optimizer.hp ("eta") = 0.2 - - banner ("TnT Test: NeuralNet_XL4TS on COVID-19 Weekly Data") - val mod = NeuralNet_XL4TS.exo (y, LAGS, x, h, f = f_)(1, LAGS+1) // create model for time series data - -// val (x_, y_, xx, yy) = ARX_D.split_TnT (mod.getX, mod.getYY) -// val (yp, qof) = mod.trainNtest2 (x_, y_)(xx, yy) // train on (x_, y_) and test on (xx, yy) -// new Plot (null, yy(?, 0), yp(?, 0), s"${mod.modelName}, yy vs. yp", lines = true) - - mod.trainNtest2 ()() // train on (x_, y_) and test on (xx, yy) -// mod.rollValidate () // FIX -/* - banner (s"Feature Selection Technique: Stepwise") - val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for NeuralNet_XL4TS with tech", lines = true) -// println (mod.summary ()) - - banner ("Feature Importance") - println (s"tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") -*/ - -end neuralNet_XL4TSTest5 - diff --git a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS.scalaa b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS.scalaa new file mode 100644 index 000000000..bc93807c1 --- /dev/null +++ 
b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS.scalaa @@ -0,0 +1,310 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Sun Feb 13 16:22:21 EST 2022 + * @see LICENSE (MIT style license file). + * + * @note Model: NeuralNet_XL for Time Series + */ + +package scalation +package modeling +package forecasting +package neuralforecasting + +// FIX - recode to follow the NeuralNet_3L4TS pattern + +import scala.annotation.unused + +import scalation.mathstat._ + +import ActivationFun._ +import neuralnet.{NeuralNet_XL, Optimizer} + +import MakeMatrix4TS._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `NeuralNet_XL4TS` class supports X-layer regression-like neural networks + * for Time Series data. Given a response vector y, a predictor matrix x is built + * that consists of lagged y vectors. + * y_t = f2 (b dot f(a dot x)) + * where x = [y_{t-1}, y_{t-2}, ... y_{t-lags}]. + * x is formed from the lagged y vectors as columns in matrix x. + * @param y the original un-expanded output/response vector + * @param lags the maximum lag included (inclusive) + * @param h the forecasting horizon (1, 2, ... h) + * @param nz the number of nodes in hidden layer (-1 => use default formula) + * @param hparam the hyper-parameters (use Optimizer.hp for default) + * @param f the array of activation function family for layers k->k+1 + */ +class NeuralNet_XL4TS (y: VectorD, lags: Int, h: Int, @unused nz: Int = -1, // FIX + hparam: HyperParameter = Optimizer.hp, + f: Array [AFF] = Array (f_eLU, f_eLU, f_tanh)) + +end NeuralNet_XL4TS + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `NeuralNet_XL4TS` object supports X-layer regression-like neural networks + * for Time Series data. Given a response vector y, a predictor matrix x is built + * that consists of lagged y vectors. 
+ * y_t = f2 (b dot f(a dot x)) + * where x = [y_{t-1}, y_{t-2}, ... y_{t-lags}]. + */ +object NeuralNet_XL4TS: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `NeuralNet_XL` object from a response vector. The input/data matrix + * x is formed from the lagged y vectors as columns in matrix x. + * @param y the original un-expanded output/response vector + * @param lags the maximum lag included (inclusive) + * @param h the forecasting horizon (1, 2, ... h) + * @param nz the number of nodes in hidden layer (-1 => use default formula) + * @param hparam the hyper-parameters (use Optimizer.hp for default) + * @param f the array of activation function family for layers k->k+1 + */ + def apply (y: VectorD, lags: Int, h: Int, @unused nz: Int = -1, // FIX + hparam: HyperParameter = Optimizer.hp, + f: Array [AFF] = Array (f_eLU, f_eLU, f_tanh)): NeuralNet_XL = + val bakcast = false + val (xy, _) = ARX.buildMatrix (null, y, hparam, bakcast) + val yy = makeMatrix4Y (y, h, bakcast) + + val mod = NeuralNet_XL.rescale (xy, yy, null, null, hparam, f) + mod.modelName = s"NeuralNet_XL4TS_$lags" + mod + end apply + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create an `NeuralNet_XL` object from a response vector. The input/data matrix + * x is formed from the lagged y vectors as columns in matrix x. + * In addition, lagged exogenous variables are added. + * ARX_D.buildMatrix4TS (xe: MatrixD, y: VectorD, p: Int, q: Int, spec: Int, lwave: Double, + * @param y the original un-expanded output/response vector + * @param lags the maximum lag included (inclusive) + * @parax ex the input matrix for exogenous variables (one per column) + * @param h the forecasting horizon (1, 2, ... 
h) + * @param nz the number of nodes in hidden layer (-1 => use default formula) + * @param hparam the hyper-parameters (use Optimizer.hp for default) + * @param f the array of activation function family for layers k->k+1 + */ + def exo (y: VectorD, lags: Int, ex: MatrixD, h: Int, @unused nz: Int = -1, // FIX + hparam: HyperParameter = Optimizer.hp, + f: Array [AFF] = Array (f_eLU, f_eLU, f_tanh)): NeuralNet_XL = + val bakcast = false + val (xy, _) = ARX.buildMatrix (ex, y, hparam, bakcast) + val yy = makeMatrix4Y (y, h, bakcast) + + println (s"exo: xy.dims = ${xy.dims}, yy.dim = ${yy.dim}") +// println (s"exo: xy = $xy \n yy = $yy") + + val mod = NeuralNet_XL.rescale (xy, yy, null, null, hparam, f) + mod.modelName = s"NeuralNet_XL4TS_$lags" + mod + end exo + +end NeuralNet_XL4TS + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `neuralNet_XL4TSTest` main function tests the `NeuralNet_XL4TS` class. + * This test is used to CHECK that the buildMatrix4TS function is working correctly. + * May get NaN for some maximum lags (p) due to multi-collinearity. + * > runMain scalation.modeling.forecasting.neuralforecasting.neuralNet_XL4TSTest + */ +@main def neuralNet_XL4TSTest (): Unit = + + val m = 30 + val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function + val h = 3 // the forecasting horizon + + for p <- 5 to 5 do // autoregressive hyper-parameter p + banner (s"Test: NeuralNet_XL4TS with $p lags") + val mod = NeuralNet_XL4TS (y, p, h) // create model for time series data + mod.trainNtest2 ()() // train the model on full dataset + println (mod.summary) + + val yy = mod.getYY + val yp = mod.predict (mod.getX) + for j <- yp.indices2 do + new Plot (null, yy(?, j), yp(?, j), s"yy_$j vs. 
yp_$j for ${mod.modelName} with $p lags", lines = true) + end for + end for + +end neuralNet_XL4TSTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `neuralNet_XL4TSTest2` main function tests the `NeuralNet_XL4TS` class on real data: + * Forecasting lake levels. + * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.neuralforecasting.neuralNet_XL4TSTest2 + */ +@main def neuralNet_XL4TSTest2 (): Unit = + + import Example_LakeLevels.y + val h = 3 // the forecasting horizon + + for p <- 1 to 10 do // autoregressive hyper-parameter p + banner (s"Test: NeuralNet_XL4TS with $p lags") + val mod = NeuralNet_XL4TS (y, p, h) // create model for time series data + mod.trainNtest2 ()() // train the model on full dataset + println (mod.summary) + + banner ("Predictions/Forecasts") // direct forecasting technique + val yy = mod.getYY + val yf = mod.predict (mod.getX) + for k <- yf.indices2 do + new Plot (null, yy(?, k), yf(?, k), s"yy_$k vs. yf_$k for ${mod.modelName} with $p lags", lines = true) + end for + println (s"yf = $yf") + println (s"yf.dims = ${yf.dims}") + end for + +end neuralNet_XL4TSTest2 + +import Example_Covid.{loadData, response} + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `neuralNet_XL4TSTest3` main function tests the `NeuralNet_XL4TS` class on real data: + * Forecasts COVID-19 Weekly Data using endogenous variable only. + * Does In-Sample Testing (In_ST). + * Determines the terms to include in the model using Feature Selection. 
+ * > runMain scalation.modeling.forecasting.neuralforecasting.neuralNet_XL4TSTest3 + */ +@main def neuralNet_XL4TSTest3 (): Unit = + + val LAGS = 10 // number of lags + val h = 6 // forecasting horizon + + val (x, y) = loadData (Array ("new_cases", "hosp_patients", "icu_patients"), response) + + println (s"x.dims = ${x.dims}, y.dim = ${y.dim}") + +// val f_ : Array [AFF] = Array (f_eLU, f_eLU, f_tanh) + val f_ : Array [AFF] = Array (f_id, f_eLU, f_tanh) + Optimizer.hp ("eta") = 0.25 + + banner ("In-ST Test: NeuralNet_XL4TS on COVID-19 Weekly Data") + val mod = NeuralNet_XL4TS (y, LAGS, h, f = f_) // create model for time series data + +// val (x_, y_, xx, yy) = NeuralNet_XL4TS.split_TnT (mod.getX, mod.getYY) +// val (yp, qof) = mod.trainNtest2 (x_, y_)(xx, yy) // train on (x_, y_) and test on (xx, yy) + + val yp = mod.trainNtest2 ()()._1 // train on full and test on full + val yy = y(LAGS until y.dim) + new Plot (null, yy, yp(?, 0), s"${mod.modelName}, yy vs. yp", lines = true) + +/* + banner (s"Feature Selection Technique: Stepwise") + val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA + val k = cols.size + println (s"k = $k, n = ${mod.getX.dim2}") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for NeuralNet_XL4TS with tech", lines = true) +// println (mod.summary ()) + + banner ("Feature Importance") + println (s"tech: rSq = $rSq") +// val imp = mod.importance (cols.toArray, rSq) +// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") +*/ + +end neuralNet_XL4TSTest3 + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `neuralNet_XL4TSTest4` main function tests the `NeuralNet_XL4TS` class on real data: + * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. + * Does In-Sample Testing (In-ST). + * Determines the terms to include in the model using Feature Selection. 
+ * > runMain scalation.modeling.forecasting.neuralforecasting.neuralNet_XL4TSTest4 + */ +@main def neuralNet_XL4TSTest4 (): Unit = + + val LAGS = 10 // number of lags + val h = 6 // forecasting horizon + + val (_x, _y) = loadData (Array ("new_cases", "hosp_patients", "icu_patients"), response) +// val (x, y) = (_x, _y) // full + val (x, y) = (_x(0 until 116), _y(0 until 116)) // clip the flat end + + println (s"x.dims = ${x.dims}, y.dim = ${y.dim}") + +// val f_ : Array [AFF] = Array (f_eLU, f_eLU, f_tanh) + val f_ : Array [AFF] = Array (f_id, f_eLU, f_tanh) + Optimizer.hp ("eta") = 0.07 + + banner ("In-ST: Test NeuralNet_XL4TS on COVID-19 Weekly Data") + val mod = NeuralNet_XL4TS.exo (y, LAGS, x, h, f = f_) // create model for time series data + +// val (x_, y_, xx, yy) = NeuralNet_XL4TS.split_TnT (mod.getX, mod.getYY) +// val (yp, qof) = mod.trainNtest2 (x_, y_)(xx, yy) // train on (x_, y_) and test on (xx, yy) + + val yp = mod.trainNtest2 ()()._1 // train on full and test on full + val yy = y(LAGS until y.dim) + new Plot (null, yy, yp(?, 0), s"${mod.modelName}, yy vs. yp", lines = true) + +/* + banner (s"Feature Selection Technique: Stepwise") + val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA + val k = cols.size + println (s"k = $k, n = ${mod.getX.dim2}") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for NeuralNet_XL4TS with tech", lines = true) +// println (mod.summary ()) + + banner ("Feature Importance") + println (s"tech: rSq = $rSq") +// val imp = mod.importance (cols.toArray, rSq) +// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") +*/ + +end neuralNet_XL4TSTest4 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `neuralNet_XL4TSTest4` main function tests the `NeuralNet_XL4TS` class on real data: + * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. + * Does Train-n-Test Split (TnT) Testing. 
+ * Determines the terms to include in the model using Feature Selection. + * > runMain scalation.modeling.forecasting.neuralforecasting.neuralNet_XL4TSTest4 + */ +@main def neuralNet_XL4TSTest5 (): Unit = + + val LAGS = 10 // number of lags + val h = 6 // forecasting horizon + + val (_x, _y) = loadData (Array ("new_cases", "hosp_patients", "icu_patients"), response) +// val (x, y) = (_x, _y) // full + val (x, y) = (_x(0 until 116), _y(0 until 116)) // clip the flat end + + println (s"x.dims = ${x.dims}, y.dim = ${y.dim}") + +// val f_ : Array [AFF] = Array (f_eLU, f_eLU, f_tanh) + val f_ : Array [AFF] = Array (f_id, f_eLU, f_tanh) + Optimizer.hp ("eta") = 0.2 + + banner ("TnT Test: NeuralNet_XL4TS on COVID-19 Weekly Data") + val mod = NeuralNet_XL4TS.exo (y, LAGS, x, h, f = f_) // create model for time series data + +// val (x_, y_, xx, yy) = ARX_D.split_TnT (mod.getX, mod.getYY) +// val (yp, qof) = mod.trainNtest2 (x_, y_)(xx, yy) // train on (x_, y_) and test on (xx, yy) +// new Plot (null, yy(?, 0), yp(?, 0), s"${mod.modelName}, yy vs. 
yp", lines = true) + + mod.trainNtest2 ()() // train on (x_, y_) and test on (xx, yy) +// mod.rollValidate () // FIX +/* + banner (s"Feature Selection Technique: Stepwise") + val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA + val k = cols.size + println (s"k = $k, n = ${mod.getX.dim2}") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for NeuralNet_XL4TS with tech", lines = true) +// println (mod.summary ()) + + banner ("Feature Importance") + println (s"tech: rSq = $rSq") +// val imp = mod.importance (cols.toArray, rSq) +// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") +*/ + +end neuralNet_XL4TSTest5 + diff --git a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/RNN.scala b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/RNN.scala index ae17734c6..edad345ee 100644 --- a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/RNN.scala +++ b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/RNN.scala @@ -1,14 +1,20 @@ + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Leela Venkata Sai Vukkurthi +/** @author Praveen Rangavajhula, John Miller * @version 2.0 - * @date Mon Dec 2 22:55:42 EST 2024 + * @date Mon Aug 4 21:12:40 EDT 2025 * @see LICENSE (MIT style license file). - ^ + * * @note Model: Recurrent Neural Network (RNN) for Multivariate Time Series + */ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Standard RNN (GRU/LSTM) Formats for Multivariate Time Series Data: + * Matlab: * - * Standard RNN (GRU/LSTM) Formats for Multivariate Time Series Data. * Keras: 3D format expected by GRU/LSTM is [samples, timesteps, features]. 
- * => indexing [timestamp t, lags k, variable j] + * => indexing [timestamp t, lags k, variable j] + * PyTorch: */ package scalation @@ -16,116 +22,83 @@ package modeling package forecasting package neuralforecasting -import scalation.mathstat.{MatrixD, Plot, VectorD} -import scalation.random.{NormalMat, NormalVec_c} -import ActivationFun.{softmax_, tanh_} -import MatrixD.outer +import scalation.mathstat.{MatrixD, Plot, TensorD, VectorD} +import scalation.modeling.forecasting.MakeMatrix4TS.makeMatrix4L //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `RNN` class implements Recurrent Neural Network (RNN) via Back Propagation Through - * Time (BPTT). At each time point x_t, there is a vector representing several variables - * or the encoding of a word. Intended to work for guessing the next work in a sentence - * or for multi-horizon forecasting. - * Time series: (x_t: t = 0, 1, ..., n_seq-1) where n_seq is the number of time points/words - * - * @param x the input sequence/time series - * @param y the output sequence/time series - * @param fname the feature/variable names - * @param n_mem the size for hidden state (h) (dimensionality of memory) + * Time (BPTT). At each time point x_t, there is a vector representing several variables + * or the encoding of a word. Intended to work for guessing the next work in a sentence + * or for multi-horizon forecasting. 
+ * @param x the input sequence/time series + * @param y the output sequence/time series + * @param y_orig the original target matrix before any preprocessing + * @param fname the feature/variable names + * @param n_mem the size for hidden state (h) (dimensionality of memory) */ -class RNN (x: MatrixD, y: MatrixD, fname: Array[String] = null, n_mem: Int = 8) - extends FitM: - - private val CLASSIF = false // whether to apply classification (e.g., guess next word) or forecast a value - private val max_epochs = 55 // maximum number of iterations - private val eta = 0.27 // the learning rate (use 0.25 for rNNTest) - private val batch_size = 64 // batch size for mini-batch training - private val truncation_length = 63 // Truncate the back-propagation through time - private val β = 0 // Momentum hyper parameter - private val threshold = 5.0 // Threshold for gradient clipping - - private val n_seq = x.dim // e.g., 20, number of words in a sentence (including start and end symbol) - private val n_var = x.dim2 // e.g., 64, number of variables or distinct words (vocabulary size)\ - - // since we will only use one sentence for training, - // this is also the total steps during training. 
-// private val _1 = VectorD.one (n_mem) // vector of all ones for e.g., 1 - z - - // initialize parameters (weights and biases) - private val rmg1 = NormalMat(n_mem, n_var, 0.0, math.sqrt(2.0 / (n_mem + n_var))) - private val rmg2 = NormalMat(n_mem, n_mem, 0.0, math.sqrt(2.0 / (n_mem + n_mem))) -// private val rmg3 = NormalMat(n_var, n_mem, 0.0, math.sqrt(2.0 / (n_var + n_mem))) // Original initialization for V matrix and b_y +class RNN (override val x: TensorD, override val y: TensorD, val y_orig: MatrixD, + fname: Array[String] = null, override val n_mem: Int = 8) + extends RNNCell, FitM: - private val rmg4 = NormalMat(y.dim2, n_mem, 0.0, math.sqrt(2.0 / (y.dim2 + n_mem))) + override val CLASSIF = false // Indicates whether the model is for classification (false for forecasting) +// FIX -- use hyper-parameters + override val max_epochs = 100 // Maximum number of training epochs + override val eta = 0.0054 // Learning rate for the optimizer + override val batch_size = 32 // Size of each training batch + override val truncation_length = 45 // Length of the sequence truncation for backpropagation through time + override val β = 0.9 // Momentum term for the optimizer + override val threshold = 100.0 // Threshold for gradient clipping to avoid exploding gradients - private val rvg1 = NormalVec_c(n_mem, 0.0, 0.01) // random (Normal) vector generators -// private val rvg3 = NormalVec_c (n_var, 0.0, 0.01) + override val seq_length: Int = x.dim // Length of the input sequences + override val n_var: Int = x.dim2 // Number of variables in the input tensor + override val n_seq: Int = x.dim3 // Number of sequences in the input tensor - private val U = rmg1.gen // parameters for computing the hidden state - private val W = rmg2.gen - private val b_h = new VectorD(n_mem) // bias vector for hidden state + override val loss_per_epoch: VectorD = new VectorD (max_epochs) // Vector to store the loss value for each epoch + override val L: VectorD = new VectorD (seq_length) // Vector 
to store the loss value for each time step in a sequence - // decoder for generating output - private val V = rmg4.gen // decoder weight matrix - private val b_y = new VectorD (y.dim2) // decoder bias vector: Original initialization for b_y is n_var + private val yp: TensorD = new TensorD (seq_length, y.dim2, n_seq) // Tensor to store the predictions made by the model + private val L_epoch = new VectorD (max_epochs) // Vector to store the loss value for each epoch - private val h_m1 = rvg1.gen // hidden state @ t = -1 (m1 means minus 1) - private val h = new MatrixD (n_seq, n_mem) // hidden state h - private val yp = new MatrixD (n_seq, y.dim2) // predicted output: yp.dim2 is originally n_var - private val L = new VectorD (n_seq) // store loss function values - - // the partial derivative of weights and biases - private var db_y: VectorD = new VectorD(b_y.dim) - private val db_h: VectorD = new VectorD(b_h.dim) - private val dV = new MatrixD(V.dim, V.dim2) - private val dW = new MatrixD(W.dim, W.dim2) - private val dU = new MatrixD(U.dim, U.dim2) - - // initialize velocity parameters for momentum - private val vU = new MatrixD(U.dim, U.dim2) - private val vW = new MatrixD(W.dim, W.dim2) - private val vb_h = new VectorD(b_h.dim) - private val vV = new MatrixD(V.dim, V.dim2) - private val vb_y = new VectorD(b_y.dim) - - // parameter grouping for easy access - private case class ParamGroup (var param: MatrixD, var velocity: MatrixD, var grad: MatrixD) // For matrices - - private case class ParamGroupVector (var param: VectorD, var velocity: VectorD, var grad: VectorD) // For vectors - - private val matrixParams = List (ParamGroup(U, vU, dU), - ParamGroup(W, vW, dW), - ParamGroup(V, vV, dV)) - - private val vectorParams = List (ParamGroupVector(b_h, vb_h, db_h), - ParamGroupVector(b_y, vb_y, db_y)) - - if fname != null then println (s"RNN: fname = $fname") - - private val L_epoch = new VectorD (max_epochs) // store loss function values for each epoch + if fname != null 
then println (s"RNN: fname = $fname") // Print the file names if provided //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the RNN using batch gradient descent. IN PROGRESS + /** Trains the RNN model using the provided input and target tensors. + * This function implements the training loop for the RNN model. It iterates over the specified + * number of epochs, divides the data into batches, performs forward and backward propagation, + * clips gradients to avoid exploding gradients, and updates the model parameters. + * @param x the input tensor of shape [seq_length, n_var, n_seq] + * @param y the target tensor of shape [seq_length, output_dim, n_seq] + * @return Unit (no return value, updates are made to class members) */ - def train(): Unit = + override def train (x: TensorD = x, y: TensorD = y): Unit = for it <- 1 to max_epochs do - - val n_batches = math.ceil (n_seq / batch_size).toInt - for i <- 0 to n_batches do - val batch_start = i * batch_size - val batch_end = math.min (n_seq - 1, (i + 1) * batch_size - 1) + val n_batches = math.ceil (n_seq.toDouble / batch_size).toInt + for i <- 0 until n_batches do + val batch_start = i * batch_size + val batch_end = math.min (n_seq - 1, (i + 1) * batch_size - 1) + val current_batch_size = batch_end - batch_start + 1 println (s"batch_start = $batch_start, batch_end = $batch_end") - forward (batch_start, batch_end) // forward propagate: get intermediate and output results + val x_batch: TensorD = x(null, null, (batch_start, batch_end + 1)) + val y_batch: TensorD = y(null, null, (batch_start, batch_end + 1)) - backward (batch_start, batch_end) // back propagate: calculate gradients (partial derivatives) + val H_batch = if current_batch_size == batch_size then H else H.slice (current_batch_size) - clip_gradients(threshold) // clip gradients to avoid exploding gradients + zero_gradients () - update_params (batch_end - batch_start + 1, leaky = true) // update parameters (weights 
and biases) + forward (x_batch, y_batch, batch_start, batch_end, L, H_batch) // forward propagate: get intermediate and output results + + backward (x_batch, y_batch, batch_start, batch_end, H_batch) // back propagate: calculate gradients (partial derivatives) + + clip_gradients (threshold) // clip gradients to avoid exploding gradients + + update_params (current_batch_size, leaky = false) // update parameters (weights and biases) + + H.reset () + H_batch.reset () end for - val mse = L.sum / n_seq // mean squared error + val mse = L.sum / seq_length // mean squared error println (s"train: for epoch $it: loss function L = $L") banner (s"train: for epoch $it: sum of loss function L.sum = ${L.sum}") banner (s"train: for epoch $it: mean squared error = $mse") @@ -134,250 +107,261 @@ class RNN (x: MatrixD, y: MatrixD, fname: Array[String] = null, n_mem: Int = 8) end train //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test the RNN predictions. - * @param original_extremes + /** Performs the forward pass of the RNN, computing the predictions and loss for the network. + * This function implements the forward propagation for a batch of sequences. + * It calculates the predictions for each time step and computes the loss + * based on the difference between the predictions and the target values. 
+ * @param x the input tensor of shape [seq_length, n_var, batch_size] + * @param y the target tensor of shape [seq_length, output_dim, batch_size] + * @param batch_start the starting index of the current batch in the full dataset + * @param batch_end the ending index (inclusive) of the current batch in the full dataset + * @param L the loss vector to store the loss values for each time step + * @param H the hidden state tensor, default is the class member H + * @return Unit (no return value, updates are made to class members) */ - def test (original_extremes: (Double, Double) = (1.0, 1.0)): Unit = - new Plot(null, y(?, 0), yp(?, 0), "Plot of y vs yp for RNN", lines = true) + def forward (x: TensorD = x, y: TensorD = y, batch_start: Int, batch_end: Int, L: VectorD = L, + H: HiddenState = H): Unit = + val yp_batch = yp(null, null, (batch_start, batch_end + 1)) // Extract the batch of predictions + for t <- 0 until seq_length do // Iterate over each time step in the sequence + val H_prev = get_previous_hidden_state(H, t) // Get the previous hidden state - for col <- 0 until y.dim2 do - val y_unscaled = unscaleV (original_extremes, (-2.0, 2.0))(y(?, col)) - val yp_unscaled = unscaleV (original_extremes, (-2.0, 2.0))(yp(?, col)) - banner ("smape value = " + smapeF(y_unscaled, yp_unscaled)) // calculate the Symmetric Mean Absolute Percentage Error - banner ("mae value = " + Fit.mae(y_unscaled, yp_unscaled)) // calculate the Mean Absolute Error - end for - - new Plot (VectorD.range(0, max_epochs), L_epoch, null, "Plot of Loss Function vs Epoch", lines = true) + H.param (?, ?, t) = tanh_ (U.param * x(t) + W.param * H_prev +^ b_h.param) // Compute current hidden state using the tanh activation function - banner ("minimum loss epoch = " + L_epoch.argmin()) - banner ("minimum loss value = " + L_epoch.min()) - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forward propagate calculates yp, loss and intermediate variables for 
each step. - * @param batch_start - * @param batch_end - */ - def forward (batch_start: Int, batch_end: Int): Unit = - - for t <- batch_start to batch_end do - - val h_pre = if t == 0 then h_m1 else h(t - 1) // get previous hidden state - h(t) = tanh_(U * x(t) + W * h_pre + b_h) // compute hidden state if CLASSIF then - yp(t) = softmax_(V * h(t) + b_y) // activation: softmax for classification - L(t) = (-y(t) * log_(yp(t))).sum // cross-entropy loss function + yp_batch(t) = softmax_m ((V.param * H.param(?, ?, t)) + b_y.param) // Compute predictions and loss based on the task (classif or fcast) + L(t) = (-y(t) * log_ (yp_batch(t))).sum // cross-entropy loss function else - yp(t) = V * h(t) + b_y // activation: id for forecasting - L(t) = ((y(t) - yp(t)).normSq) / 2.0 // SSE loss function + yp_batch(t) = (V.param * H.param(?, ?, t)) +^ b_y.param // activation: id for forecasting + L(t) = (y(t) - yp_batch(t)).normFSq / 2.0 // SSE loss function end for + + yp(?, ?, batch_start to batch_end) = yp_batch // Update the class member yp with the batch predictions end forward //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backward propagate to calculate gradients using chain rules in O(n_seq) time. - * FIX - add option of using sse loss function and fix affected partial derivatives - * @param batch_start - * @param batch_end + /** Performs the backward pass of the RNN, computing gradients for the network parameters. + * This function implements backpropagation through time (BPTT) for a batch of sequences. + * It calculates the gradients of the loss with respect to the network parameters, + * which are then used to update the weights in the training process. 
+ * @param x the input tensor of shape [seq_length, n_var, batch_size] + * @param y the target tensor of shape [seq_length, output_dim, batch_size] + * @param batch_start the starting index of the current batch in the full dataset + * @param batch_end the ending index (inclusive) of the current batch in the full dataset + * @param H the hidden state tensor, default is the class member H + * @return Unit (no return value, updates are made to class members) */ - def backward (batch_start: Int, batch_end: Int): Unit = - import ActivationFun.tanhD - - // Reset gradients before accumulating - matrixParams.foreach (_.grad.setAll(0)) // Reset matrix gradients - vectorParams.foreach (_.grad.set(0)) // Reset vector gradients + def backward (x: TensorD, y: TensorD, batch_start: Int, batch_end: Int, + H: HiddenState = H): Unit = + val current_batch_size = batch_end - batch_start + 1 // Calculate the current batch size + val yp_batch = yp(null, null, (batch_start, batch_end + 1)) // Extract the batch of predictions - val truncated_start = math.max (0, batch_end - truncation_length) // Determine the starting point for truncation + val truncated_start = math.max (0, seq_length - truncation_length) // Determine the starting point for truncation + var dyp = yp_batch(seq_length - 1) - y(seq_length - 1) // Initialize the gradient of the output predictions - val e = yp - y // error matrix + var dh_next = new MatrixD(n_mem, current_batch_size) // Initialize the gradient of the next hidden state - db_y = e.sumV // vector of row sums + for t <- seq_length - 1 to truncated_start by -1 do // Iterate backwards through the sequence + val H_prev = get_previous_hidden_state(H, t) // Get the previous hidden state + dyp = yp_batch(t) - y(t) // Calculate the gradient of the output predictions + b_y.grad += dyp.sumVr // Update the gradient of the output bias + V.grad += dyp * H.param(?, ?, t).ᵀ // Update the gradient of the output weights + H.grad(?, ?, t) = (V.param.ᵀ * dyp) + dh_next // 
Calculate the gradient of the hidden state - println ("debug: e.dims = " + e.dims) - println ("debug: h.dims = " + h.dims) - println ("dv.dims = " + V.dims) + H.pre_act_grad = tanhD_m (H.param(?, ?, t)) ⊙ H.grad(?, ?, t) // Calculate the pre-activation gradient of the hidden state + b_h.grad += H.pre_act_grad.sumVr // Update the gradient of the hidden bias - for t <- truncated_start until n_seq do dV += outer(e(t), h(t)) // outer vector product - val dh_T = V.𝐓 * e(n_seq - 1) // partial w.r.t. h_T - val dh = new MatrixD (n_seq, n_mem) // partial w.r.t. h_t - dh(n_seq - 1) = dh_T // set last row - - for t <- batch_end - 2 to truncated_start by -1 do - dh(t) = ((tanhD(h(t + 1))) *~: W) * dh(t + 1) + (V.𝐓 * e(t)) // partial w.r.t. h_t - - for t <- truncated_start until batch_end do - val h_pre = if t == 0 then h_m1 else h(t - 1) // get previous hidden state - dU += outer(dh(t), x(t)) - dW += outer(dh(t), h_pre) - db_h += tanhD(h(t)) * dh(t) + U.grad += H.pre_act_grad * x(t).ᵀ // Update the gradient of the input weights + W.grad += H.pre_act_grad * H_prev.ᵀ // Update the gradient of the hidden weights + dh_next = W.param.ᵀ * H.pre_act_grad // Calculate the gradient of the next hidden state end for - - matrixParams.head.grad += dU - matrixParams(1).grad += dW - matrixParams(2).grad += dV - vectorParams.head.grad += db_h - vectorParams(1).grad += db_y end backward //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backward propagate to calculate gradients using chain rules in O(n_seq) time. 
- * FIX - add option of using sse loss function and fix affected partial derivatives - * @oaram threshold - */ - private def clip_gradients (threshold: Double): Unit = - for group <- matrixParams do - val norm = group.grad.normF // calculate the norm of the gradient - if norm > threshold then - group.grad *= (threshold / norm) // scale the gradient - - for group <- vectorParams do - val norm = group.grad.norm // calculate the norm of the gradient - if norm > threshold then - group.grad *= (threshold / norm) // scale the gradient - end clip_gradients - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on the calculated partial derivatives, update the parameters (weights - * and biases). - ^ @pram batch_size - ^ @pram leaky + /** Reconstructs the full-length predictions from overlapping sequences. + * This function takes the predictions made on overlapping sequences and + * reconstructs them into a single, full-length prediction matrix. It + * aggregates predictions for each time step and averages them based on + * the number of contributions. + * @param yp the tensor of predictions, where each slice represents predictions + * for a sequence. Dimensions: [seq_len, num_variables, num_sequences] + * @param seq_len the length of each sequence used for prediction + * @return A matrix of reconstructed predictions, where each row represents + * a time step and each column represents a variable. The number of + * rows is equal to the original time series length, and the number + * of columns is equal to the number of predicted variables. 
*/ - def update_params (batch_size: Int, leaky: Boolean = true): Unit = - for group <- matrixParams do - group.velocity *= β - group.velocity += group.grad * (if leaky then 1 else (1 - β)) - group.param -= group.velocity * eta / batch_size - - // Check and log gradients for vectors - for group <- vectorParams do - group.velocity *= β - group.velocity += group.grad * (if leaky then 1 else (1 - β)) - group.param -= group.velocity * eta / batch_size - end update_params + private def reconstruct_predictions (yp: TensorD, seq_len: Int): MatrixD = + val original_len = yp.dim3 + seq_len - 1 // Calculate the original target length + val ypReconstructed = new MatrixD (original_len, yp.dim2) // Placeholder for aggregated predictions + val contributions = new VectorD (original_len) // Count contributions for each time step -end RNN + for seq_idx <- 0 until yp.dim3 do // Iterate over sequences + val start_idx = seq_idx // Starting index for the current sequence + for t <- 0 until seq_len do // Aggregate predictions and count contributions + for var_idx <- 0 until yp.dim2 do + ypReconstructed(start_idx + t, var_idx) += yp(t, var_idx, seq_idx) + contributions(start_idx + t) += 1 -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RNN` companion object provides factory methods. - */ -object RNN: + for t <- 0 until original_len do // Average predictions by dividing by contributions + if contributions(t) > 0 then + for var_idx <- 0 until yp.dim2 do + ypReconstructed(t, var_idx) /= contributions(t) - import ActivationFun._ + ypReconstructed // Return the reconstructed predictions + end reconstruct_predictions //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RNN` with automatic rescaling from a data matrix and response matrix. 
- * @param x the input/data matrix - * @param y the output/response matrix - * @param fname the feature/variable names - * @param n_mem the size of the hidden state (dimensionality of memory) + /** Tests the RNN model by reconstructing predictions and comparing them with the original data. + * This function reconstructs the full-length predictions from overlapping sequences, + * prints the dimensions of the reconstructed predictions and original data, and plots + * the predictions against the original data. It also calculates and prints the SMAPE + * and MAE values for each variable, and plots the loss function over epochs. + * @param original_extremes a tuple representing the original extremes of the data for unscaling. + * default is (1.0, 1.0). */ - def rescale(x: MatrixD, y: MatrixD, fname: Array[String] = null, n_mem: Int = 4): RNN = - val x_s = rescaleX (x, f_sigmoid) - val y_s = rescaleY (y, f_sigmoid)._1 + def test (original_extremes: (Double, Double) = (1.0, 1.0)): Unit = + val yp_reconstructed = reconstruct_predictions(yp, seq_length) // Reconstruct full-length predictions from overlapping sequences + + println(s"yp_reconstructed.dims = ${yp_reconstructed.dims}") // Print dimensions of reconstructed predictions and original data + println(s"y_orig.dims = ${y_orig.dims}") + + new Plot (null, y_orig(?, 0), yp_reconstructed(?, 0), + "Plot of y vs yp for RNN", lines = true) // Plot first variable of the original and reconstructed predictions + for col <- 0 until y_orig.dim2 do // Iterate over each variable to calc and print SMAPE and MAE values + val y_unscaled = unscaleV (original_extremes, (-2.0, 2.0))(y_orig(?, col)) + val yp_unscaled = unscaleV (original_extremes, (-2.0, 2.0))(yp_reconstructed(?, col)) +// banner ("smape value = " + Fit.smapeF(y_unscaled, yp_unscaled)) how else to use smape? 
+ banner ("mae value = " + Fit.mae(y_unscaled, yp_unscaled)) + end for -// println (s" scaled: x = $x_s \n scaled y = $y_s") - new RNN (x_s, y_s, fname, n_mem) - end rescale + new Plot (VectorD.range (0, max_epochs), L_epoch, null, + "Plot of Loss Function vs Epoch", lines = true) // Plot the loss function over epochs + + banner ("minimum loss epoch = " + L_epoch.argmin()) // Print epoch with the minimum loss and the minimum loss value + banner ("minimum loss value = " + L_epoch.min) + end test end RNN //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rNNTest` main function tests the `RNN` class on randomly generated - * sequence data meant to represent encoded words - * > runMain scalation.modeling.forecasting.neuralforecasting.rNNTest +/** The `RNN` companion object provides factory methods. */ -@main def rNNTest (): Unit = - - val n_seq = 8 - val n_var = 5 - - val (x_t, y_t) = genSequenceData (n_seq, n_var) - - println (s"x_t = $x_t") - println (s"y_t = $y_t") - - banner ("Create a Recurrent Neural Network (RNN)") - val mod = new RNN (x_t, y_t) - mod.train () - mod.test () - -end rNNTest +object RNN: + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Creates sequences for the RNN model from the input and output matrices. + * This function takes the input and output matrices and creates sequences of a specified length. + * Each sequence is a slice of the original matrices, and the function returns tensors containing + * these sequences. 
+ * @param x the input matrix of shape [n_samples, n_features] + * @param yy the output matrix of shape [n_samples, n_output_features] + * @param sequence_length the length of each sequence + * @return A tuple containing two tensors: + * - x_sequences: The input sequences tensor of shape [sequence_length, n_features, n_sequences] + * - y_sequences: The output sequences tensor of shape [sequence_length, n_output_features, n_sequences] + */ + def create_sequences (x: MatrixD, yy: MatrixD, sequence_length: Int): (TensorD, TensorD) = + val n_samples = x.dim + val n_sequences = n_samples - sequence_length + 1 + val x_sequences: TensorD = new TensorD (sequence_length, x.dim2, n_sequences) + val y_sequences: TensorD = new TensorD (sequence_length, yy.dim2, n_sequences) + + for seq <- 0 until n_sequences do + val sequence = seq until (seq + sequence_length) + x_sequences(?, ?, seq) = x(sequence) + y_sequences(?, ?, seq) = yy(sequence) + end for -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rNNTest2` main function tests the `RNN` class on sequence data read as words - * in a file that encoded and pass into `RNN` - * > runMain scalation.modeling.forecasting.neuralforecasting.rNNTest2 - */ -@main def rNNTest2 (): Unit = + (x_sequences, y_sequences) + end create_sequences - println("read words from a text file") + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Creates an output matrix for the RNN model from the target vector. + * This function takes a target vector and creates a matrix where each row represents + * a shifted version of the target vector. The number of columns in the matrix is equal + * to the forecasting horizon (hh). If the shifted index exceeds the length of the target + * vector, the value is set to -0.0. 
+ * @param y the target vector of shape [n_samples] + * @param hh the forecasting horizon (number of future steps to predict) + * @return A matrix of shape [n_samples - 1, hh] where each row is a shifted version of the target vector + */ + private def makeOutputMatrix (y: VectorD, hh: Int): MatrixD = + val yy = new MatrixD (y.dim - 1, hh) + for t <- 0 until yy.dim do + for j <- 0 until hh do yy(t, j) = if t + 1 + j >= y.dim then -0.0 else y(t + 1 + j) + yy + end makeOutputMatrix -// FIX - find example text + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Builds the input and output matrices for time series forecasting. + * This function takes a target vector and creates input and output matrices for time series forecasting. + * The input matrix is created using the specified number of lags, and the output matrix is created using + * the specified forecasting horizon. The function prints the dimensions of the input and output matrices + * and the value of the last element in the target vector. 
+ * @param y the target vector of shape [n_samples] + * @param lags the number of lags to use for creating the input matrix + * @param hh the forecasting horizon (number of future steps to predict) + * @param backcast A boolean flag indicating whether to include backcasting (default is true) + * @return A tuple containing two matrices: + * - The input matrix of shape [n_samples - lags, lags] + * - The output matrix of shape [n_samples - 1, hh] + */ + def buildMatrix4TS (y: VectorD, lags: Int, hh: Int, backcast: Boolean = true): (MatrixD, MatrixD) = + val x = makeMatrix4L (y, lags, backcast) + val yy = makeOutputMatrix (y, hh) + println (s"dims of x = ${x.dims}") + println (s"dims of yy = ${yy.dims}") + println (s"last element in y = ${y(y.dim - 1)}") + (x, yy) + end buildMatrix4TS -end rNNTest2 +end RNN //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rNNTest3` main function tests the `RNN` class on sequence/time series data - * corresponding to the lake level dataset using multiple lags. - * > runMain scalation.modeling.forecasting.neuralforecasting.rNNTest3 +/** Main function to test the RNN model on COVID-19 new deaths data. + * This function loads the COVID-19 new deaths data, preprocesses it, creates sequences + * for the RNN model, trains the model, and tests it. It prints the dimensions of the + * input and output matrices, and the value of the last element in the dataset. 
+ * > runMain scalation.modeling.forecasting.neuralforecasting */ -@main def rNNTest3 (): Unit = - - import Example_LakeLevels.y - import MakeMatrix4TS._ - val hh = 2 // forecasting horizon - FIX - currently lags == hh - hp("p") = 2 // number of lags to include - - val y_s = scaleV (extreme (y), (-2.0, 2.0))(y) // rescale y to active domain of sigmoid, tanh - - val x = ARY.buildMatrix (y_s, hp) // column for each lag - val yy = makeMatrix4Y (y_s, hh) - - println (s"x.dims = ${x.dims}, yy.dims = ${yy.dims}") - - banner ("Create a Recurrent Neural Network Unit (RNN)") - val mod = new RNN (x, yy) // call constructor - mod.train () // train the model - mod.test () // test the model - -end rNNTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @main def rNNTest4 (): Unit = - import MakeMatrix4TS._ - val hh = 2 // forecasting horizon - FIX - currently lags == hh - hp("p") = 2 // number of lags to include + val lags = 4 // Define the number of lags + val hh = 1 // Forecasting horizon + val seq_length = 45 // Sequence length - var y = Example_Covid.loadData_y ("new_deaths") + var y = Example_Covid.loadData_y ("new_deaths") // Load the COVID-19 new deaths data - y = y(0 until 116) + y = y(0 until 116) // Select the first 116 data points - val original_extremes = extreme(y) + val original_extremes = extreme (y) // Calculate the original extremes of the data - println ("original_extremes.type = " + original_extremes.getClass) + println ("original_extremes.type = " + original_extremes.getClass) // Print the type of the original extremes - val y_s = scaleV (extreme (y), (-2.0, 2.0))(y) // rescale y to active domain of sigmoid, tanh + val y_s = scaleV (extreme(y), (-2.0, 2.0))(y) // Scale the data to the active domain of sigmoid and tanh functions - val x = ARY.buildMatrix (y_s, hp) // column for each lag - val yy = makeMatrix4Y (y_s, hh) + val (x, yy) = RNN.buildMatrix4TS (y_s, lags, hh) // Build the input and output matrices for time 
series forecasting + val (x_seq, y_seq) = RNN.create_sequences (x, yy, seq_length) // Create sequences for the RNN model + println (s"x.dims = ${x.dims}, yy.dims = ${yy.dims}") // Print the dimensions of the input and output matrices + println (s"x_seq.dims = ${x_seq.dims}, y_seq.dims = ${y_seq.dims}") // Print the dimensions of the sequences - println (s"x.dims = ${x.dims}, yy.dims = ${yy.dims}") + banner ("Create a Recurrent Neural Network Unit (RNN)") // Create and train the RNN model + val mod = new RNN (x_seq, y_seq, yy) // Call constructor + mod.train () // Train the model + mod.test (original_extremes) // Test the model - banner ("Create a Recurrent Neural Network Unit (RNN)") - val mod = new RNN (x, yy) // call constructor - mod.train () // train the model - mod.test (original_extremes) - - print ("y(116) = " + y(115)) + print ("y(115) = " + y(115)) // Print the value of the last element in the dataset end rNNTest4 -// GRU, using Trait for Code reusability - +// ------------------------------------------------------------------------------------------------ +// Made a change to line number 130 in MakeMatrix4TS due to mismatch in dimensions +// Fine tune RNN again or modify MakeMatrix4TS? 
+// Made a change in NormFsq in MatrixD to change the dimensions from column to row +// Made a change in TensorD to add new methods +// ------------------------------------------------------------------------------------------------ diff --git a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/RNN.scala.bak2 b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/RNN.scala.bak2 new file mode 100644 index 000000000..85dc5edf8 --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/RNN.scala.bak2 @@ -0,0 +1,386 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller, Leela Venkata Sai Vukkurthi + * @version 2.0 + * @date Mon Dec 2 22:55:42 EST 2024 + * @see LICENSE (MIT style license file). + ^ + * @note Model: Recurrent Neural Network (RNN) for Multivariate Time Series + * + * Standard RNN (GRU/LSTM) Formats for Multivariate Time Series Data. + * Keras: 3D format expected by GRU/LSTM is [samples, timesteps, features]. + * => indexing [timestamp t, lags k, variable j] + */ + +package scalation +package modeling +package forecasting +package neuralforecasting + +import scala.annotation.unused +import scala.math.Ordering.Double.IeeeOrdering + +import scalation.mathstat.{MatrixD, Plot, VectorD} +import scalation.random.{NormalMat, NormalVec_c} +import ActivationFun.{softmax_, tanh_} +import MatrixD.outer + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNN` class implements Recurrent Neural Network (RNN) via Back Propagation Through + * Time (BPTT). At each time point x_t, there is a vector representing several variables + * or the encoding of a word. Intended to work for guessing the next work in a sentence + * or for multi-horizon forecasting. 
+ * Time series: (x_t: t = 0, 1, ..., n_seq-1) where n_seq is the number of time points/words + * + * @param x the input sequence/time series + * @param y the output sequence/time series + * @param fname the feature/variable names + * @param n_mem the size for hidden state (h) (dimensionality of memory) + */ +class RNN (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 8) + extends FitM: + + private val CLASSIF = false // whether to apply classification (e.g., guess next word) or forecast a value + private val max_epochs = 55 // maximum number of iterations + private val eta = 0.27 // the learning rate (use 0.25 for rNNTest) + private val batch_size = 64 // batch size for mini-batch training + private val truncation_length = 63 // Truncate the back-propagation through time + private val β = 0.9 // Momentum hyper parameter + private val threshold = 5.0 // Threshold for gradient clipping + + private val n_seq = x.dim // e.g., 20, number of words in a sentence (including start and end symbol) + private val n_var = x.dim2 // e.g., 64, number of variables or distinct words (vocabulary size)\ + + // since we will only use one sentence for training, + // this is also the total steps during training. 
+// private val _1 = VectorD.one (n_mem) // vector of all ones for e.g., 1 - z + + // initialize parameters (weights and biases) + private val rmg1 = NormalMat(n_mem, n_var, 0.0, math.sqrt(2.0 / (n_mem + n_var))) + private val rmg2 = NormalMat(n_mem, n_mem, 0.0, math.sqrt(2.0 / (n_mem + n_mem))) +// private val rmg3 = NormalMat(n_var, n_mem, 0.0, math.sqrt(2.0 / (n_var + n_mem))) // Original initialization for V matrix and b_y + + private val rmg4 = NormalMat(y.dim2, n_mem, 0.0, math.sqrt(2.0 / (y.dim2 + n_mem))) + + private val rvg1 = NormalVec_c(n_mem, 0.0, 0.01) // random (Normal) vector generators +// private val rvg3 = NormalVec_c (n_var, 0.0, 0.01) + + private val U = rmg1.gen // parameters for computing the hidden state + private val W = rmg2.gen + private val b_h = new VectorD(n_mem) // bias vector for hidden state + + // decoder for generating output + private val V = rmg4.gen // decoder weight matrix + private val b_y = new VectorD (y.dim2) // decoder bias vector: Original initialization for b_y is n_var + + private val h_m1 = rvg1.gen // hidden state @ t = -1 (m1 means minus 1) + private val h = new MatrixD (n_seq, n_mem) // hidden state h + private val yp = new MatrixD (n_seq, y.dim2) // predicted output: yp.dim2 is originally n_var + private val L = new VectorD (n_seq) // store loss function values + + // the partial derivative of weights and biases + private var db_y: VectorD = new VectorD(b_y.dim) + private val db_h: VectorD = new VectorD(b_h.dim) + private val dV = new MatrixD(V.dim, V.dim2) + private val dW = new MatrixD(W.dim, W.dim2) + private val dU = new MatrixD(U.dim, U.dim2) + + // initialize velocity parameters for momentum + private val vU = new MatrixD(U.dim, U.dim2) + private val vW = new MatrixD(W.dim, W.dim2) + private val vb_h = new VectorD(b_h.dim) + private val vV = new MatrixD(V.dim, V.dim2) + private val vb_y = new VectorD(b_y.dim) + + // parameter grouping for easy access + private case class ParamGroup (var param: MatrixD, var 
velocity: MatrixD, var grad: MatrixD) // For matrices + + private case class ParamGroupVector (var param: VectorD, var velocity: VectorD, var grad: VectorD) // For vectors + + private val matrixParams = List (ParamGroup(U, vU, dU), + ParamGroup(W, vW, dW), + ParamGroup(V, vV, dV)) + + private val vectorParams = List (ParamGroupVector(b_h, vb_h, db_h), + ParamGroupVector(b_y, vb_y, db_y)) + + if fname != null then println (s"RNN: fname = $fname") + + private val L_epoch = new VectorD (max_epochs) // store loss function values for each epoch + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Train the RNN using batch gradient descent. IN PROGRESS + */ + def train (): Unit = + for it <- 1 to max_epochs do + + val n_batches = math.ceil (n_seq / batch_size).toInt + for i <- 0 to n_batches do + val batch_start = i * batch_size + val batch_end = math.min (n_seq - 1, (i + 1) * batch_size - 1) + println (s"batch_start = $batch_start, batch_end = $batch_end") + + forward (batch_start, batch_end) // forward propagate: get intermediate and output results + + backward (batch_start, batch_end) // back propagate: calculate gradients (partial derivatives) + + clip_gradients (threshold) // clip gradients to avoid exploding gradients + + update_params (batch_end - batch_start + 1, leaky = true) // update parameters (weights and biases) + end for + + val mse = L.sum / n_seq // mean squared error + println (s"train: for epoch $it: loss function L = $L") + banner (s"train: for epoch $it: sum of loss function L.sum = ${L.sum}") + banner (s"train: for epoch $it: mean squared error = $mse") + L_epoch(it - 1) = L.sum + end for + end train + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Test the RNN predictions. 
+ * @param original_extremes + */ + def test (original_extremes: (Double, Double) = (1.0, 1.0)): Unit = + new Plot(null, y(?, 0), yp(?, 0), "Plot of y vs yp for RNN", lines = true) + + for col <- 0 until y.dim2 do + val y_unscaled = unscaleV (original_extremes, (-2.0, 2.0))(y(?, col)) + val yp_unscaled = unscaleV (original_extremes, (-2.0, 2.0))(yp(?, col)) + banner ("smape value = " + smapeF(y_unscaled, yp_unscaled)) // calculate the Symmetric Mean Absolute Percentage Error + banner ("mae value = " + Fit.mae(y_unscaled, yp_unscaled)) // calculate the Mean Absolute Error + end for + + new Plot (VectorD.range(0, max_epochs), L_epoch, null, "Plot of Loss Function vs Epoch", lines = true) + + banner ("minimum loss epoch = " + L_epoch.argmin ()) + banner ("minimum loss value = " + L_epoch.min (using IeeeOrdering)) + end test + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward propagate calculates yp, loss and intermediate variables for each step. + * @param batch_start + * @param batch_end + */ + def forward (batch_start: Int, batch_end: Int): Unit = + + for t <- batch_start to batch_end do + + val h_pre = if t == 0 then h_m1 else h(t - 1) // get previous hidden state + h(t) = tanh_(U * x(t) + W * h_pre + b_h) // compute hidden state + if CLASSIF then + yp(t) = softmax_(V * h(t) + b_y) // activation: softmax for classification + L(t) = (-y(t) * log_(yp(t))).sum // cross-entropy loss function + else + yp(t) = V * h(t) + b_y // activation: id for forecasting + L(t) = ((y(t) - yp(t)).normSq) / 2.0 // SSE loss function + end for + end forward + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward propagate to calculate gradients using chain rules in O(n_seq) time. 
+ * FIX - add option of using sse loss function and fix affected partial derivatives + * @param batch_start + * @param batch_end + */ + def backward (@unused batch_start: Int, batch_end: Int): Unit = + import ActivationFun.tanhD + + // Reset gradients before accumulating + matrixParams.foreach (_.grad.setAll(0)) // Reset matrix gradients + vectorParams.foreach (_.grad.set(0)) // Reset vector gradients + + val truncated_start = math.max (0, batch_end - truncation_length) // Determine the starting point for truncation + + val e = yp - y // error matrix + + db_y = e.sumV // vector of row sums + + println ("debug: e.dims = " + e.dims) + println ("debug: h.dims = " + h.dims) + println ("dv.dims = " + V.dims) + + for t <- truncated_start until n_seq do dV += outer(e(t), h(t)) // outer vector product + val dh_T = V.𝐓 * e(n_seq - 1) // partial w.r.t. h_T + val dh = new MatrixD (n_seq, n_mem) // partial w.r.t. h_t + dh(n_seq - 1) = dh_T // set last row + + for t <- batch_end - 2 to truncated_start by -1 do + dh(t) = ((tanhD(h(t + 1))) *~: W) * dh(t + 1) + (V.𝐓 * e(t)) // partial w.r.t. h_t + + for t <- truncated_start until batch_end do + val h_pre = if t == 0 then h_m1 else h(t - 1) // get previous hidden state + dU += outer(dh(t), x(t)) + dW += outer(dh(t), h_pre) + db_h += tanhD(h(t)) * dh(t) + end for + + matrixParams.head.grad += dU + matrixParams(1).grad += dW + matrixParams(2).grad += dV + vectorParams.head.grad += db_h + vectorParams(1).grad += db_y + end backward + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Backward propagate to calculate gradients using chain rules in O(n_seq) time. 
+ * FIX - add option of using sse loss function and fix affected partial derivatives + * @param threshold + */ + private def clip_gradients (threshold: Double): Unit = + for group <- matrixParams do + val norm = group.grad.normF // calculate the norm of the gradient + if norm > threshold then + group.grad *= (threshold / norm) // scale the gradient + + for group <- vectorParams do + val norm = group.grad.norm // calculate the norm of the gradient + if norm > threshold then + group.grad *= (threshold / norm) // scale the gradient + end clip_gradients + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Based on the calculated partial derivatives, update the parameters (weights and biases). + * @param batch_size + * @param leaky + */ + def update_params (batch_size: Int, leaky: Boolean = true): Unit = + for group <- matrixParams do + group.velocity *= β + group.velocity += group.grad * (if leaky then 1 else (1 - β)) + group.param -= group.velocity * eta / batch_size + + // Check and log gradients for vectors + for group <- vectorParams do + group.velocity *= β + group.velocity += group.grad * (if leaky then 1 else (1 - β)) + group.param -= group.velocity * eta / batch_size + end update_params + +end RNN + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNN` companion object provides factory methods. + */ +object RNN: + + import ActivationFun._ + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `RNN` with automatic rescaling from a data matrix and response matrix. 
+ * @param x the input/data matrix + * @param y the output/response matrix + * @param fname the feature/variable names + * @param n_mem the size of the hidden state (dimensionality of memory) + */ + def rescale (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): RNN = + val x_s = rescaleX (x, f_sigmoid) + val y_s = rescaleY (y, f_sigmoid)._1 + +// println (s" scaled: x = $x_s \n scaled y = $y_s") + new RNN (x_s, y_s, fname, n_mem) + end rescale + +end RNN + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `rNNTest` main function tests the `RNN` class on randomly generated + * sequence data meant to represent encoded words + * > runMain scalation.modeling.forecasting.neuralforecasting.rNNTest + */ +@main def rNNTest (): Unit = + + val n_seq = 8 + val n_var = 5 + + val (x_t, y_t) = genSequenceData (n_seq, n_var) + + println (s"x_t = $x_t") + println (s"y_t = $y_t") + + banner ("Create a Recurrent Neural Network (RNN)") + val mod = new RNN (x_t, y_t) + mod.train () + mod.test () + +end rNNTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `rNNTest2` main function tests the `RNN` class on sequence data read as words + * in a file that encoded and pass into `RNN` + * > runMain scalation.modeling.forecasting.neuralforecasting.rNNTest2 + */ +@main def rNNTest2 (): Unit = + + println("read words from a text file") + +// FIX - find example text + +end rNNTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `rNNTest3` main function tests the `RNN` class on sequence/time series data + * corresponding to the lake level dataset using multiple lags. 
+ * > runMain scalation.modeling.forecasting.neuralforecasting.rNNTest3 + */ +@main def rNNTest3 (): Unit = + + import Example_LakeLevels.y + import MakeMatrix4TS._ + val hh = 2 // forecasting horizon - FIX - currently lags == hh + hp("p") = 2 // number of lags to include + + val y_s = scaleV (extreme (y), (-2.0, 2.0))(y) // rescale y to active domain of sigmoid, tanh + + val x = ARY.buildMatrix (y_s, hp) // column for each lag + val yy = makeMatrix4Y (y_s, hh) + + println (s"x.dims = ${x.dims}, yy.dims = ${yy.dims}") + + banner ("Create a Recurrent Neural Network Unit (RNN)") + val mod = new RNN (x, yy) // call constructor + mod.train () // train the model + mod.test () // test the model + +end rNNTest3 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +@main def rNNTest4 (): Unit = + + import MakeMatrix4TS._ + val hh = 2 // forecasting horizon - FIX - currently lags == hh + hp("p") = 2 // number of lags to include + + var y = Example_Covid.loadData_y ("new_deaths") + + y = y(0 until 116) + + val original_extremes = extreme(y) + + println ("original_extremes.type = " + original_extremes.getClass) + + val y_s = scaleV (extreme (y), (-2.0, 2.0))(y) // rescale y to active domain of sigmoid, tanh + + val x = ARY.buildMatrix (y_s, hp) // column for each lag + val yy = makeMatrix4Y (y_s, hh) + + + println (s"x.dims = ${x.dims}, yy.dims = ${yy.dims}") + + banner ("Create a Recurrent Neural Network Unit (RNN)") + val mod = new RNN (x, yy) // call constructor + mod.train () // train the model + mod.test (original_extremes) + + print ("y(116) = " + y(115)) + +end rNNTest4 + +// GRU, using Trait for Code reusability + + diff --git a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/RNNCell.scala b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/RNNCell.scala new file mode 100644 index 000000000..d9ff8e7f5 --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/RNNCell.scala @@ 
-0,0 +1,147 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Mon Aug 4 21:12:40 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Model: Recurrent Neural Network (RNN) for Multivariate Time Series + */ + +package scalation +package modeling +package forecasting +package neuralforecasting + +import scalation.mathstat.{MatrixD, TensorD, VectorD} + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RNNCell` trait defines the structure and operations for a Recurrent Neural Network (RNN) cell. + * It includes weight matrices, bias vectors, and hidden states, along with methods for gradient clipping + * and parameter updates. + */ +trait RNNCell extends RecurrentBase: + + protected lazy val U: ParamGroup [MatrixD] = + initializeParamGroup (n_mem, n_var, math.sqrt(2.0 / (n_var + n_mem))) // Input-to-hidden weights + + protected lazy val W: ParamGroup [MatrixD] = + initializeParamGroup (n_mem, n_mem, math.sqrt(2.0 / (n_mem + n_mem))) // Hidden-to-hidden weights + + protected lazy val V: ParamGroup [MatrixD] = + initializeParamGroup (y.dim2, n_mem, math.sqrt(2.0 / (n_mem + y.dim2))) // Hidden-to-output weights + + protected lazy val b_h: ParamGroup [VectorD] = initializeBiasGroup (n_mem) // Bias for hidden layer + protected lazy val b_y: ParamGroup [VectorD] = initializeBiasGroup (y.dim2) // Bias for output layer + + protected lazy val H: HiddenState = HiddenState (n_mem, batch_size, seq_length) // Hidden state initialization + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Case class representing the hidden state of the RNN. 
+ * @param n_mem number of memory units + * @param n_seq number of sequences + * @param seq_length length of each sequence + * @param param tensor for hidden state parameters + * @param grad tensor for hidden state gradients + * @param pre_act_grad matrix for pre-activation gradients + */ + protected case class HiddenState (n_mem: Int, n_seq: Int, seq_length: Int, var param: TensorD = null, + var grad: TensorD = null, var pre_act_grad: MatrixD = null): + + if param == null then param = new TensorD(n_mem, n_seq, seq_length) // Initialize param if not provided + if grad == null then grad = new TensorD(n_mem, n_seq, seq_length) // Initialize grad if not provided + if pre_act_grad == null then pre_act_grad = new MatrixD (n_mem, n_seq) // Initialize pre-activation grad if not provided + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return a new HiddenState with tensors sliced to the specified batch size. + * @param batch_size The batch size to slice to + * @return A new HiddenState with sliced tensors + */ + def slice (batch_size: Int): HiddenState = + val slicedParam = param(0 until n_mem, 0 until batch_size, 0 until seq_length) + val slicedGrad = grad(0 until n_mem, 0 until batch_size, 0 until seq_length) + val slicedPreActGrad = pre_act_grad(0 until n_mem, 0 until batch_size) + HiddenState (n_mem, batch_size, seq_length, param = slicedParam, grad = slicedGrad, + pre_act_grad = slicedPreActGrad) + end slice + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Reset the hidden state parameters and gradients to zero. + */ + def reset (): Unit = + param.set (0.0) + grad.set (0.0) + pre_act_grad.setAll (0.0) + end reset + + end HiddenState + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Get the previous hidden state at time step t. 
+ * @param H The hidden state + * @param t The time step + * @return The previous hidden state matrix + */ + protected def get_previous_hidden_state(H: HiddenState, t: Int): MatrixD = + require(t >= 0, "Time index t must be non-negative.") + require(t < seq_length, "Time index t must be within the sequence length.") + + if t > 0 then H.param(?, ?, t-1) + else H.param(?, ?, 0) + end get_previous_hidden_state + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Zero the gradients of the model parameters. + * This method sets all gradients of the weight matrices and bias vectors to zero. + */ + protected def zero_gradients (): Unit = + for group <- List(U, V, W) do group.grad.setAll (0.0) // Zero gradients for weight matrices + for group <- List(b_y, b_h) do group.grad.set (0.0) // Zero gradients for bias vectors + end zero_gradients + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Clip the gradients of the model parameters to a specified threshold. + * This method ensures that the gradients do not exceed the given threshold to prevent exploding gradients. + * @param threshold the threshold value for gradient clipping + */ + override protected def clip_gradients (threshold: Double): Unit = + for group <- List(U, V, W) do + val norm = group.grad.normF + println("Gradient norm: " + norm) + if norm > threshold then + group.grad *= (threshold / norm) + println ("Gradient norm after clipping : " + group.grad.normF) + end for + + for group <- List(b_y, b_h) do + val norm = group.grad.norm + println ("Gradient norm: " + norm) + if norm > threshold then + group.grad *= (threshold / norm) + println ("Gradient norm after clipping : " + group.grad.norm) + end for + end clip_gradients + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Update the model parameters based on the gradients. 
+ * This method applies momentum and updates the parameters using the specified + * learning rate and batch size. + * @param batch_size the size of the batch used for training + * @param leaky a boolean flag indicating whether to use leaky updates (default is true) + */ + protected def update_params (batch_size: Int, leaky: Boolean = true): Unit = + for group <- List (U, V, W) do + group.velocity *= β + group.velocity += group.grad * (if leaky then 1 else 1 - β) + group.param -= group.velocity * eta / batch_size + end for + + for group <- List (b_y, b_h) do + group.velocity *= β + group.velocity += group.grad * (if leaky then 1 else 1 - β) + group.param -= group.velocity * eta / batch_size + end for + end update_params + +end RNNCell + diff --git a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/RecurrentBase.scala b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/RecurrentBase.scala new file mode 100644 index 000000000..51cf7d3c2 --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/RecurrentBase.scala @@ -0,0 +1,138 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Praveen Rangavajhula + * @version 2.0 + * @date Mon Aug 4 21:12:40 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Model: Base Trait for Recurrent Neural Networks + */ + +package scalation +package modeling +package forecasting +package neuralforecasting + +import scala.math.{log, tanh} + +import scalation.mathstat.{MatrixD, TensorD, VectorD} +import scalation.random.NormalMat + +import ActivationFun.{sigmoid, softmax_, tanhD} + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RecurrentBase` trait defines the base structure and operations for recurrent neural networks. + * It includes common hyperparameters, activation functions, and methods for parameter initialization, + * gradient clipping, and parameter updates. 
+ */ +trait RecurrentBase: + + protected val CLASSIF: Boolean = false // Whether it's classification or regression +// FIX -- use hyper-parameters + protected val max_epochs: Int = 30 // Maximum number of epochs + protected val eta: Double = 0.0005 // Learning rate + protected val batch_size: Int = 64 // Batch size for training + protected val truncation_length: Int = 40 // Truncation length for BPTT + protected val β: Double = 0.9 // Momentum hyper parameter + protected val threshold: Double = 5.0 // Threshold for gradient clipping + + protected val seq_length: Int // Number of time steps (sequence length) + protected val n_mem: Int // Size of hidden state + protected val n_seq: Int // Number of sequences + protected val n_var: Int // Number of input variables + + protected val x: TensorD // Input tensor + protected val y: TensorD // Output tensor + + protected val loss_per_epoch: VectorD // Loss per epoch + protected val L: VectorD // Loss per time step + +// FIX -- functioanlity should be defined once + def log_ (x: VectorD): VectorD = x.map (log) // Log transformation for VectorD + + def log_ (x: MatrixD): MatrixD = x.map_ (log) // Log transformation for MatrixD + + def sigmoid_ (x: MatrixD): MatrixD = x.map_ (sigmoid) // Overloaded sigmoid for MatrixD + + def softmax_m (x: MatrixD): MatrixD = x.mmap (softmax_) // Overloaded softmax for MatrixD + + def tanh_ (t: VectorD): VectorD = t.map (tanh) // Overloaded tanh for VectorD + def tanh_ (x: MatrixD): MatrixD = x.map_ (tanh) // Overloaded tanh for MatrixD + + def tanhD_m (x: MatrixD): MatrixD = x.mmap (tanhD) // Overloaded tanh derivative for MatrixD + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Case class representing a group of parameters, including the parameter itself, its velocity, and its gradient. 
+ * @param param the parameter (e.g., weights or biases) + * @param velocity the velocity associated with the parameter, used for momentum in optimization + * @param grad the gradient of the parameter, used for updating the parameter during training + */ + protected case class ParamGroup [T] (var param: T, var velocity: T, var grad: T) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Updates a specific batch of rows in a matrix with new values. + * @param matrix the matrix to be updated + * @param batch_start the starting index of the batch in the matrix + * @param batch_end the ending index (exclusive) of the batch in the matrix + * @param newBatch the new matrix containing the values to be inserted + * @throws IllegalArgumentException If the size of newBatch doesn't match the specified batch size + */ + protected def updateBatch (matrix: MatrixD, batch_start: Int, batch_end: Int, newBatch: MatrixD): Unit = + if batch_end - batch_start != newBatch.dim then + throw new IllegalArgumentException ( + "Batch size mismatch: The newBatch matrix must have the same number of rows as (batch_end - batch_start).") + for i <- 0 until newBatch.dim do + matrix(batch_start + i) = newBatch(i) // Update batch in the matrix + end updateBatch + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Initialize a parameter group with a specified number of rows, columns, and standard deviation. 
+ * @param rows the number of rows in the parameter matrix + * @param cols the number of columns in the parameter matrix + * @param stdDev the standard deviation for initializing the parameter matrix + * @return A ParamGroup containing the initialized parameter matrix, velocity matrix, and gradient matrix + */ + protected def initializeParamGroup (rows: Int, cols: Int, stdDev: Double): ParamGroup [MatrixD] = + ParamGroup (param = NormalMat (rows, cols, 0.0, stdDev).gen, // Initialize parameter matrix + velocity = new MatrixD (rows, cols), // Initialize velocity matrix + grad = new MatrixD (rows, cols)) // Initialize gradient matrix + end initializeParamGroup + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Initialize a bias group with a specified size. + * @param size the size of the bias vector + * @return A ParamGroup containing the initialized bias vector, velocity vector, and gradient vector + */ + protected def initializeBiasGroup (size: Int): ParamGroup [VectorD] = + ParamGroup (param = new VectorD (size), // Initialize bias vector + velocity = new VectorD (size), // Initialize velocity vector + grad = new VectorD (size)) // Initialize gradient vector + end initializeBiasGroup + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Abstract method for training the model with the given input and output tensors. + * @param x the input tensor + * @param y the output tensor + */ + def train (x: TensorD, y: TensorD): Unit + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Abstract method to zero the gradients of the model parameters. + */ + protected def zero_gradients (): Unit + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Abstract method to clip the gradients of the model parameters to a specified threshold. 
+ * @param threshold the threshold value for gradient clipping + */ + protected def clip_gradients (threshold: Double): Unit + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Abstract method to update the model parameters based on the gradients. + * @param batch_size the size of the batch used for training + * @param leaky a boolean flag indicating whether to use leaky updates (default is true) + */ + protected def update_params (batch_size: Int, leaky: Boolean = true): Unit + +end RecurrentBase + diff --git a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/SimpleEncoder.scala b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/SimpleEncoder.scala new file mode 100644 index 000000000..184711938 --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/SimpleEncoder.scala @@ -0,0 +1,300 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John A. Miller + * @version 2.0 + * @date Sun Nov 9 23:27:14 EST 2025 + * @see LICENSE (MIT style license file). + * + * @note Simple Implementation for an Encoder-Only Transformer, e.g., used for + * (1) Natural Language Processing (NLP) or + * (2) Time Series Forecasting (TSF) + * + * Limitations: one attention head, no dropout layer, single encoder block, + * no back-propagation + * + * @see sebastianraschka.com/blog/2023/self-attention-from-scratch.html + * @see arxiv.org/pdf/1706.03762.pdf (main paper) + */ + +package scalation.modeling.forecasting.neuralforecasting + +import scala.math.{cos, sin, sqrt} + +import scalation.~^ +import scalation.mathstat._ +import scalation.modeling.ActivationFun.{f_reLU, f_softmax} + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SimpleEncoder` object implements the attention method based on the + * scaled dot product. 
+ */ +object SimpleEncoder: + + val eps = 1E-5 // very small value + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Patchify the univariate time series y by breaking it into non-overlapping + * patches of length pl. This simple implementation assumes stride s = pl, + * but PatchTST uses pl = 16 and s = 8 as defaults. + * @param y the given univariate time series + * @param pl the patch length + */ + def patchify (y: VectorD, pl: Int): MatrixD = + val m = y.dim + val np = m / pl + val x = new MatrixD (np, pl) + for i <- x.indices do x(i) = y(i*pl until (i+1)*pl) + x + end patchify + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Use a matrix transformation containing learnable weights to embed each patch + * vector into a higher dimensional space (providing enhanced vector similarity). + * The dimensionality of the embedding space is d_model. For this simple + * implementation d_model = d_k as there is only one attention head. + * @param xx the matrix containing each patch as a row + * @param wE the dimensionality of the embedding space + */ + def embed (xx: MatrixD, wE: MatrixD): MatrixD = xx * wE + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Encode all the positions in the time series as vectors of length d_model. + * @param len the sequence length + * @param d_k the dimensionality of the model (d_model = d_k here) + */ + def encodePositions (len: Int, d_k: Int): MatrixD = + val pe = new MatrixD (len, d_k) + for k <- pe.indices do + for i <- 0 until d_k/2 do + val den = 10000.0~^(2.0*i/d_k) + pe(k, 2*i) = sin (k / den) + pe(k, 2*i+1) = cos (k / den) + pe + end encodePositions + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Based on the Query (Q), Key (K), and Value (V) matrices, compute the attention. 
+ * + * att = softmax (QK^T/√d_k) V + * + * @param q the Query: the input of interest + * @param k the Key: other locations to compare it with (for similarity) + * @param v the Value: the input value at the key locations + * @param d_k the dimensionality of Query, Key, and Value (if different use d_v) + */ + def attention (q: MatrixD, k: MatrixD, v: MatrixD, d_k: Int): MatrixD = + val qkt = q * k.ᵀ // repeated dot product + val sdp = qkt / sqrt (d_k) // scaled dot product (sdp) + val scr = f_softmax.fM (sdp) // attention scores + println (s"attention: qkt = $qkt, sdp = $sdp, scr = $scr") + scr * v // attention (Q, K, V) + end attention + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform layer normalization on matrix x. While batch normalization normalizes + * over a mini-batch, layer normalization normalizes over a single instance or row. + * @see www.geeksforgeeks.org/deep-learning/what-is-layer-normalization/ + * An affine transformation is supported via the γ and β learnable parameters. + * @param x the matrix to normalize + * @param γ the scaling learnable parameter (defaults to 1.0) + * @param β the shifting learnable parameter (defaults to 0.0) + */ + def layerNorm (x: MatrixD, γ: Double = 1.0, β: Double = 0.0): MatrixD = + val xt = x.ᵀ // transpose since ScalaTion computes means and stdevs column-wise +// val xn = ((xt - xt.mean) / (xt.stdev + eps)).ᵀ // z-transform to mean zero and stdev one + val xn = ((xt - xt.mean) / (xt.variance + eps).sqrt).ᵀ // z-transform to mean zero and stdev one + xn * γ + β // allows scaling and shifting for flexibility + end layerNorm + +end SimpleEncoder + +import SimpleEncoder._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleEncoder1` main function illustrates the calculation of attention (Q, K, V) + * for a Single Head as used in a Transformer. SEE LINK BELOW FOR MORE DETAILS. 
+ * + * @see pub.aimind.so/transformer-model-and-variants-of-transformer-chatgpt-3d423676e29c (URL) + * + * > runMain scalation.modeling.forecasting.neuralforecasting.simpleEncoder1 + */ +@main def simpleEncoder1 (): Unit = + + val d_k = 3 // dimensionality for Q, K, and V (if different need d_v) +// val heads = 1 // number of attention heads (d_model = d_k * heads) + + // input token can be (sub) words (for NLP) or patches (for TSF) + // three inputs after embedding, each embedding vector has size/dimensionality 4 + // these three embedding vectors are made up, but could use word2vec, etc. + val x = MatrixD ((3, 4), 1, 0, 1, 0, // input x0 for token 0 + 0, 2, 0, 2, // input x1 for token 1 + 1, 1, 1, 1) // input x2 for token 2 + + println (s"input (after embedding) x = $x") + + val wQ = MatrixD ((4, 3), 1, 0, 1, // Query weight matrix + 1, 0, 0, + 0, 0, 1, + 0, 1, 1) + val wK = MatrixD ((4, 3), 0, 0, 1, // Key weight matrix + 1, 1, 0, + 0, 1, 0, + 1, 1, 0) + val wV = MatrixD ((4, 3), 0, 2, 0, // Value weight matrix + 0, 3, 0, + 1, 0, 3, + 1, 1, 0) + + val q = x * wQ // Query: size of input x d_k + val k = x * wK // Key: size of input x d_k + val v = x * wV // Value: size of input x d_k (or d_v if different) + +// val att = attention (q, k, v, d_k) // compute attention based on correct size + val att = attention (q, k, v, 1) // approximation used by URL for checking purposes + + println (s""" + d_k = $d_k + wQ = $wQ + wK = $wK + wV = $wV + q = $q + k = $k + v = $v + att = $att + """) + +end simpleEncoder1 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleEncoder2` main function illustrates the steps in an "Encoder-Only Transformer" + * consisting of a single encoder block with a "Prediction Head" added for making forecasts. 
+ * > runMain scalation.modeling.forecasting.neuralforecasting.simpleEncoder2 + */ +@main def simpleEncoder2 (): Unit = + + import scalation.modeling.neuralnet.SimpleCNN.yy // example univariate time series of length 20 (t = 0 ... 19) + println (s"|| time series yy = \n $yy") + val y_h1 = 6.5 // next actual value y(20) to compare with forecast + // rolling validation skipped for simplicity + + val pl = 4 // patch length + val d_k = 6 // dimensionality for Q, K, and V (if different need d_v) +// val heads = 1 // number of attention heads (d_model = d_k * heads) + + // input tokens in this case are patches (for TSF) + // five inputs with each patch vector having size/dimensionality 4 + val (μ, σ) = (yy.mean, yy.stdev) + val y = (yy - μ) / (σ + eps) // normalize the whole time series via z-transformation + val xx = patchify (y, pl) // patchify to form a 5 by 4 matrix + println (s"|| input after normalize and patchify y -> xx = $xx") + + //----------------------------------------- + // Input Embedding + Positional Encodings | + //----------------------------------------- + + val wE = MatrixD.fill (xx.dim2, d_k, 0.1) // transformation matrix holding learnable embedding weights + // improve initialization: use random Normal and rescale + var x = embed (xx, wE) // embed xx in a higher dimensional space + println (s"|| input after (higher dimensional) embedding xx -> x = $x") + + val pe = encodePositions (xx.dim, d_k) // create positional encodings + x += pe // add positional encodings + println (s"|| positional encodings pe = $pe") + println (s"|| input (after adding positional encoding) x = $x") + + //------------------ + // Attention Layer | + //------------------ + + val wQ = MatrixD.fill (d_k, d_k, 0.1) // Query weight matrix: improve initialization: randomize + val wK = MatrixD.fill (d_k, d_k, 0.1) // Key weight matrix + val wV = MatrixD.fill (d_k, d_k, 0.1) // Value weight matrix + + val q = x * wQ // Query: size of input x d_k + val k = x * wK // Key: size of input 
x d_k + val v = x * wV // Value: size of input x d_k (or d_v if different) + + val att = attention (q, k, v, d_k) // compute attention + + println (s""" + wQ = $wQ + wK = $wK + wV = $wV + q = $q + k = $k + v = $v + att = $att + """) + + //----------------------------- + // Add & Norm After Attention | + //----------------------------- + + x += att // add attention to x (residual connection) + x = layerNorm (x) // apply layer normalization (use default γ and β) + println (s"|| after layer normalization x = $x") + + //-------------------------------------------------------- + // Two-Layer (Hidden, Output) Feed Forward Network (FFN) | + //-------------------------------------------------------- + + val d_ff = 2 * d_k // dimensionality of FFN hidden layer (commonly use four-fold expansion) + val w1 = MatrixD.fill (d_k, d_ff, 0.1) // weight matrix preceding the FFN hidden layer: improve initialization: randomize + val b1 = VectorD.fill (d_ff)(0.1) // bias vector for the FFN hidden layer + val w2 = MatrixD.fill (d_ff, d_k, 0.1) // weight matrix preceding the FFN output layer + val b2 = VectorD.fill (d_k)(0.1) // bias vector for the FFN output layer + + // FFN forward prop: 'refined input' -> hidden // input with embedding, position encoding, attention, layer norm + val u = x * w1 + b1 // hidden pre-activation matrix + val z = f_reLU.fM (u) // hidden matrix from f0 = reLU activation +// val z = f_geLU.fM (u) // hidden matrix from f0 = geLU activation + + // FFN forward prop: hidden -> output + val vv = z * w2 + b2 // output pre-activation matrix + val ŷ = vv // output/prediction matrix: typically no activation + + println (s""" + u = $u + z = $z + vv = $vv + ŷ = $ŷ + """) + + //----------------------- + // Add & Norm After FNN | + //----------------------- + + x += ŷ // add output from FNN to x (residual connection) + x = layerNorm (x) // apply layer normalization a second time (use default γ and β) + println (s"|| at end of encoder block x = $x") + + 
//------------------------------------------------------- + // Prediction Head/Additional Linear Layer: Horizon = 1 | + //------------------------------------------------------- + + val h_last = x(x.dim-1) // use the last patch for prediction (simple example) + println (s"h_last.dim = ${h_last.dim}") + + val w_out = VectorD.fill (d_k)(0.1) // weight vector for the prediction head, improve initialization: randomize + val b_out = 0.0 // bias scalar for the prediction head + + val ŷ_norm = h_last ∙ w_out + b_out // scalar on normalized (z) scale + val ŷ_h1 = μ + (σ + eps) * ŷ_norm // apply back-transformation to get forecast + + println (s"forecast (normalized) = $ŷ_norm") + println (s"forecast (original scale) = $ŷ_h1") + + val ε = y_h1 - ŷ_h1 // error at horizon 1 (actual - forecasted) + + println (s"forecast error at time t = ${y.dim}: ε = $ε") + + // extensions: (1) use more than the last patch from the encoder by flattening matrix x + // (2) add a hidden layer (possibly more than one) before the ending Linear Layer + // (3) perform multi-horizon forecasting + // (4) perform rolling validation + + // No backward prop -- should use AutoDiff due to complexity + +end simpleEncoder2 + diff --git a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/SimpleEncoder.scala.bak b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/SimpleEncoder.scala.bak new file mode 100644 index 000000000..4383934cf --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/SimpleEncoder.scala.bak @@ -0,0 +1,267 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John A. Miller + * @version 2.0 + * @date Sun Nov 9 23:27:14 EST 2025 + * @see LICENSE (MIT style license file). 
+ * + * @note Simple Implementation for an Encoder-Only Transformer, e.g., used for + * (1) Natural Language Processing (NLP) or + * (2) Time Series Forecasting (TSF) + * + * Limitations: one attention head, no dropout layer, single encoder block, + * no back-propagation + * + * @see sebastianraschka.com/blog/2023/self-attention-from-scratch.html + * @see arxiv.org/pdf/1706.03762.pdf (main paper) + */ + +package scalation +package modeling +package forecasting +package neuralforecasting + +import scala.math.{cos, sin, sqrt} + +import scalation.mathstat._ +import scalation.modeling.ActivationFun.{f_reLU, f_softmax} + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SimpleEncoder` object implements the attention method based on the + * scaled dot product. + */ +object SimpleEncoder: + + private val debug = debugf ("SimpleEncoder", true) // debug function + val eps = 1E-5 // very small value + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Patchify the univariate time series y by breaking it into non-overlapping + * patches of length pl. This simple implementation assumes stride s = pl, + * but PatchTST uses pl = 16 and s = 8 as defaults. + * @param y the given univariate time series + * @param pl the patch length + */ + def patchify (y: VectorD, pl: Int): MatrixD = + val m = y.dim + val np = m / pl + val x = new MatrixD (np, pl) + for i <- x.indices do x(i) = y(i*pl until (i+1)*pl) + x + end patchify + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Use a matrix transformation containing learnable weights to embed each patch + * vector into a higher dimensional space (providing enhanced vector similarity). + * The dimensionality of the embedding space is d_model. For this simple + * implementation d_model = d_k as there is only one attention head. 
+ * @param xx the matrix containing each patch as a row + * @param wE the dimensionality of the embedding space + */ + def embed (xx: MatrixD, wE: MatrixD): MatrixD = xx * wE + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Encode all the positions in the time series as vectors of length d_model. + * @param y_len the length of the time series (sequence length) + * @param d_k the dimensionality of the model (d_model = d_k here) + */ + def encodePositions (y_len: Int, d_k: Int): MatrixD = + val pe = new MatrixD (y_len, d_k) + for k <- pe.indices do + for i <- 0 until d_k/2 do + val den = 10000~^(2*i/d_k) + pe(k, 2*i) = sin (k / den) + pe(k, 2*i+1) = cos (k / den) + pe + end encodePositions + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Based on the Query (Q), Key (K), and Value (V) matrices, compute the attention. + * + * att = softmax (QK^T/√d_k) V + * + * @param q the Query: the input of interest + * @param k the Key: other locations to compare it with (for similarity) + * @param v the Value: the input value at the key locations + * @param d_k the dimensionality of Query, Key, and Value (if different use d_v) + */ + def attention (q: MatrixD, k: MatrixD, v: MatrixD, d_k: Int): MatrixD = + val qkt = q * k.𝐓 // repeated dot product + val sdp = qkt / sqrt (d_k) // scaled dot product (sdp) + val scr = f_softmax.fM (sdp) // attention scores + debug ("attention", s" qkt = $qkt, sdp = $sdp, scr = $scr)") + scr * v // attention (Q, K, V) + end attention + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform layer normalization on matrix x. 
The more general affine transformation + * is not supported in this simple implementation + * @param x the matrix to normalize + */ + def layerNorm (x: MatrixD): MatrixD = (x - x.mean) / (x.stdev + eps) + +end SimpleEncoder + +import SimpleEncoder._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleEncoder1` main function illustrates the calculation of attention (Q, K, V) + * for a Single Head as used in a Transformer. + * @see pub.aimind.so/transformer-model-and-variants-of-transformer-chatgpt-3d423676e29c (URL) + * > runMain scalation.modeling.forecasting.neuralforecasting.simpleEncoder1 + */ +@main def simpleEncoder1 (): Unit = + + val d_k = 3 // dimensionality for Q, K, and V (if different need d_v) +// val heads = 1 // number of attention heads (d_model = d_k * heads) + + // input token can be (sub) words (for NLP) or patches (for TSF) + // three inputs after embedding, each embedding vector has size/dimensionality 4 + // these three embedding vectors are made up, but could use word2vec, etc. 
+ val x = MatrixD ((3, 4), 1, 0, 1, 0, // input x0 for token 0 + 0, 2, 0, 2, // input x1 for token 1 + 1, 1, 1, 1) // input x2 for token 2 + + println (s"input (after embedding) x = $x") + + val wQ = MatrixD ((4, 3), 1, 0, 1, // Query weight matrix + 1, 0, 0, + 0, 0, 1, + 0, 1, 1) + val wK = MatrixD ((4, 3), 0, 0, 1, // Key weight matrix + 1, 1, 0, + 0, 1, 0, + 1, 1, 0) + val wV = MatrixD ((4, 3), 0, 2, 0, // Value weight matrix + 0, 3, 0, + 1, 0, 3, + 1, 1, 0) + + val q = x * wQ // Query: size of input x d_k + val k = x * wK // Key: size of input x d_k + val v = x * wV // Value: size of input x d_k (or d_v if different) + +// val att = attention (q, k, v, d_k) // compute attention based on correct size + val att = attention (q, k, v, 1) // approximation used by URL for checking purposes + + println (s""" + d_k = $d_k + wQ = $wQ + wK = $wK + wV = $wV + q = $q + k = $k + v = $v + att = $att + """) + +end simpleEncoder1 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleEncoder2` main function illustrates the steps in an Encoder-Only Transformer + * consisting of a single encoder block. 
+ * > runMain scalation.modeling.forecasting.neuralforecasting.simpleEncoder2 + */ +@main def simpleEncoder2 (): Unit = + + import neuralnet.SimpleCNN.yy // example univariate time series of length 20 + + val pl = 4 // patch length + val d_k = 6 // dimensionality for Q, K, and V (if different need d_v) +// val heads = 1 // number of attention heads (d_model = d_k * heads) + + // input tokens in this case are patches (for TSF) + // five inputs with each patch vector having size/dimensionality 4 + val (μ, σ) = (yy.mean, yy.stdev) + val y = (yy - μ) / (σ + eps) // normalize the whole time series via z-transformation + val xx = patchify (y, pl) // patchify to form a 5 by 4 matrix + + //----------------------------------------- + // Input Embedding + Positional Encodings | + //----------------------------------------- + + val wE = MatrixD.fill (xx.dim2, d_k, 0.1) // transformation matrix used for embedding +// var x = embed (xx, wE) // embed xx in a higher dimensional space (skip pe) + var x = embed (xx, wE) + encodePositions (y.dim, d_k) // embed xx in a higher dimensional space + // + positional encodings (pe) + + println (s"input (after normalize, patchify, embed, encode) x = $x") + + //------------------ + // Attention Layer | + //------------------ + + val wQ = MatrixD.fill (d_k, d_k, 0.1) // Query weight matrix + val wK = MatrixD.fill (d_k, d_k, 0.1) // Key weight matrix + val wV = MatrixD.fill (d_k, d_k, 0.1) // Value weight matrix + + val q = x * wQ // Query: size of input x d_k + val k = x * wK // Key: size of input x d_k + val v = x * wV // Value: size of input x d_k (or d_v if different) + + val att = attention (q, k, v, d_k) // compute attention + + println (s""" + wQ = $wQ + wK = $wK + wV = $wV + q = $q + k = $k + v = $v + att = $att + """) + + //----------------------------- + // Add & Norm After Attention | + //----------------------------- + + x += att // add attention to x (residual connection) + x = layerNorm (x) // apply layer normalization + println 
(s"after layer normalization x = $x") + + //--------------------------------------- + // Two-Layer Feed Forward Network (FFN) | + //--------------------------------------- + + val d_ff = 4 * d_k // dimensionality of FFN hidden layer (common four-fold expansion) + val w1 = MatrixD.fill (d_k, d_ff, 0.1) // weight matrix preceding the FFN hidden layer + val b1 = VectorD.fill (d_ff)(0.1) // bias vector for the FFN hidden layer + val w2 = MatrixD.fill (d_ff, d_k, 0.1) // weight matrix preceding the FFN output layer + val b2 = VectorD.fill (d_k)(0.1) // bias vector for the FFN output layer + + // FFN forward prop: 'refined input' -> hidden // input with embedding, position encoding, attention, layer norm + val u = x * w1 + b1 // hidden pre-activation matrix + val z = f_reLU.fM (u) // hidden matrix from f0 = reLU activation +// val z = f_geLU.fM (u) // hidden matrix from f0 = geLU activation + + // FFN forward prop: hidden -> output + val vv = z * w2 + b2 // output pre-activation matrix + val ŷ = vv // output/prediction matrix: typically no activation + + println (s""" + u = $u + z = $z + vv = $vv + ŷ = $ŷ + """) + + //----------------------- + // Add & Norm After FNN | + //----------------------- + + x += ŷ // add output from FNN to x (residual connection) + x = layerNorm (x) // apply layer normalization a second time + println (s"at end of encoder block x = $x") + + //-------------------------------------------------------- + // Use Simple MLP instead of Decoder to Make Predictions | + //-------------------------------------------------------- + +// reverse the embeddings ? 
+// val ε = y - ŷ // error matrix + + // No backward prop -- should use AutoDiff due to complexity + +end simpleEncoder2 + diff --git a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/SimpleEncoder.scala.txt b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/SimpleEncoder.scala.txt new file mode 100644 index 000000000..85f79379d --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/SimpleEncoder.scala.txt @@ -0,0 +1,302 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John A. Miller + * @version 2.0 + * @date Sun Nov 9 23:27:14 EST 2025 + * @see LICENSE (MIT style license file). + * + * @note Simple Implementation for an Encoder-Only Transformer, e.g., used for + * (1) Natural Language Processing (NLP) or + * (2) Time Series Forecasting (TSF) + * + * Limitations: one attention head, no dropout layer, single encoder block, + * no back-propagation + * + * @see sebastianraschka.com/blog/2023/self-attention-from-scratch.html + * @see arxiv.org/pdf/1706.03762.pdf (main paper) + */ + +package scalation +package modeling +package forecasting +package neuralforecasting + +import scala.math.{cos, sin, sqrt} + +import scalation.mathstat._ +import scalation.modeling.ActivationFun.{f_reLU, f_softmax} + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SimpleEncoder` object implements the attention method based on the + * scaled dot product. + */ +object SimpleEncoder: + + private val debug = debugf ("SimpleEncoder", true) // debug function + val eps = 1E-5 // very small value + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Patchify the univariate time series y by breaking it into non-overlapping + * patches of length pl. This simple implementation assumes stride s = pl, + * but PatchTST uses pl = 16 and s = 8 as defaults. 
+ * @param y the given univariate time series + * @param pl the patch length + */ + def patchify (y: VectorD, pl: Int): MatrixD = + val m = y.dim + val np = m / pl + val x = new MatrixD (np, pl) + for i <- x.indices do x(i) = y(i*pl until (i+1)*pl) + x + end patchify + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Use a matrix transformation containing learnable weights to embed each patch + * vector into a higher dimensional space (providing enhanced vector similarity). + * The dimensionality of the embedding space is d_model. For this simple + * implementation d_model = d_k as there is only one attention head. + * @param xx the matrix containing each patch as a row + * @param wE the dimensionality of the embedding space + */ + def embed (xx: MatrixD, wE: MatrixD): MatrixD = xx * wE + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Encode all the positions in the time series as vectors of length d_model. + * @param len the sequence length + * @param d_k the dimensionality of the model (d_model = d_k here) + */ + def encodePositions (len: Int, d_k: Int): MatrixD = + val pe = new MatrixD (len, d_k) + for k <- pe.indices do + for i <- 0 until d_k/2 do + val den = 10000.0~^(2.0*i/d_k) + pe(k, 2*i) = sin (k / den) + pe(k, 2*i+1) = cos (k / den) + pe + end encodePositions + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Based on the Query (Q), Key (K), and Value (V) matrices, compute the attention. 
+ * + * att = softmax (QK^T/√d_k) V + * + * @param q the Query: the input of interest + * @param k the Key: other locations to compare it with (for similarity) + * @param v the Value: the input value at the key locations + * @param d_k the dimensionality of Query, Key, and Value (if different use d_v) + */ + def attention (q: MatrixD, k: MatrixD, v: MatrixD, d_k: Int): MatrixD = + val qkt = q * k.𝐓 // repeated dot product + val sdp = qkt / sqrt (d_k) // scaled dot product (sdp) + val scr = f_softmax.fM (sdp) // attention scores + debug ("attention", s" qkt = $qkt, sdp = $sdp, scr = $scr") + scr * v // attention (Q, K, V) + end attention + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform layer normalization on matrix x. While batch normalization normalizes + * over a mini-batch, layer normalization normalizes over a single instance or row. + * @see www.geeksforgeeks.org/deep-learning/what-is-layer-normalization/ + * An affine transformation is supported via the γ and β learnable parameters. + * @param x the matrix to normalize + * @param γ the scaling learnable parameter (defaults to 1.0) + * @param β the shifting learnable parameter (defaults to 0.0) + */ + def layerNorm (x: MatrixD, γ: Double = 1.0, β: Double = 0.0): MatrixD = + val xt = x.𝐓 // transpose since ScalaTion computes means and stdevs column-wise + val xn = ((xt - xt.mean) / (xt.stdev + eps)).𝐓 // z-transform to mean zero and stdev one + xn * γ + β // allows scaling and shifting for flexibility + end layerNorm + +end SimpleEncoder + +import SimpleEncoder._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleEncoder1` main function illustrates the calculation of attention (Q, K, V) + * for a Single Head as used in a Transformer. SEE LINK BELOW FOR MORE DETAILS. 
+ * + * @see pub.aimind.so/transformer-model-and-variants-of-transformer-chatgpt-3d423676e29c (URL) + * + * > runMain scalation.modeling.forecasting.neuralforecasting.simpleEncoder1 + */ +@main def simpleEncoder1 (): Unit = + + val d_k = 3 // dimensionality for Q, K, and V (if different need d_v) +// val heads = 1 // number of attention heads (d_model = d_k * heads) + + // input token can be (sub) words (for NLP) or patches (for TSF) + // three inputs after embedding, each embedding vector has size/dimensionality 4 + // these three embedding vectors are made up, but could use word2vec, etc. + val x = MatrixD ((3, 4), 1, 0, 1, 0, // input x0 for token 0 + 0, 2, 0, 2, // input x1 for token 1 + 1, 1, 1, 1) // input x2 for token 2 + + println (s"input (after embedding) x = $x") + + val wQ = MatrixD ((4, 3), 1, 0, 1, // Query weight matrix + 1, 0, 0, + 0, 0, 1, + 0, 1, 1) + val wK = MatrixD ((4, 3), 0, 0, 1, // Key weight matrix + 1, 1, 0, + 0, 1, 0, + 1, 1, 0) + val wV = MatrixD ((4, 3), 0, 2, 0, // Value weight matrix + 0, 3, 0, + 1, 0, 3, + 1, 1, 0) + + val q = x * wQ // Query: size of input x d_k + val k = x * wK // Key: size of input x d_k + val v = x * wV // Value: size of input x d_k (or d_v if different) + +// val att = attention (q, k, v, d_k) // compute attention based on correct size + val att = attention (q, k, v, 1) // approximation used by URL for checking purposes + + println (s""" + d_k = $d_k + wQ = $wQ + wK = $wK + wV = $wV + q = $q + k = $k + v = $v + att = $att + """) + +end simpleEncoder1 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleEncoder2` main function illustrates the steps in an "Encoder-Only Transformer" + * consisting of a single encoder block with a "Prediction Head" added for making forecasts. 
+ * > runMain scalation.modeling.forecasting.neuralforecasting.simpleEncoder2 + */ +@main def simpleEncoder2 (): Unit = + + import neuralnet.SimpleCNN.yy // example univariate time series of length 20 (t = 0 ... 19) + println (s"|| time series yy = \n $yy") + val y_h1 = 6.5 // next actual value y(20) to compare with forecast + // rolling validation skipped for simplicity + + val pl = 4 // patch length + val d_k = 6 // dimensionality for Q, K, and V (if different need d_v) +// val heads = 1 // number of attention heads (d_model = d_k * heads) + + // input tokens in this case are patches (for TSF) + // five inputs with each patch vector having size/dimensionality 4 + val (μ, σ) = (yy.mean, yy.stdev) + val y = (yy - μ) / (σ + eps) // normalize the whole time series via z-transformation + val xx = patchify (y, pl) // patchify to form a 5 by 4 matrix + println (s"|| input after normalize and patchify y -> xx = $xx") + + //----------------------------------------- + // Input Embedding + Positional Encodings | + //----------------------------------------- + + val wE = MatrixD.fill (xx.dim2, d_k, 0.1) // transformation matrix holding learnable embedding weights + // improve initialization: use random Normal and rescale + var x = embed (xx, wE) // embed xx in a higher dimensional space + println (s"|| input after (higher dimensional) embedding xx -> x = $x") + + val pe = encodePositions (xx.dim, d_k) // create positional encodings + x += pe // add positional encodings + println (s"|| positional encodings pe = $pe") + println (s"|| input (after adding positional encoding) x = $x") + + //------------------ + // Attention Layer | + //------------------ + + val wQ = MatrixD.fill (d_k, d_k, 0.1) // Query weight matrix: improve initialization: randomize + val wK = MatrixD.fill (d_k, d_k, 0.1) // Key weight matrix + val wV = MatrixD.fill (d_k, d_k, 0.1) // Value weight matrix + + val q = x * wQ // Query: size of input x d_k + val k = x * wK // Key: size of input x d_k + val v = x 
* wV // Value: size of input x d_k (or d_v if different) + + val att = attention (q, k, v, d_k) // compute attention + + println (s""" + wQ = $wQ + wK = $wK + wV = $wV + q = $q + k = $k + v = $v + att = $att + """) + + //----------------------------- + // Add & Norm After Attention | + //----------------------------- + + x += att // add attention to x (residual connection) + x = layerNorm (x) // apply layer normalization (use default γ and β) + println (s"|| after layer normalization x = $x") + + //-------------------------------------------------------- + // Two-Layer (Hidden, Output) Feed Forward Network (FFN) | + //-------------------------------------------------------- + + val d_ff = 2 * d_k // dimensionality of FFN hidden layer (commonly use four-fold expansion) + val w1 = MatrixD.fill (d_k, d_ff, 0.1) // weight matrix preceding the FFN hidden layer: improve initialization: randomize + val b1 = VectorD.fill (d_ff)(0.1) // bias vector for the FFN hidden layer + val w2 = MatrixD.fill (d_ff, d_k, 0.1) // weight matrix preceding the FFN output layer + val b2 = VectorD.fill (d_k)(0.1) // bias vector for the FFN output layer + + // FFN forward prop: 'refined input' -> hidden // input with embedding, position encoding, attention, layer norm + val u = x * w1 + b1 // hidden pre-activation matrix + val z = f_reLU.fM (u) // hidden matrix from f0 = reLU activation +// val z = f_geLU.fM (u) // hidden matrix from f0 = geLU activation + + // FFN forward prop: hidden -> output + val vv = z * w2 + b2 // output pre-activation matrix + val ŷ = vv // output/prediction matrix: typically no activation + + println (s""" + u = $u + z = $z + vv = $vv + ŷ = $ŷ + """) + + //----------------------- + // Add & Norm After FNN | + //----------------------- + + x += ŷ // add output from FNN to x (residual connection) + x = layerNorm (x) // apply layer normalization a second time (use default γ and β) + println (s"|| at end of encoder block x = $x") + + 
//------------------------------------------------------- + // Prediction Head/Additional Linear Layer: Horizon = 1 | + //------------------------------------------------------- + + val h_last = x(x.dim-1) // use the last patch for prediction (simple example) + println (s"h_last.dim = ${h_last.dim}") + + val w_out = VectorD.fill (d_k)(0.1) // weight vector for the prediction head, improve initialization: randomize + val b_out = 0.0 // bias scalar for the prediction head + + val ŷ_norm = h_last ∙ w_out + b_out // scalar on normalized (z) scale + val ŷ_h1 = μ + (σ + eps) * ŷ_norm // apply back-transformation to get forecast + + println (s"forecast (normalized) = $ŷ_norm") + println (s"forecast (original scale) = $ŷ_h1") + + val ε = y_h1 - ŷ_h1 // error at horizon 1 (actual - forecasted) + + println (s"forecast error at time t = ${y.dim}: ε = $ε") + + // extensions: (1) use more than the last patch from the encoder by flattening matrix x + // (2) add a hidden layer (possibly more than one) before the ending Linear Layer + // (3) perform multi-horizon forecasting + // (4) perform rolling validation + + // No backward prop -- should use AutoDiff due to complexity + +end simpleEncoder2 + diff --git a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/TrEncoderLayer.scala b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/TrEncoderLayer.scala deleted file mode 100644 index 55d745db8..000000000 --- a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/TrEncoderLayer.scala +++ /dev/null @@ -1,104 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John A. Miller, Yousef Fekri Dabanloo - * @version 2.0 - * @date Fri Oct 13 22:21:37 EDT 2023 - * @see LICENSE (MIT style license file). 
- * - * @note Model Framework: Transformer Encoder Layer - * - * @see sebastianraschka.com/blog/2023/self-attention-from-scratch.html - * @see arxiv.org/pdf/1706.03762.pdf (main paper) - */ - -package scalation -package modeling -package forecasting -package neuralforecasting - -import scalation.mathstat._ -import scalation.random.{RandomMatD, RandomTenD} - -import ActivationFun._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TrEncoderLayer` class consists of a Multi-Head Self-Attention and a Feed-Forward - * Neural Network (FFNN) sub-layers. - * @see pytorch.org/docs/stable/generated/torch.nn.TransformerEncoderLayer.html#torch.nn.TransformerEncoderLayer - * @param n_var the size of the input vector x_t (number of variables) - * @param n_mod the size of the output (dimensionality of the model, d_model) - * @param heads the number of attention heads - * @param n_v the size of the value vectors - * @param n_z the size of the hidden layer in the Feed-Forward Neural Network - * @param f the activation function family (used by alinear1) - * @param p_drop the probability of setting an element to zero in a dropout layer - * @param norm_eps a small values used in normalization to avoid divide by zero - * @param norm_first whether layer normalization should be done first (see apply method) - */ -class TrEncoderLayer (n_var: Int, n_mod: Int = 512, heads: Int = 8, - n_v: Int = -1, n_z: Int = 2024, f: AFF = f_reLU, - p_drop: Double = 0.5, norm_eps: Double = 1E-5, norm_first: Boolean = false) - extends Attention (n_var, n_mod, heads, n_v): - - private val w_q = rmg.gen // weight matrix for query q - private val w_k = rmg.gen // weight matrix for key k - private val w_v = rmg.gen // weight matrix for value v - - val rtg = RandomTenD (heads, n_mod, n_k, 1) // random (0, 1) tensor generator for q, k - val rtg_v = RandomTenD (heads, n_mod, n_val, 1) // random (0, 1) tensor generator for v - val rmg_o = RandomMatD (heads*n_val, 
n_mod, 1) // random (0, 1) matrix generator for for w_o - - private val wt_q = rtg.gen // MH query weight tensor: heads x n_mod x n_k - private val wt_k = rtg.gen // MH key weight tensor: heads x n_mod x n_k - private val wt_v = rtg_v.gen // MH value weight tensor; heads x n_mod x n_val - private val w_o = rmg_o.gen // MH overall weight matrix: n_mod x n_mod - - private val dropout_sa = DropoutLayer (p_drop) // dropout layer (sa_block) - - private val alinear1 = DenseLayer (n_mod, n_z, f) // activated linear layer (ff_block) - private val dropout1 = DropoutLayer (p_drop) // dropout layer (ff_block) - private val linear2 = DenseLayer (n_z, n_mod) // linear layer (ff_block) - private val dropout2 = DropoutLayer (p_drop) // dropout layer (ff_block) - - private val norm1 = LayerNorm (true, norm_eps) // normalization layer (apply) - private val norm2 = LayerNorm (true, norm_eps) // normalization layer (apply) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forward pass: Compute this encoder layer's result z by using Multi-Head Self-Attention - * followed by a Feed-Forward Neural Network. - * @param x the input matrix - */ - def apply (x: MatrixD): MatrixD = - banner ("1. Multi-Head Self-Attention: query q, key k, value v") - banner ("2. Fee-Forward Neural Network") - - var z: MatrixD = null - if norm_first then - z = x + sa_block (norm1 (x)) - z = z + ff_block (norm2 (z)) - else - z = norm1 (x + sa_block (x)) - z = norm2 (z + ff_block (z)) - end if - z - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the Multi-Head Self-Attention result. 
- * @param x the input matrix - */ - def sa_block (x: MatrixD): MatrixD = - val (q, k, v) = queryKeyValue (x, w_q, w_k, w_v) - dropout_sa (attentionMH (q, k, v, wt_q, wt_k, wt_v, w_o)) - end sa_block - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the Feed-forward Neural Network result. - * @param x the input matrix - */ - def ff_block (x: MatrixD): MatrixD = - dropout2 (linear2 (dropout1 (alinear1 (x)))) - end ff_block - -end TrEncoderLayer - diff --git a/src/main/scala/scalation/modeling/forecasting/neuralforecasting/TrfEncoder.scala b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/TrfEncoder.scala new file mode 100644 index 000000000..1d13ce43d --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/neuralforecasting/TrfEncoder.scala @@ -0,0 +1,189 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John A. Miller, Yousef Fekri Dabanloo + * @version 2.0 + * @date Fri Oct 13 22:21:37 EDT 2023 + * @see LICENSE (MIT style license file). + * + * @note Model Framework: Transformer Encoder Layer + * + * @see sebastianraschka.com/blog/2023/self-attention-from-scratch.html + * @see arxiv.org/pdf/1706.03762.pdf (main paper) + */ + +package scalation +package modeling +package forecasting +package neuralforecasting + +import scalation.mathstat._ +import scalation.random.{RandomMatD, RandomTenD} + +import ActivationFun._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `TrfEncoderLayer` class consists of a Multi-Head Self-Attention and a Feed-Forward + * Neural Network (FFNN) sub-layers. 
+ * @see pytorch.org/docs/stable/generated/torch.nn.TransformerEncoderLayer.html#torch.nn.TransformerEncoderLayer + * @param x the input data matrix after embedding (number of rows/instances by embedding dimension) + * @param heads the number of attention heads (e.g., 1 to 8) + * @param f the activation function family (used by alinear1) + * @param p_drop the probability of setting an element to zero in a dropout layer (e.g., .0 to .5) + * @param norm_eps a small values used in normalization to avoid divide by zero + * @param norm_first whether layer normalization should be done first (see apply method) + */ +class TrfEncoderLayer (x: MatrixD, heads: Int = 4, f: AFF = f_reLU, + initW: Array [MatrixD] = null, + p_drop: Double = 0.2, norm_eps: Double = 1E-5, norm_first: Boolean = false) + extends Attention (x.dim2, x.dim2 * heads, heads, x.dim2): + + private val m_x = x.dim // size/number of instances in input x + private val d_k = x.dim2 // dimension of query (Q) and key (K) + private val d_v = d_k // dimension of value (V) for simplicity same, for flexibility different + private val d_mod = heads * d_v // the dimensionality of the model (d_model) + private val d_ff = 4 * d_mod // the size of the hidden layer (n_z) in the Feed-Forward Neural Network + + private val rmg = RandomMatD (d_k, m_x, 1) + private val (w_q, w_k, w_v) = if initW == null then (rmg.gen, rmg.gen, rmg.gen) + else (initW(0), initW(1), initW(2)) // weight matrices for query q, key k, value v + + val rtg = RandomTenD (heads, d_mod, d_k, 1) // random (0, 1) tensor generator for q, k + val rtg_v = RandomTenD (heads, d_mod, d_v, 1) // random (0, 1) tensor generator for v + val rmg_o = RandomMatD (heads*d_v, d_mod, 1) // random (0, 1) matrix generator for for w_o + + private val wt_q = rtg.gen // MH query weight tensor: heads x d_mod x d_k + private val wt_k = rtg.gen // MH key weight tensor: heads x d_mod x d_k + private val wt_v = rtg_v.gen // MH value weight tensor; heads x d_mod x d_val + private 
val w_o = rmg_o.gen // MH overall weight matrix: d_mod x d_mod + + private val dropout_sa = DropoutLayer (p_drop) // dropout layer (sa_block) + + private val alinear1 = DenseLayer (d_mod, d_ff, f) // activated linear layer (ff_block) + private val dropout1 = DropoutLayer (p_drop) // dropout layer (ff_block) + private val linear2 = DenseLayer (d_ff, d_mod) // linear layer (ff_block) + private val dropout2 = DropoutLayer (p_drop) // dropout layer (ff_block) + + private val norm1 = LayerNorm (true, norm_eps) // normalization layer (apply) + private val norm2 = LayerNorm (true, norm_eps) // normalization layer (apply) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Forward pass: Compute this encoder layer's result z by using Multi-Head Self-Attention + * followed by a Feed-Forward Neural Network. + */ + def forward (): MatrixD = + banner ("1. Multi-Head Self-Attention: query q, key k, value v") + banner ("2. Feed-Forward Neural Network") + + var z: MatrixD = null + if norm_first then + z = x + sa_block (norm1 (x)) + z = z + ff_block (norm2 (z)) + else + z = norm1 (x + sa_block (x)) + z = norm2 (z + ff_block (z)) + z + end forward + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the Multi-Head Self-Attention result. + * @param x the input matrix + */ + def sa_block (x: MatrixD): MatrixD = + val (q, k, v) = queryKeyValue (x, w_q, w_k, w_v) + dropout_sa (attentionMH (q, k, v, wt_q, wt_k, wt_v, w_o)) + end sa_block + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the Feed-forward Neural Network result. 
+ * @param x the input matrix + */ + def ff_block (x: MatrixD): MatrixD = + dropout2 (linear2 (dropout1 (alinear1 (x)))) + end ff_block + +end TrfEncoderLayer + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `trfEncoderTest` main function illustrates the use of Gradient Descent (GD) to optimize + * the weights/parameters of a simple neural network (3-layer (1 hidden) Neural Network). + * @see pub.aimind.so/transformer-model-and-variants-of-transformer-chatgpt-3d423676e29c (URL) + * > runMain scalation.modeling.forecasting.neuralforecasting.trfEncoderTest + */ +@main def trfEncoderTest (): Unit = + + import math.sqrt + import ActivationFun.f_softmax + + // three input tokens after embedding (each embedding vector has size 4) + val x = MatrixD ((3, 4), 1, 0, 1, 0, // input x0 + 0, 2, 0, 2, // input x1 + 1, 1, 1, 1) // input x2 + + println (s"input (after embedding) x = $x") + + val d_k = 3 // dimensionality for Q, K, and V (if different need d_v) +// val heads = 1 // number of attention heads (d_model = d_k * heads) + + val wQ = MatrixD ((4, 3), 1, 0, 1, // Query weight matrix + 1, 0, 0, + 0, 0, 1, + 0, 1, 1) + val wK = MatrixD ((4, 3), 0, 0, 1, // Key weight matrix + 1, 1, 0, + 0, 1, 0, + 1, 1, 0) + val wV = MatrixD ((4, 3), 0, 2, 0, // Value weight matrix + 0, 3, 0, + 1, 0, 3, + 1, 1, 0) + + val q = x * wQ // Query: size of input x d_k + val k = x * wK // Key: size of input x d_k + val v = x * wV // Value: size of input x d_k (or d_v if different) + + val qk = q * k.ᵀ // repeated dot product + val sq_ = qk / sqrt (d_k) // corrrect scaled dot product formula for sqk + val sqk = qk / 1.0 // approximation used by URL + val scr = f_softmax.fM (sqk) // attention scores + val att = scr * v // attention (Q, K, V) + + println (s""" + wQ = $wQ + wK = $wK + wV = $wV + q = $q + k = $k + v = $v + qk = $qk + sq_ = $sq_ + sqk = $sqk + scr = $scr + att = $att + """) + +end trfEncoderTest + + 
+//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `trfEncoderTest2` main function illustrates the use of Gradient Descent (GD) to optimize + * the weights/parameters of a simple neural network (3-layer (1 hidden) Neural Network). + * @see + * > runMain scalation.modeling.forecasting.neuralforecasting.trfEncoderTest2 + * +@main def trfEncoderTest2 (): Unit = + + import Attention.x + + val n_var = x.dim2 // number of variables in input vector x_t + println (s"n_var = $n_var") + val n_mod = 72 // size of each query/key vector (q_t, k_t) + val heads = 3 // number of attention heads + val n_val = 28 // size of the value vector v_t + + val trf = new TrfEncoderLayer (n_var, n_mod, heads, n_val) + println (trf.forward ()) + +end trfEncoderTest2 + */ + diff --git a/src/main/scala/scalation/modeling/forecasting/nonlinear/NARX_SR_D.scala b/src/main/scala/scalation/modeling/forecasting/nonlinear/NARX_SR_D.scala new file mode 100644 index 000000000..fd951bf1f --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/nonlinear/NARX_SR_D.scala @@ -0,0 +1,514 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Yousef Fekri Dabanloo + * @version 2.0 + * @date Wed Nov 19 22:58:47 EST 2025 + * @see LICENSE (MIT style license file). 
+ * + * @note Model: Auto-Regressive on lagged y and xe with SR terms (NARX_SR_D) using OLS - Direct Forecasting + * + * @see `scalation.modeling.Regression` + */ + +package scalation +package modeling +package forecasting +package nonlinear + +import scala.runtime.ScalaRunTime.stringOf +import scala.collection.mutable.{ArrayBuffer, LinkedHashSet => LSET} + +import scalation.optimization.quasi_newton.{LBFGS_B => OPTIMIZER} +import scalation.modeling.{RidgeRegression => REGRESSION} + +import scalation.mathstat._ +import TransformT._ + +import MakeMatrix4TS._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `NARX_SR_D` class provides time series analysis capabilities for NARX Symbolic + * Regression (SR) models with Direct (D) forecasting. These models include trend, linear, + * power, root, and cross terms for the single endogenous (y) variable and zero or more + * exogenous (xe) variables. + * Given time series data stored in vector y and matrix xe, its next value y_t = combination + * of last p values of y, y^p, y^r and the last q values of each exogenous variable xe_j, + * again in linear, power and root forms (as well as ENDO-EXO cross terms). + * + * y_t = b dot x_t + e_t + * + * where y_t is the value of y at time t, x_t is a vector of inputs, and e_t is the + * residual/error term. + * @see `MakeMatrix4TS` for hyper-parameter specifications. 
+ * @param x the data/input matrix (lagged columns of y and xe) @see `NARX_SR_D.apply` + * @param y the response/output vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param n_exo the number of exogenous variables + * @param fname the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param tForms the map of transformations applied + * @param w_nl the non-linear parameters + */ +class NARX_SR_D (x: MatrixD, y: MatrixD, hh: Int, n_exo: Int, fname: Array [String], + tRng: Range = null, hparam: HyperParameter = hp, + bakcast: Boolean = false, + tForms: TransformMap = Map ("tForm_y" -> null), w_nl: VectorD = VectorD (0)) + extends ARX_D (x, y, hh, n_exo, fname, tRng, hparam, bakcast, tForms): + + private val debug = debugf ("NARX_SR_D", true) // debug function + + _modelName = s"NARX_SR_D($p, $q, $n_exo)" + + debug ("init", s"$modelName with with $n_exo exogenous variables and additional term spec = $spec") +// debug ("init", s"[ x | y ] = ${x ++^ y}") + + def parameter_nl: VectorD = w_nl + println(s"x.dims = ${x.dims}") + +end NARX_SR_D + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `NARX_SR_D` companion object provides factory methods for the `NARX_SR_D` class. + */ +object NARX_SR_D: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create an `NARX_SR_D` object by building an input matrix xy and then calling the + * `NARX_SR_D` constructor. 
+ * @param xe the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param fEndo_enabled the set of transforms to be used for the endogenous + * @param fExo_enabled the array containing the sets of transforms to be used for the exogenous + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ + def apply (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, + tRng: Range = null, hparam: HyperParameter = hp, + fEndo_enabled: LSET[TransformT] = LSET(Root), + fExo_enabled: Array [LSET [TransformT]] = Array (LSET(Pow), LSET(Pow)), + bakcast: Boolean = false): NARX_SR_D = + + val (wInit_nl, _, _) = initializeW (fEndo_enabled, fExo_enabled) + val (fEndo, fExo) = getTransforms(wInit_nl, fEndo_enabled, fExo_enabled) + + var xe_bfill: MatrixD = null + if xe.dim2 > 0 and hparam("q").toInt > 0 then + xe_bfill = new MatrixD (xe.dim, xe.dim2) + for j <- xe.indices2 do xe_bfill(?, j) = backfill (xe(?, j)) // backfill each exogenous variable + + val fEndo_size = fEndo_enabled.size + val fExo_sizeArr: Array [Int] = fExo_enabled.map (_.size) + + val tForms = Map ("tForm_y" -> null, "fEndo" -> fEndo) //, "fExo" -> fExo) + val xy = buildMatrix (xe_bfill, y, hparam, fEndo, fExo, bakcast) + val fname = if fname_ == null then formNames (xe.dim2, hparam, fEndo_size, fExo_sizeArr) else fname_ + val yy = makeMatrix4Y (y, hh, bakcast) + new NARX_SR_D (xy, yy, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms) + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create an `NARX_SR_D` object by building an input matrix xy and then calling the + * `NARX_SR_D` constructor, with rescaling of endogneous and exogenous 
variable values. + * @param xe the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hh the maximum forecasting horizon (h = 1 to hh) + * @param fname_ the feature/variable names + * @param tRng the time range, if relevant (time index may suffice) + * @param hparam the hyper-parameters + * @param fEndo_enabled the set of transforms to be used for the endogenous + * @param fExo_enabled the array containing the sets of transforms to be used for the exogenous + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + * @param tFormScale the transform for y + */ + def rescale (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array[String] = null, + tRng: Range = null, hparam: HyperParameter = hp, + fEndo_enabled: LSET [TransformT] = LSET (Root), + fExo_enabled: Array [LSET [TransformT]] = Array (LSET (Pow), LSET (Pow)), + bakcast: Boolean = false, + tFormScale: VectorD | MatrixD => Transform = MinMax.form): NARX_SR_D = + + // Rescales using rangeForm, then transforms, then lagging. + // Uses LBFGS_B optimizer to fit all the linear and nonlinear parameters. 
+ + // rescale y + val tr_size = Model.trSize (y.dim) + val tForm_y = tFormScale (y(0 until tr_size)) // use (mean, std) of training set for both In-sample and TnT + val y_scl = tForm_y.f(y) + if tForm_y.getClass.getSimpleName == "zForm" then hparam("nneg") = 0 + + var xe_bfill: MatrixD = null + if xe.dim2 > 0 and hparam("q").toInt > 0 then + xe_bfill = new MatrixD (xe.dim, xe.dim2) + for j <- xe.indices2 do xe_bfill(?, j) = backfill (xe(?, j)) // backfill each exogenous variable + if tFormScale != null then + val tForm_exo = tFormScale (xe_bfill(0 until tr_size)) + xe_bfill = tForm_exo.f (xe_bfill) // rescale the backfilled exogenous variable + + val fEndo_size = fEndo_enabled.size + val fExo_sizeArr: Array [Int] = fExo_enabled.map (_.size) + val w_nl = optimize2 (xe_bfill, y_scl, hparam, fEndo_enabled, fExo_enabled, bakcast) // set non-linear parameters + + val (fEndo, fExo) = getTransforms (w_nl, fEndo_enabled, fExo_enabled) + val tForms = Map ("tForm_y" -> tForm_y, "fEndo" -> fEndo)//, "fExo" -> fExo) + val xy = buildMatrix (xe_bfill, y_scl, hparam, fEndo, fExo, bakcast) + val fname = if fname_ == null then formNames (xe.dim2, hparam, fEndo_size, fExo_sizeArr) else fname_ + val yy = makeMatrix4Y (y_scl, hh, bakcast) + new NARX_SR_D (xy, yy, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms, w_nl) + end rescale + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build the input matrix by combining the spec + p columns for the trend and + * endogenous variable with the q * xe.dim2 columns for the exogenous variables. + * When cross = true, additional cross terms will be added. Columns produced + * by transformations will be added as well. 
+ * @param xe_bfill the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hp_ the hyper-parameters + * @param fEndo the transformation functions to apply on the endogenous variables + * @param fExo the transformation functions to apply on the exogenous variables + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ + def buildMatrix (xe_bfill: MatrixD, y: VectorD, hp_ : HyperParameter, + fEndo: Array [Transform], fExo: Array [Array [Transform]], bakcast: Boolean): MatrixD = + + val (p, q, spec, lwave, cross) = (hp_("p").toInt, hp_("q").toInt, hp_("spec").toInt, hp_("lwave").toDouble, hp_("cross").toInt == 1) + + // apply transformations fEndo to the endogenous variables and add these columns to x_endo + var x_endo = MatrixD (y).ᵀ // make a matrix out of vector y + for tr <- fEndo do x_endo = x_endo :^+ tr.f(y) // add each transformation of the endogenous variable + + // make matrix xy for trend terms and lagged terms of the endogenous variable + var xy = makeMatrix4T (y, spec, lwave, bakcast) ++^ // trend terms + makeMatrix4L (x_endo, p, bakcast) // lagged linear terms + + // apply transformations fExo to the exogenous variables and add there columns to x_exo + if xe_bfill!= null then + var x_exo = new MatrixD (xe_bfill.dim, 0) + for j <- xe_bfill.indices2 do + val xe_j = xe_bfill(?, j) // extract the (j+1)th exogenouse variable + x_exo = x_exo :^+ xe_j // add the exogenous variable + val fExo_j = fExo(j) // extract the transformations for the (j+1)th exogenouse variable + for tr <- fExo_j do x_exo = x_exo :^+ tr.f(xe_j) // add each transformation of the exogenous variable + + // add cross terms of the endogenous and exogenous variables + if cross then x_exo = x_exo ++^ y *~: xe_bfill // element-wise multiplication of vector y and matrix xe + xy = xy ++^ makeMatrix4L (x_exo, q, bakcast) // add lagged exogenous term to xy + + xy // return the built 
matrix xy + end buildMatrix + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Fit the nonlinear + linear parameters using LBFGS_B. + * @param xe_bfill the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hp_ the hyper-parameters + * @param fEndo the transformation functions to apply on the endogenous variables + * @param fExo the transformation functions to apply on the exogenous variables + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ + def optimize (xe_bfill: MatrixD, y: VectorD, hparam: HyperParameter = hp, + fEndo_enabled: LSET [TransformT] = LSET (Root), + fExo_enabled: Array [LSET [TransformT]] = Array (LSET (Pow), LSET (Pow)), + bakcast: Boolean = false): VectorD = + val (p, q, spec, crs) = (hparam("p").toInt, hparam("q").toInt, hparam("spec").toInt, hparam("cross").toInt) + + val fEndo_size = fEndo_enabled.size + val fExo_size = fExo_enabled.map (_.size).sum + val (wInit_nl, l_wInit_nl, u_wInit_nl) = initializeW (fEndo_enabled, fExo_enabled) + val w_nl_size = wInit_nl.dim + + val n_exo = if xe_bfill != null then xe_bfill.dim2 else 0 + val q_exo = if xe_bfill != null then q else 0 + val w_l_size = spec + p * (1 + fEndo_size) + q_exo * (n_exo + fExo_size + n_exo * crs) + // FIX: compare with the initialization of LR + val wInit = wInit_nl ++ VectorD.fill (w_l_size)(0.1) // add initial linear weights + val l_wInit = l_wInit_nl ++ VectorD.fill (w_l_size)(-10.0) // add lower bounds of linear weights + val u_wInit = u_wInit_nl ++ VectorD.fill (w_l_size)(10.0) // add upper bounds of linear weights + var count = 0 + + def loss: FunctionV2S = (ww: VectorD) => + if count % 10000 == 0 then println (s"count = $count, ww = $ww") + count += 1 + val w_nl = ww(0 until w_nl_size) + val w_l = ww(w_nl_size until ww.dim) + val (fEndo, fExo) = getTransforms (w_nl, fEndo_enabled, fExo_enabled) + val xy = buildMatrix 
(xe_bfill, y, hparam, fEndo, fExo, bakcast) + val yp = xy * w_l + (y - yp).normSq + end loss + + // LBFGS_B + val optimizer = OPTIMIZER (loss, wInit.dim, l_u_ = (l_wInit, u_wInit)) + val (_, ww) = optimizer.solve (wInit, 0.05) + + // LBFGS +// val optimizer = OPTIMIZER (loss) +// val (_, ww) = optimizer.solve (wInit) + +// println (s"optimize: parameters ww = $ww") +// println (s"optimize: loss = ${loss (ww)}") + ww(0 until w_nl_size) + end optimize + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Fit the nonlinear + linear parameters using LBFGS_B with VarPro. + * @param xe_bfill the matrix of exogenous variable values + * @param y the endogenous/response vector (main time series data) + * @param hp_ the hyper-parameters + * @param fEndo the transformation functions to apply on the endogenous variables + * @param fExo the transformation functions to apply on the exogenous variables + * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) + */ + def optimize2 (xe_bfill: MatrixD, y: VectorD, hparam: HyperParameter = hp, + fEndo_enabled: LSET [TransformT] = LSET(Root), + fExo_enabled: Array [LSET [TransformT]] = Array (LSET (Pow), LSET (Pow)), + bakcast: Boolean = false): VectorD = + + val (wInit, l_wInit, u_wInit) = initializeW (fEndo_enabled, fExo_enabled) + + def loss: FunctionV2S = (ww: VectorD) => + val (fEndo, fExo) = getTransforms (ww, fEndo_enabled, fExo_enabled) + val xy = buildMatrix (xe_bfill, y, hparam, fEndo, fExo, bakcast) + val reg = new REGRESSION (xy, y) + reg.train (xy, y) + val yp = reg.predict(xy) + (y - yp).normSq + end loss + + // LBFGS_B + val optimizer = OPTIMIZER (loss, wInit.dim, l_u_ = (l_wInit, u_wInit)) + val (_, ww) = optimizer.solve (wInit, 0.05) + +// println (s"optimize: parameters ww = $ww") +// println (s"optimize: loss = ${loss (ww)}") + ww + end optimize2 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Form 
vectors for the initial weights and their bounds for the transforms. + * @param fEndo_enabled the set of transforms to be used for the endogenous + * @param fExo_enabled the array containing the sets of transforms to be used for the exogenous + */ + def initializeW (fEndo_enabled: LSET [TransformT], fExo_enabled: Array [LSET [TransformT]]): + (VectorD, VectorD, VectorD) = + + var wInit = new VectorD (0) + var l_wInit = new VectorD (0) + var u_wInit = new VectorD (0) + // order: endo's transforms, exo1's transforms, exo2's transfroms, ... + + if fEndo_enabled != null then + for t <- fEndo_enabled do + val init = t.wlu + wInit = wInit ++ init.w + l_wInit = l_wInit ++ init.l + u_wInit = u_wInit ++ init.u + + if fExo_enabled.length > 0 then + for set <- fExo_enabled do + if set != null then + for t <- set do + val init = t.wlu + wInit = wInit ++ init.w + l_wInit = l_wInit ++ init.l + u_wInit = u_wInit ++ init.u + (wInit, l_wInit, u_wInit) + end initializeW + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Form arrays of transforms object using the vector of nonlinear parameters. 
+ * @param w_nl the vector of nonlinear parameters + * @param fEndo_enabled the set of transforms to be used for the endogenous + * @param fExo_enabled the array containing the sets of transforms to be used for the exogenous + */ + def getTransforms (w_nl: VectorD, fEndo_enabled: LSET [TransformT], fExo_enabled: Array [LSET [TransformT]]): + (Array [Transform], Array [Array [Transform]]) = + val listEndo = new ArrayBuffer [Transform] () + var i = 0 + if fEndo_enabled != null then + for t <- fEndo_enabled do + listEndo += t.form (VectorD (w_nl(i), w_nl(i+1))) + i += 2 + + val listExo = new Array [Array [Transform]] (fExo_enabled.length) + if fExo_enabled.length > 0 then + var k = 0 + for set <- fExo_enabled do + val listExo_k = new ArrayBuffer [Transform] () + if set != null then + for t <- set do + listExo_k += t.form (VectorD (w_nl(i), w_nl(i+1))) + i += 2 + listExo(k) = listExo_k.toArray + k += 1 + + (listEndo.toArray, listExo) + end getTransforms + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Form an array of names for the features included in the model. 
+ * @param n_exo the number of exogenous variable + * @param hp_ the hyper-parameters + * @param n_fEn the number of functions used to map endogenous variables + * @param n_fExArr the number of functions used to map each exogenous variables + */ + def formNames (n_exo: Int, hp_ : HyperParameter, n_fEn: Int, n_fExArr: Array [Int]): Array [String] = + + val (spec, p, q, cross) = (hp_("spec").toInt, hp_("p").toInt, hp_("q").toInt, hp_("cross").toInt) + val names = ArrayBuffer [String] () + for i <- 0 until n_fEn; j <- p to 1 by -1 do names += s"f$i(yl$j)" // function lags endo terms + + // exogenous (match build order): + for j <- 0 until n_exo do + // raw exogenous lags + for k <- q to 1 by -1 do + names += s"xe${j}l$k" + + // transformations for this exo j + val n_fEx_j = n_fExArr(j) + for i <- 0 until n_fEx_j do + for k <- q to 1 by -1 do + names += s"g$j,$i(xe${j}l$k)" + end for + + if cross == 1 then + for j <- 0 until n_exo; k <- q to 1 by -1 do names += s"xe${j}l$k*yl$k" // lagged cross terms + + MakeMatrix4TS.formNames (spec, p) ++ names.toArray + end formNames + +end NARX_SR_D + +import Example_Covid._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `nARX_SR_DTest3` main function tests the `NARX_SR_D` class on real data: + * Forecasting COVID-19 using In-Sample Testing (In-ST). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.nARX_SR_DTest3 + */ +@main def nARX_SR_DTest3 (): Unit = + +// val exo_vars = NO_EXO + val exo_vars = Array ("icu_patients") +// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val (xxe, yy) = loadData (exo_vars, response) + println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + +// val xe = xxe // full + val xe = xxe(0 until 116) // clip the flat end +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + hp("lwave") = 20 // wavelength (distance between peaks) + hp("cross") = 1 // 1 => add cross terms + + for p <- 6 to 6; q <- 4 to 4; s <- 1 to 1 do // number of lags (endo, exo); trend + hp("p") = p // endo lags + hp("q") = q // exo lags + hp("spec") = s // trend specification: 0, 1, 2, 3, 5 + val mod = NARX_SR_D (xe, y, hh) // create model for time series data + mod.inSample_Test () // In-sample Testing + println (mod.summary ()) // statistical summary of fit + end for + +end nARX_SR_DTest3 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `nARX_SR_DTest4` main function tests the `NARX_SR_D` class on real data: + * Forecasting COVID-19 using Train and Test (TnT). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.nARX_SR_DTest4 + */ +@main def nARX_SR_DTest4 (): Unit = + + val exo_vars = Array ("icu_patients") +// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val (xxe, yy) = loadData (exo_vars, response) + println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + +// val xe = xxe // full + val xe = xxe(0 until 116) // clip the flat end +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + hp("lwave") = 20 // wavelength (distance between peaks) +// hp("cross") = 1 // 1 => add cross terms + + for p <- 6 to 6; q <- 4 to 4; s <- 1 to 1 do // number of lags (endo, exo); trend + hp("p") = p // endo lags + hp("q") = q // exo lags + hp("spec") = s // trend specification: 0, 1, 2, 3, 5 + + val mod = NARX_SR_D (xe, y, hh) // create model for time series data + banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") + mod.trainNtest_x ()() // use customized trainNtest_x + println (mod.summary ()) // statistical summary of fit + + mod.setSkip (0) + mod.rollValidate () // TnT with Rolling Validation + println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") + mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set +// println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") + end for +end nARX_SR_DTest4 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `nARX_SR_DTest5` main function fit the linear and non linear + * parameters and tests the `NARX_SR_D` class on real data: + * Forecasting COVID-19 using Train and Test (TnT). + * Test forecasts (h = 1 to hh steps ahead forecasts). 
+ * > runMain scalation.modeling.forecasting.nARX_SR_DTest5 + */ +@main def nARX_SR_DTest5 (): Unit = + + val exo_vars = Array ("icu_patients", "positive_rate") +// val exo_vars = Array ("icu_patients") + val (xxe, yy) = loadData (exo_vars, response) + println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + +// val xe = xxe // full + val xe = xxe(0 until 116) // clip the flat end +// val y = yy // full + val y = yy(0 until 116) // clip the flat end + val hh = 6 // maximum forecasting horizon + hp("lwave") = 20 // wavelength (distance between peaks) + hp("cross") = 0 // 1 => add cross terms + RidgeRegression.hp("lambda") = 6.0 + // hp("pow") = 0.5 + RidgeRegression.hp("factorization") = "Fac_Cholesky" + + hp("p") = 6 // endo lags + hp("q") = 4 // exo lags + hp("spec") = 1 // trend specification: 0, 1, 2, 3, 5 + + val fEndo_enabled = LSET (Root) + val fExo_enabled = Array (LSET(Root), LSET(Root)) // array of transforms for exogenous (must be exo_vars.length == fExo_enabled.length) + + val mod = NARX_SR_D.rescale (xe, y, hh, fEndo_enabled = fEndo_enabled, fExo_enabled = fExo_enabled) // create model for time series data +// mod.trainNtest_x ()() + mod.inSample_Test () + + println ("rollValidate") + mod.setSkip (0) + mod.rollValidate (rc = 2) + mod.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim)) + + val (cols, rSq, modForc) = mod.featureSelectAtHorizon (h = 1, fsType = SelectionTech.Backward) //, cross = "many") + println (s"cols = ${cols}") + println (s"rSq = ${rSq}") + modForc.setSkip (0) + modForc.rollValidate (rc = 2) + modForc.diagnoseAll (mod.getY, mod.getYf, Forecaster.teRng (y.dim)) + println (stringOf (mod.parameter_nl)) + +end nARX_SR_DTest5 + diff --git a/src/main/scala/scalation/modeling/forecasting_old/ForecasterX.scala b/src/main/scala/scalation/modeling/forecasting/reg_trees/ForecasterX.scala similarity index 86% rename from src/main/scala/scalation/modeling/forecasting_old/ForecasterX.scala rename to 
src/main/scala/scalation/modeling/forecasting/reg_trees/ForecasterX.scala index a74c0a88a..40051f7e4 100644 --- a/src/main/scala/scalation/modeling/forecasting_old/ForecasterX.scala +++ b/src/main/scala/scalation/modeling/forecasting/reg_trees/ForecasterX.scala @@ -12,12 +12,41 @@ * medium.com/@xwang222/forecasting-101-ep07-multivariate-models-9f3a11fbb374 */ +// FIX - convert the regression trees so the `ForecasterX` trait is not needed and can be eliminated + package scalation package modeling -package forecasting_old +package forecasting +package reg_trees import scalation.mathstat._ +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Given a response vector y, build and return + * (1) an input/predictor MATRIX xx and + * (2) an output/multi-horizon output/response MATRIX yy. + * Used by forecast models that use DIRECT multi-horizon forecasting. + * The first response can't be predicted as its inputs are only the backcast value. + * Therefore, the number of rows in xx and yy is reduced to "y.dim-1". + * FIX - utilize `MakeMatrix4TS` + * @param y the given output/response vector, i.e., the time series + * @param lags the maximum lag included (inclusive) + * @param hh the maximum forecasting horizon (h = 1, 2, ... 
hh) + */ +def buildMatrix4TS (y: VectorD, lags: Int, hh: Int): (MatrixD, MatrixD) = + val mm = y.dim - 1 + val yb = WeightedMovingAverage.backcast (y) +: y // y prependined with one backcast value + val xx = new MatrixD (y.dim-1, lags) // input matrix: column for each lag + val yy = new MatrixD (y.dim-1, hh) // output matrix: column for each horizon + for t <- 0 until mm do // skip first row (all the same values) + for j <- xx.indices2 do xx(t, lags - 1 - j) = yb(max0 (t + 1 - j)) + for j <- yy.indices2 do yy(t, j) = if t + 1 + j >= y.dim then -0.0 else y(t + 1 + j) + end for + println (s"buildMatrix4TS: xx.dims = ${xx.dims}, yy.dims = ${yy.dims}") + (xx, yy) +end buildMatrix4TS + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `ForecasterX` trait provides a common framework for several forecasting * models that use 1 ENDOGENOUS variable y and 0 or more EXOGENOUS variables xj. @@ -46,6 +75,9 @@ trait ForecasterX (lags: Int) */ def getXY: (MatrixD, VectorD) // (getX, getY) + def trainNtest (x_ : MatrixD, y_ : VectorD) + (xx: MatrixD, yy: VectorD): (VectorD, VectorD) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Predict a value for y_t+1 using the 1-step ahead forecast. * y_t+1 = f (y_t, ...) 
+ e_t+1 @@ -235,7 +267,7 @@ object ForecasterX: banner (s"rollValidate: Evaluate ${mod.modelName}'s QoF for the horizons: 1 to $hh") println ("fitMap qof = ") - println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) + println (Fit.showFitMap (ftMat.ᵀ)) end rollValidate //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -249,7 +281,8 @@ object ForecasterX: def evalForecasts (mod: ForecasterX & Fit, y: VectorD, yx: MatrixD, hh: Int, ints: Boolean = false): Unit = val ftMat = new MatrixD (hh, Fit.N_QoF) - banner (s"Evaluate ${mod.modelName}'s QoF for horizons 1 to $hh:") + val mName = mod.modelName + banner (s"Evaluate ${mName}'s QoF for horizons 1 to $hh:") for h <- 1 to hh do val (yy, yfh, qof) = mod.testF (h, y, yx) // h-steps ahead forecast and its QoF @@ -257,13 +290,14 @@ object ForecasterX: // println (FitM.fitMap (qof, qoF_names)) // evaluate h-steps ahead forecasts if ints then - val (low, up) = mod.forecastAtI (yy, yfh, h) // prediction interval forecasts - val qof_all = mod.diagnose_ (yy, yfh, low, up) // fully evaluate h-steps ahead forecasts - mod.show_interval_forecasts (yy, yfh, low, up, qof_all, h) + val low_up = mod.forecastAtI (yy, yfh, h) // prediction interval forecasts + val qof_all = mod.diagnose_ (yy, yfh, low_up) // fully evaluate h-steps ahead forecasts + mod.showQoF (qof_all) // show all the QoF metrics + Predictor.plotPredictionInt (yy, yfh, low_up, mName) // plot ordered actual, predicted, lower, upper end for println ("fitMap qof = ") - println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) + println (Fit.showFitMap (ftMat.ᵀ)) end evalForecasts end ForecasterX diff --git a/src/main/scala/scalation/modeling/forecasting_old/RegressionTreeGB4TS.scala b/src/main/scala/scalation/modeling/forecasting/reg_trees/RegressionTreeGB4TS.scala similarity index 89% rename from src/main/scala/scalation/modeling/forecasting_old/RegressionTreeGB4TS.scala rename to 
src/main/scala/scalation/modeling/forecasting/reg_trees/RegressionTreeGB4TS.scala index 478e961aa..3cf5b96c2 100644 --- a/src/main/scala/scalation/modeling/forecasting_old/RegressionTreeGB4TS.scala +++ b/src/main/scala/scalation/modeling/forecasting/reg_trees/RegressionTreeGB4TS.scala @@ -10,9 +10,8 @@ package scalation package modeling -package forecasting_old - -import scala.math.max +package forecasting +package reg_trees import scalation.mathstat._ @@ -28,7 +27,7 @@ import scalation.mathstat._ * * @param x the input/predictor matrix built out of lags of y * (and optionally from exogenous variables ex) - * @param yy the output/response vector trimmed to match x.dim (@see ARX object) + * @param yy the output/response vector trimmed to match x.dim * @param lags the maximum lag included (inclusive) * @param fname the feature/variable names * @param hparam the hyper-parameters (use REGRESSION.hp for default) @@ -41,7 +40,7 @@ class RegressionTreeGB4TS (x: MatrixD, yy: VectorD, lags: Int, fname: Array [Str private val debug = debugf ("RegressionTreeGB4TS", true) // debug function private val flaw = flawf ("RegressionTreeGB4TS") // flaw function - modelName = s"RegressionTreeGB4TS_$lags" + _modelName = s"RegressionTreeGB4TS_$lags" debug ("init", s"$modelName: x.dims = ${x.dims}, yy.dim = ${yy.dim}") @@ -170,19 +169,16 @@ object RegressionTreeGB4TS: * @param h the forecasting horizon (1, 2, ... 
h) * @param intercept whether to add a column of all ones to the matrix (intercept) * @param hparam the hyper-parameters (use RegressionTree.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) */ def exo (y: VectorD, lags: Int, ex: MatrixD, h: Int, - intercept: Boolean = true, hparam: HyperParameter = RegressionTree.hp) - (elag1: Int = max (1, lags / 5), - elag2: Int = max (1, lags)): RegressionTreeGB4TS = + intercept: Boolean = true, hparam: HyperParameter = RegressionTree.hp): RegressionTreeGB4TS = val (x_, yy) = buildMatrix4TS (y, lags, h) // column for each lag var x = if intercept then VectorD.one (yy.dim) +^: x_ else x_ // add first column of all ones val endoCols = x.dim2 println (s"endogenous: columns = $endoCols") - x = x ++^ ARX.makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var +// x = x ++^ ARX.makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var + x = x ++^ MakeMatrix4TS.makeMatrix4EXO (ex, lags, 1) // add columns for each lagged exo var println (s"exogenous: columns = ${x.dim2 - endoCols}") val y_ = yy(?, 0) // use first column @@ -198,7 +194,7 @@ end RegressionTreeGB4TS /** The `regressionTreeGB4TSTest` main function tests the `RegressionTreeGB4TS` class. * This test is used to CHECK that the `buildMatrix4TS` function is working correctly. * May get NaN for some maximum lags (p) due to multi-collinearity. - * > runMain scalation.modeling.forecasting.regressionTreeGB4TSTest + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeGB4TSTest */ @main def regressionTreeGB4TSTest (): Unit = @@ -224,7 +220,7 @@ end regressionTreeGB4TSTest /** The `regressionTreeGB4TSTest2` main function tests the `RegressionTreeGB4TS` class * on real data: Forecasting lake levels. 
* @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.regressionTreeGB4TSTest2 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeGB4TSTest2 */ @main def regressionTreeGB4TSTest2 (): Unit = @@ -255,7 +251,7 @@ import forecasting.Example_Covid.{loadData, NO_EXO, response} * on real data: Forecasts COVID-19 Weekly Data using endogenous variable only. * Does In-Sample Testing (In_ST). * Determines the terms to include in the model using Feature Selection. - * > runMain scalation.modeling.forecasting.regressionTreeGB4TSTest3 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeGB4TSTest3 */ @main def regressionTreeGB4TSTest3 (): Unit = @@ -269,12 +265,12 @@ import forecasting.Example_Covid.{loadData, NO_EXO, response} banner ("Test In-Sample RegressionTreeGB4TS on COVID-19 Weekly Data") val mod = RegressionTreeGB4TS (yy, LAGS, hh) // create model for time series data // val mod = RegressionTreeGB4TS.rescale (yy, LAGS, hh) // create model for time series data - scaling - val (yp, qof) = mod.trainNtest ()() // train on full and test on full + val yp = mod.trainNtest ()()._1 // train on full and test on full val yy_ = yy.drop (1) // can't forecast first point new Plot (null, yy_, yp, s"${mod.modelName}, yy_ vs. 
yp @ h = 1", lines = true) - val y_yp = MatrixD (yy_, yp).transpose + val y_yp = MatrixD (yy_, yp).ᵀ println (s"y_yp = $y_yp") val xx = mod.getX @@ -286,8 +282,7 @@ import forecasting.Example_Covid.{loadData, NO_EXO, response} val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeGB4TS with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RegressionTreeGB4TS with tech", lines = true) // println (mod.summary ()) banner ("Feature Importance") @@ -303,7 +298,7 @@ end regressionTreeGB4TSTest3 /** The `regressionTreeGB4TSTest4` main function tests the `RegressionTreeGB4TS` class * on real data: Forecasts COVID-19 Weekly Data using endogenous variables. * Does Train-n-Test (TnT) Split testing on the model. - * > runMain scalation.modeling.forecasting.regressionTreeGB4TSTest4 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeGB4TSTest4 */ @main def regressionTreeGB4TSTest4 (): Unit = @@ -334,7 +329,7 @@ end regressionTreeGB4TSTest4 * on real data: Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. * Does In-Sample Testing (In-ST). * Determines the terms to include in the model using Feature Selection. 
- * > runMain scalation.modeling.forecasting.regressionTreeGB4TSTest5 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeGB4TSTest5 */ @main def regressionTreeGB4TSTest5 (): Unit = @@ -346,8 +341,8 @@ end regressionTreeGB4TSTest4 println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") banner ("Test In-Sample RegressionTreeGB4TS.exo on COVID-19 Weekly Data") - val mod = RegressionTreeGB4TS.exo (y, LAGS, ex, hh)(1, LAGS+1) // create model for time series data - with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full + val mod = RegressionTreeGB4TS.exo (y, LAGS, ex, hh) // create model for time series data - with exo + val yp = mod.trainNtest ()()._1 // train on full and test on full val yy_ = y(LAGS until y.dim) new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. yp", lines = true) @@ -356,11 +351,10 @@ end regressionTreeGB4TSTest4 val tech = SelectionTech.Stepwise banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val (cols, rSq) = mod.selectFeatures (tech, cross = "none") // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeGB4TS with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RegressionTreeGB4TS with tech", lines = true) // println (mod.summary ()) banner ("Feature Importance") @@ -377,7 +371,7 @@ end regressionTreeGB4TSTest5 * Does In-Sample Testing (In-ST). * Determines the terms to include in the model using Feature Selection. * Run Train-n-Test (TnT) Split testing on best model. 
- * > runMain scalation.modeling.forecasting.regressionTreeGB4TSTest6 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeGB4TSTest6 */ @main def regressionTreeGB4TSTest6 (): Unit = @@ -389,8 +383,8 @@ end regressionTreeGB4TSTest5 println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") banner ("Test In-Sample RegressionTreeGB4TS.exo on COVID-19 Weekly Data") - val mod = RegressionTreeGB4TS.exo (y, LAGS, ex, hh)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full + val mod = RegressionTreeGB4TS.exo (y, LAGS, ex, hh) // create model for time series data with exo + val yp = mod.trainNtest ()()._1 // train on full and test on full val yy_ = y(LAGS until y.dim) new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. yp", lines = true) @@ -399,11 +393,10 @@ end regressionTreeGB4TSTest5 val tech = SelectionTech.Stepwise banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val (cols, rSq) = mod.selectFeatures (tech, cross = "none") // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeGB4TS with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RegressionTreeGB4TS with tech", lines = true) // println (mod.summary ()) banner ("Feature Importance") @@ -415,7 +408,7 @@ end regressionTreeGB4TSTest5 // val bmod = mod.getBest._4 // get the best model from feature selection val bmod = mod.getBest.mod.asInstanceOf [RegressionTreeGB4TS] // get the best model from feature selection val (x_, y_, xtest, ytest) = ForecasterX.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) + val yptest = bmod.trainNtest (x_, y_)(xtest, ytest)._1 // train on (x_, y_) and 
test on (xtest, ytest) new Plot (null, ytest, yptest, s"${mod.modelName}, ytest vs. yptest", lines = true) end regressionTreeGB4TSTest6 @@ -427,7 +420,7 @@ end regressionTreeGB4TSTest6 * Does In-Sample Testing (In-ST). * Determines the terms to include in the model using Feature Selection. * Run Train-n-Test (TnT) Split testing on best model using Rolling Validation. - * > runMain scalation.modeling.forecasting.regressionTreeGB4TSTest7 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeGB4TSTest7 */ @main def regressionTreeGB4TSTest7 (): Unit = @@ -443,8 +436,8 @@ end regressionTreeGB4TSTest6 println (s"te_size = $te_size") banner ("Test In-Sample RegressionTreeGB4TS.exo on COVID-19 Weekly Data") - val mod = RegressionTreeGB4TS.exo (y, LAGS, ex, hh)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full + val mod = RegressionTreeGB4TS.exo (y, LAGS, ex, hh) // create model for time series data with exo + val yp = mod.trainNtest ()()._1 // train on full and test on full val yy_ = y(LAGS until y.dim) new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. 
yp", lines = true) @@ -453,11 +446,10 @@ end regressionTreeGB4TSTest6 val tech = SelectionTech.Stepwise banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val (cols, rSq) = mod.selectFeatures (tech, cross = "none") // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeGB4TS with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RegressionTreeGB4TS with tech", lines = true) // println (mod.summary ()) banner ("Feature Importance") diff --git a/src/main/scala/scalation/modeling/forecasting/reg_trees/RegressionTreeGB4TS2.scalaa b/src/main/scala/scalation/modeling/forecasting/reg_trees/RegressionTreeGB4TS2.scalaa new file mode 100644 index 000000000..54a5e697e --- /dev/null +++ b/src/main/scala/scalation/modeling/forecasting/reg_trees/RegressionTreeGB4TS2.scalaa @@ -0,0 +1,425 @@ + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Fri Jun 21 23:13:48 EDT 2024 + * @see LICENSE (MIT style license file). + * + * @note Model: Gradient Boosting for Time Series + */ + +package scalation +package modeling +package forecasting +package reg_trees + +import scalation.mathstat._ + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RegressionTreeGB4TS2` object supports Gradient Boosting for Time Series data. + * Multi-horizon forecasting supported via the Recursive method. + * Given a response vector y, a predictor matrix x is built that consists of + * lagged y vectors. Additional future response vectors are built for training. + * + * y_t = f(x) + * + * where x = [y_{t-1}, y_{t-2}, ... y_{t-lags}]. 
+ */ +object RegressionTreeGB4TS2: + + private val debug = debugf ("RegressionTreeGB4TS2", true) // debug function + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `RegressionTreeGB` object from a response vector. The input/data matrix + * x is formed from the lagged y vectors as columns in matrix x. + * @param y the original un-expanded output/response vector + * @param lags the maximum lag included (inclusive) + * @param h the forecasting horizon (1, 2, ... h) + * @param intercept whether to add a column of all ones to the matrix (intercept) + * @param hparam the hyper-parameters (use RegressionTree.hp for default) + */ + def apply (y: VectorD, lags: Int, h: Int, intercept: Boolean = true, + hparam: HyperParameter = RegressionTree.hp): RegressionTreeGB = + val (x_, yy) = buildMatrix4TS (y, lags, h) // column for each lag + val x = if intercept then VectorD.one (yy.dim) +^: x_ else x_ // add first column of all ones + val y_ = yy(?, 0) // use first column (h = 1) + debug ("apply", s"x.dims = ${x.dims}, y_.dim = ${y_.dim}") + + val mod = new RegressionTreeGB (x, y_, null, hparam) + mod.modelName = s"RegressionTreeGB4TS2$lags" + mod + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `RegressionTreeGB` object from a response matrix. The input/data matrix + * x is formed from the lagged y vectors as columns in matrix x. + * This method provides data rescaling. + * @param y the original un-expanded output/response vector + * @param lags the maximum lag included (inclusive) + * @param h the forecasting horizon (1, 2, ... 
h) + * @param intercept whether to add a column of all ones to the matrix (intercept) + * @param hparam the hyper-parameters (use RegressionTree.hp for default) + */ + def rescale (y: VectorD, lags: Int, h: Int, intercept: Boolean = true, + hparam: HyperParameter = RegressionTree.hp): RegressionTreeGB = + val (x_, yy) = buildMatrix4TS (y, lags, h) // column for each lag + var x = scale (extreme (x_), (1.0, 5.0))(x_) // rescale vector x matrix to [1, 5] + if intercept then x = VectorD.one (yy.dim) +^: x // add first column of all ones + val y_ = yy(?, 0) // use first column + debug ("rescale", s"x.dims = ${x.dims}, y_.dim = ${y_.dim}") + + val mod = new RegressionTreeGB (x, y_, null, hparam) + mod.modelName = s"RegressionTreeGB4TS2$lags" + mod + end rescale + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `RegressionTreeGB` object from a response vector. The input/data matrix + * x is formed from the lagged y vectors as columns in matrix x. + * In addition, lagged exogenous variables are added. + * @param y the original un-expanded output/response vector + * @param lags the maximum lag included (inclusive) + * @parax ex the input matrix for exogenous variables (one per column) + * @param h the forecasting horizon (1, 2, ... 
h) + * @param intercept whether to add a column of all ones to the matrix (intercept) + * @param hparam the hyper-parameters (use RegressionTree.hp for default) + */ + def exo (y: VectorD, lags: Int, ex: MatrixD, h: Int, + intercept: Boolean = true, hparam: HyperParameter = RegressionTree.hp): RegressionTreeGB = + val (x_, yy) = buildMatrix4TS (y, lags, h) // column for each lag + var x = if intercept then VectorD.one (yy.dim) +^: x_ else x_ // add first column of all ones + val endoCols = x.dim2 + println (s"endogenous: columns = $endoCols") + +// x = x ++^ ARX.makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var + x = x ++^ MakeMatrix4TS.makeMatrix4EXO (ex, lags, 1) // add columns for each lagged exo var + println (s"exogenous: columns = ${x.dim2 - endoCols}") + + val y_ = yy(?, 0) // use first column + debug ("exo", s"x.dims = ${x.dims}, y_.dim = ${y_.dim}") + + val mod = new RegressionTreeGB (x, y_, null, hparam) + mod.modelName = s"RegressionTreeGB4TS2.exo_$lags" + mod + end exo + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Split the x matrix and y matrix into training and testing sets. 
+ * @param x the x data/input matrix + * @param y the y response/output matrix + * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) + */ +// def split_TnT (x: MatrixD, y: MatrixD, ratio: Double = 0.20): (MatrixD, MatrixD, MatrixD, MatrixD) = + def split_TnT (x: MatrixD, y: VectorD, ratio: Double = 0.20): (MatrixD, VectorD, MatrixD, VectorD) = + val n = x.dim + val tr_size = (n * (1.0 - ratio)).toInt + println (s"RegressionTreeGB4TS2.split_TnT: tr_size = $tr_size, te_size = ${n - tr_size}") + (x(0 until tr_size), y(0 until tr_size), x(tr_size until n), y(tr_size until n)) + end split_TnT + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Align (for the testing set) the actual response vector for comparison with + * the predicted/forecasted response vector, returning a time vector and sliced + * response vectors. + * @param tr_size the size of the intial training set + * @param y the actual response for the full dataset (to be sliced) + * @param yp the predicted response for the full dataset (to be sliced) + * @param h_ the current forecasting horizon - 1 + */ + def align (tr_size: Int, y: VectorD, yp: VectorD, h_ : Int): (VectorD, VectorD, VectorD) = + debug ("align:", s"y.dim = ${y.dim}, yp.dim = ${yp.dim}, h_ = $h_") + (VectorD.range (tr_size, y.dim - h_), y(tr_size until y.dim - h_), yp(0 until yp.dim - h_)) + end align + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Use rolling-validation to compute test Quality of Fit (QoF) measures + * by dividing the dataset into a TRAINING SET (tr) and a TESTING SET (te) + * as follows: [ <-- tr_size --> | <-- te_size --> ] + * This version calls predict (DIRECT) for h-steps ahead out-of-sample forecasts. 
+ * @see `RollingValidation` + * @param mod the forecasting model being used (e.g., `RegressionTreeGB4TS2`) + * @param rc the retraining cycle (number of forecasts until retraining occurs) + * @param te_size the size of the testing set + */ + def rollValidate (mod: Predictor & Fit, rc: Int, te_size_ : Int): VectorD = + val x = mod.getX // get data/input matrix + val y = mod.getY // get response/output matrix + val hh = 1 + val ftMat = new MatrixD (hh, Fit.N_QoF) + banner (s"rollValidate: Evaluate ${mod.modelName}'s QoF for the horizons: 1 to $hh") + + val te_size = if te_size_ < 0 then RollingValidation.teSize (y.dim) else te_size_ // size of test set + val tr_size = y.dim - te_size // size of initial training set + debug ("rollValidate", s"y.dim = ${y.dim}, train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") + +// val yp = new MatrixD (te_size, y.dim2) // y-predicted over testing set + val yp = new VectorD (te_size) // y-predicted over testing set + for i <- 0 until te_size do // iterate through testing set + val t = tr_size + i // next time point to forecast +// if i % rc == 0 then mod.train (x(0 until t), y(0 until t)) // retrain on sliding training set (growing set) + if i % rc == 0 then mod.train (x(i until t), y(i until t)) // retrain on sliding training set (fixed size set) + yp(i) = mod.predict (x(t-1)) // predict the next value + end for + + val df = max0 (mod.parameter.dim - 1) // degrees of freedom for model + mod.resetDF (df, te_size - df) // reset degrees of freedom + + for k <- 0 until hh do // move thru each horizon 1 to h + val (t, yk, ypk) = align (tr_size, y, yp, k) // clip ending zeros (0.0 or -0.0) + debug ("rollValidate", s"horizon $k: yk.dim = ${yk.dim}, ypk.dim = ${ypk.dim}") + new Plot (t, yk, ypk, s"Plot yy, yp vs. 
t for horizon ${k+1}", lines = true) + val qof = mod.diagnose (yk, ypk) + ftMat(k) = qof +// println (FitM.fitMap (qof, qoF_names)) + end for + println ("fitMap qof = ") + println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) + yp + end rollValidate + +end RegressionTreeGB4TS2 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `regressionTreeGB4TS2Test` main function tests the `RegressionTreeGB4TS2` class. + * This test is used to CHECK that the `buildMatrix4TS` function is working correctly. + * May get NaN for some maximum lags (p) due to multi-collinearity. + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeGB4TS2Test + */ +@main def regressionTreeGB4TS2Test (): Unit = + + val m = 30 + val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function + val h = 3 // the forecasting horizon + + for p <- 5 to 5 do // autoregressive hyper-parameter p + banner (s"Test: RegressionTreeGB4TS2 with $p lags") + val mod = RegressionTreeGB4TS2 (y, p, h) // create model for time series data + mod.trainNtest ()() // train the model on full dataset + println (mod.summary) + + val yy = mod.getY + val yp = mod.predict (mod.getX) + new Plot (null, yy, yp, s"yy vs. yp for ${mod.modelName} (h=1) with $p lags", lines = true) + end for + +end regressionTreeGB4TS2Test + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `regressionTreeGB4TS2Test2` main function tests the `RegressionTreeGB4TS2` class on real data: + * Forecasting lake levels. 
+ * @see cran.r-project.org/web/packages/fpp/fpp.pdf + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeGB4TS2Test2 + */ +@main def regressionTreeGB4TS2Test2 (): Unit = + + import forecasting.Example_LakeLevels.y + val h = 1 // the forecasting horizon + + for p <- 1 to 7 do // autoregressive hyper-parameter p + banner (s"Test: RegressionTreeGB4TS2 with $p lags") + val mod = RegressionTreeGB4TS2 (y, p, h) // create model for time series data + mod.trainNtest ()() // train the model on full dataset + println (mod.summary) + + banner ("Predictions/Forecasts") // direct forecasting technique + val yy = mod.getY + val yf = mod.predict (mod.getX) +// for k <- yf.indices2 do + new Plot (null, yy, yf, s"yy vs. yf for ${mod.modelName} (h=1) with $p lags", lines = true) + println (s"yf = $yf") + println (s"yf.dim = ${yf.dim}") + end for + +end regressionTreeGB4TS2Test2 + +import forecasting.Example_Covid.{loadData, NO_EXO, response} + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `regressionTreeGB4TS2Test3` main function tests the `RegressionTreeGB4TS2` class on real data: + * Forecasts COVID-19 Weekly Data using endogenous variable only. + * Does In-Sample Testing (In_ST). + * Determines the terms to include in the model using Feature Selection. 
+ * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeGB4TS2Test3 + */ +@main def regressionTreeGB4TS2Test3 (): Unit = + + val LAGS = 10 // number of lags + val h = 6 // forecasting horizon + + val (ex, y) = loadData (NO_EXO, response) + val yy = y(0 until 116) // clip the flat part of the data + println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") + + banner ("Test In-Sample RegressionTreeGB4TS2 on COVID-19 Weekly Data") + val mod = RegressionTreeGB4TS2 (yy, LAGS, h) // create model for time series data +// val mod = RegressionTreeGB4TS2.rescale (yy, LAGS, h) // create model for time series data - scaling + val yp = mod.trainNtest ()()._1 // train on full and test on full + val yy_ = yy.drop (1) // can't forecast first point + + new Plot (null, yy_, yp, s"${mod.modelName}, yy_ vs. yp @ h = 1", lines = true) + + val y_yp = MatrixD (yy_, yp).transpose + println (s"y_yp = $y_yp") + +// mod.forecastAll (yy, h) // FIX - to be implemented - see ARX.scala +// Forecaster.evalForecasts (mod, yy, h) // FIX - to be implemented - see ARX.scala + +/* + banner (s"Feature Selection Technique: Stepwise") + val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA + val k = cols.size + println (s"k = $k, n = ${mod.getX.dim2}") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for RegressionTreeGB4TS2 with tech", lines = true) +// println (mod.summary ()) + + banner ("Feature Importance") + println (s"Stepwise: rSq = $rSq") +// val imp = mod.importance (cols.toArray, rSq) +// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") +*/ + +end regressionTreeGB4TS2Test3 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `regressionTreeGB4TS2Test4` main function tests the `RegressionTreeGB4TS2` class on real data: + * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. + * Does In-Sample Testing (In-ST). 
+ * Determines the terms to include in the model using Feature Selection. + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeGB4TS2Test4 + */ +@main def regressionTreeGB4TS2Test4 (): Unit = + + val LAGS = 10 // number of lags + val h = 6 // forecasting horizon + + val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val (ex, y) = loadData (exo_vars, response) + println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") + + banner ("Test In-Sample RegressionTreeGB4TS2.exo on COVID-19 Weekly Data") + val mod = RegressionTreeGB4TS2.exo (y, LAGS, ex, h) // create model for time series data - with exo + val yp = mod.trainNtest ()()._1 // train on full and test on full + val yy_ = y(LAGS until y.dim) + new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. yp", lines = true) + +// val tech = SelectionTech.Forward // pick one feature selection technique +// val tech = SelectionTech.Backward + val tech = SelectionTech.Stepwise + + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val k = cols.size + println (s"k = $k, n = ${mod.getX.dim2}") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for RegressionTreeGB4TS2 with tech", lines = true) +// println (mod.summary ()) + + banner ("Feature Importance") + println (s"$tech: rSq = $rSq") +// val imp = mod.importance (cols.toArray, rSq) +// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") + +end regressionTreeGB4TS2Test4 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `regressionTreeGB4TS2Test5` main function tests the `RegressionTreeGB4TS2` class on real data: + * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. + * Does In-Sample Testing (In-ST). + * Determines the terms to include in the model using Feature Selection. + * Run Train-n-Test (TnT) Split testing on best model. 
+ * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeGB4TS2Test5 + */ +@main def regressionTreeGB4TS2Test5 (): Unit = + + val LAGS = 10 // number of lags + val h = 6 // forecasting horizon + + val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val (ex, y) = loadData (exo_vars, response) + println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") + + banner ("Test In-Sample RegressionTreeGB4TS2.exo on COVID-19 Weekly Data") + val mod = RegressionTreeGB4TS2.exo (y, LAGS, ex, h) // create model for time series data with exo + val yp = mod.trainNtest ()()._1 // train on full and test on full + val yy_ = y(LAGS until y.dim) + new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. yp", lines = true) + +// val tech = SelectionTech.Forward // pick one feature selection technique +// val tech = SelectionTech.Backward + val tech = SelectionTech.Stepwise + + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val k = cols.size + println (s"k = $k, n = ${mod.getX.dim2}") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for RegressionTreeGB4TS2 with tech", lines = true) +// println (mod.summary ()) + + banner ("Feature Importance") + println (s"$tech: rSq = $rSq") +// val imp = mod.importance (cols.toArray, rSq) +// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") + + banner ("Run TnT on Best model") +// val bmod = mod.getBest._4 // get the best model from feature selection + val bmod = mod.getBest.mod.asInstanceOf [RegressionTreeGB] // get the best model from feature selection + val (x_, y_, xtest, ytest) = RegressionTreeGB4TS2.split_TnT (bmod.getX, bmod.getY) + val yptest = bmod.trainNtest (x_, y_)(xtest, ytest)._1 // train on (x_, y_) and test on (xtest, ytest) + new Plot (null, ytest, yptest, s"${mod.modelName}, ytest vs. 
yptest", lines = true) + +end regressionTreeGB4TS2Test5 + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `regressionTreeGB4TS2Test6` main function tests the `RegressionTreeGB4TS2` class on real data: + * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. + * Does In-Sample Testing (In-ST). + * Determines the terms to include in the model using Feature Selection. + * Run Train-n-Test (TnT) Split testing on best model using Rolling Validation. + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeGB4TS2Test6 + */ +@main def regressionTreeGB4TS2Test6 (): Unit = + + val LAGS = 10 // number of lags (values from past) + val rc = 1 // retraining cycle + val h = 6 // forecasting horizon + + val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val (ex, y) = loadData (exo_vars, response) + println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") + + val te_size = RollingValidation.teSize (y.dim) + println (s"te_size = $te_size") + + banner ("Test In-Sample RegressionTreeGB4TS2.exo on COVID-19 Weekly Data") + val mod = RegressionTreeGB4TS2.exo (y, LAGS, ex, h) // create model for time series data with exo + val yp = mod.trainNtest ()()._1 // train on full and test on full + val yy_ = y(LAGS until y.dim) + new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. 
yp", lines = true) + +// val tech = SelectionTech.Forward // pick one feature selection technique +// val tech = SelectionTech.Backward + val tech = SelectionTech.Stepwise + + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val k = cols.size + println (s"k = $k, n = ${mod.getX.dim2}") + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for RegressionTreeGB4TS2 with tech", lines = true) +// println (mod.summary ()) + + banner ("Feature Importance") + println (s"$tech: rSq = $rSq") +// val imp = mod.importance (cols.toArray, rSq) +// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") + + banner ("Run Rolling Validation on RegressionTreeGB4TS2 Best model") +// val bmod = mod.getBest._4 // get the best model from feature selection + val bmod = mod.getBest.mod.asInstanceOf [RegressionTreeGB] // get the best model from feature selection + RegressionTreeGB4TS2.rollValidate (bmod, rc, te_size) + +end regressionTreeGB4TS2Test6 + diff --git a/src/main/scala/scalation/modeling/forecasting_old/RegressionTreeMT4TS.scala b/src/main/scala/scalation/modeling/forecasting/reg_trees/RegressionTreeMT4TS.scala similarity index 90% rename from src/main/scala/scalation/modeling/forecasting_old/RegressionTreeMT4TS.scala rename to src/main/scala/scalation/modeling/forecasting/reg_trees/RegressionTreeMT4TS.scala index efc13d02e..25a02502b 100644 --- a/src/main/scala/scalation/modeling/forecasting_old/RegressionTreeMT4TS.scala +++ b/src/main/scala/scalation/modeling/forecasting/reg_trees/RegressionTreeMT4TS.scala @@ -10,9 +10,8 @@ package scalation package modeling -package forecasting_old - -import scala.math.max +package forecasting +package reg_trees import scalation.mathstat._ @@ -28,7 +27,7 @@ import scalation.mathstat._ * * @param x the input/predictor matrix built out of lags of y * (and optionally from exogenous variables ex) - * @param yy the 
output/response vector trimmed to match x.dim (@see ARX object) + * @param yy the output/response vector trimmed to match x.dim * @param lags the maximum lag included (inclusive) * @param fname the feature/variable names * @param hparam the hyper-parameters (use RegressionTree.hp for default) @@ -41,7 +40,7 @@ class RegressionTreeMT4TS (x: MatrixD, yy: VectorD, lags: Int, fname: Array [Str private val debug = debugf ("RegressionTreeMT4TS", true) // debug function private val flaw = flawf ("RegressionTreeMT4TS") // flaw function - modelName = s"RegressionTreeMT4TS_$lags" + _modelName = s"RegressionTreeMT4TS_$lags" debug ("init", s"$modelName: x.dims = ${x.dims}, yy.dim = ${yy.dim}") @@ -170,19 +169,16 @@ object RegressionTreeMT4TS: * @param h the forecasting horizon (1, 2, ... h) * @param intercept whether to add a column of all ones to the matrix (intercept) * @param hparam the hyper-parameters (use RegressionTree.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) */ def exo (y: VectorD, lags: Int, ex: MatrixD, h: Int, - intercept: Boolean = true, hparam: HyperParameter = RegressionTree.hp) - (elag1: Int = max (1, lags / 5), - elag2: Int = max (1, lags)): RegressionTreeMT4TS = + intercept: Boolean = true, hparam: HyperParameter = RegressionTree.hp): RegressionTreeMT4TS = val (x_, yy) = buildMatrix4TS (y, lags, h) // column for each lag var x = if intercept then VectorD.one (yy.dim) +^: x_ else x_ // add first column of all ones val endoCols = x.dim2 println (s"endogenous: columns = $endoCols") - x = x ++^ ARX.makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var +// x = x ++^ ARX.makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var + x = x ++^ MakeMatrix4TS.makeMatrix4EXO (ex, lags, 1) // add columns for each lagged exo var println (s"exogenous: columns = ${x.dim2 - endoCols}") val y_ = yy(?, 0) // use first column @@ -233,7 +229,7 @@ object 
RegressionTreeMT4TS: banner (s"rollValidate: Evaluate ${mod.modelName}'s QoF for the horizons: 1 to $hh") println ("fitMap qof = ") - println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) + println (Fit.showFitMap (ftMat.ᵀ)) end rollValidate */ @@ -244,7 +240,7 @@ end RegressionTreeMT4TS /** The `regressionTreeMT4TSTest` main function tests the `RegressionTreeMT4TS` class. * This test is used to CHECK that the `buildMatrix4TS` function is working correctly. * May get NaN for some maximum lags (p) due to multi-collinearity. - * > runMain scalation.modeling.forecasting.regressionTreeMT4TSTest + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeMT4TSTest */ @main def regressionTreeMT4TSTest (): Unit = @@ -270,7 +266,7 @@ end regressionTreeMT4TSTest /** The `regressionTreeMT4TSTest2` main function tests the `RegressionTreeMT4TS` class * on real data: Forecasting lake levels. * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.regressionTreeMT4TSTest2 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeMT4TSTest2 */ @main def regressionTreeMT4TSTest2 (): Unit = @@ -301,7 +297,7 @@ import forecasting.Example_Covid.{loadData, NO_EXO, response} * on real data: Forecasts COVID-19 Weekly Data using endogenous variable only. * Does In-Sample Testing (In_ST). * Determines the terms to include in the model using Feature Selection. 
- * > runMain scalation.modeling.forecasting.regressionTreeMT4TSTest3 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeMT4TSTest3 */ @main def regressionTreeMT4TSTest3 (): Unit = @@ -317,12 +313,12 @@ import forecasting.Example_Covid.{loadData, NO_EXO, response} banner ("Test In-Sample RegressionTreeMT4TS on COVID-19 Weekly Data") val mod = RegressionTreeMT4TS (yy, LAGS, hh) // create model for time series data // val mod = RegressionTreeMT4TS.rescale (yy, LAGS, hh) // create model for time series data - scaling - val (yp, qof) = mod.trainNtest ()() // train on full and test on full + val yp = mod.trainNtest ()()._1 // train on full and test on full val yy_ = yy.drop (1) // can't forecast first point new Plot (null, yy_, yp, s"${mod.modelName}, yy_ vs. yp @ h = 1", lines = true) - val y_yp = MatrixD (yy_, yp).transpose + val y_yp = MatrixD (yy_, yp).ᵀ println (s"y_yp = $y_yp") val xx = mod.getX @@ -334,8 +330,7 @@ import forecasting.Example_Covid.{loadData, NO_EXO, response} val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeMT4TS with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RegressionTreeMT4TS with tech", lines = true) // println (mod.summary ()) banner ("Feature Importance") @@ -351,7 +346,7 @@ end regressionTreeMT4TSTest3 /** The `regressionTreeMT4TSTest4` main function tests the `RegressionTreeMT4TS` class * on real data: Forecasts COVID-19 Weekly Data using endogenous variables. * Does Train-n-Test (TnT) Split testing on the model. 
- * > runMain scalation.modeling.forecasting.regressionTreeMT4TSTest4 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeMT4TSTest4 */ @main def regressionTreeMT4TSTest4 (): Unit = @@ -383,7 +378,7 @@ end regressionTreeMT4TSTest4 * on real data: Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. * Does In-Sample Testing (In-ST). * Determines the terms to include in the model using Feature Selection. - * > runMain scalation.modeling.forecasting.regressionTreeMT4TSTest5 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeMT4TSTest5 */ @main def regressionTreeMT4TSTest5 (): Unit = @@ -395,8 +390,8 @@ end regressionTreeMT4TSTest4 println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") banner ("Test In-Sample RegressionTreeMT4TS.exo on COVID-19 Weekly Data") - val mod = RegressionTreeMT4TS.exo (y, LAGS, ex, hh)(1, LAGS+1) // create model for time series data - with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full + val mod = RegressionTreeMT4TS.exo (y, LAGS, ex, hh) // create model for time series data - with exo + val yp = mod.trainNtest ()()._1 // train on full and test on full val yy_ = y(LAGS until y.dim) new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. 
yp", lines = true) @@ -405,11 +400,10 @@ end regressionTreeMT4TSTest4 val tech = SelectionTech.Stepwise banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val (cols, rSq) = mod.selectFeatures (tech, cross = "none") // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeMT4TS with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RegressionTreeMT4TS with tech", lines = true) // println (mod.summary ()) banner ("Feature Importance") @@ -426,7 +420,7 @@ end regressionTreeMT4TSTest5 * Does In-Sample Testing (In-ST). * Determines the terms to include in the model using Feature Selection. * Run Train-n-Test (TnT) Split testing on best model. - * > runMain scalation.modeling.forecasting.regressionTreeMT4TSTest6 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeMT4TSTest6 */ @main def regressionTreeMT4TSTest6 (): Unit = @@ -438,8 +432,8 @@ end regressionTreeMT4TSTest5 println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") banner ("Test In-Sample RegressionTreeMT4TS.exo on COVID-19 Weekly Data") - val mod = RegressionTreeMT4TS.exo (y, LAGS, ex, hh)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full + val mod = RegressionTreeMT4TS.exo (y, LAGS, ex, hh) // create model for time series data with exo + val yp = mod.trainNtest ()()._1 // train on full and test on full val yy_ = y(LAGS until y.dim) new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. 
yp", lines = true) @@ -448,11 +442,10 @@ end regressionTreeMT4TSTest5 val tech = SelectionTech.Stepwise banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val (cols, rSq) = mod.selectFeatures (tech, cross = "none") // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeMT4TS with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RegressionTreeMT4TS with tech", lines = true) // println (mod.summary ()) banner ("Feature Importance") @@ -464,7 +457,7 @@ end regressionTreeMT4TSTest5 // val bmod = mod.getBest._4 // get the best model from feature selection val bmod = mod.getBest.mod.asInstanceOf [RegressionTreeMT4TS] // get the best model from feature selection val (x_, y_, xtest, ytest) = ForecasterX.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) + val yptest = bmod.trainNtest (x_, y_)(xtest, ytest)._1 // train on (x_, y_) and test on (xtest, ytest) new Plot (null, ytest, yptest, s"${mod.modelName}, ytest vs. yptest", lines = true) end regressionTreeMT4TSTest6 @@ -476,7 +469,7 @@ end regressionTreeMT4TSTest6 * Does In-Sample Testing (In-ST). * Determines the terms to include in the model using Feature Selection. * Run Train-n-Test (TnT) Split testing on best model using Rolling Validation. 
- * > runMain scalation.modeling.forecasting.regressionTreeMT4TSTest7 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeMT4TSTest7 */ @main def regressionTreeMT4TSTest7 (): Unit = @@ -492,8 +485,8 @@ end regressionTreeMT4TSTest6 println (s"te_size = $te_size") banner ("Test In-Sample RegressionTreeMT4TS.exo on COVID-19 Weekly Data") - val mod = RegressionTreeMT4TS.exo (y, LAGS, ex, hh)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full + val mod = RegressionTreeMT4TS.exo (y, LAGS, ex, hh) // create model for time series data with exo + val yp = mod.trainNtest ()()._1 // train on full and test on full val yy_ = y(LAGS until y.dim) new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. yp", lines = true) @@ -502,11 +495,10 @@ end regressionTreeMT4TSTest6 val tech = SelectionTech.Stepwise banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val (cols, rSq) = mod.selectFeatures (tech, cross = "none") // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeMT4TS with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RegressionTreeMT4TS with tech", lines = true) // println (mod.summary ()) banner ("Feature Importance") diff --git a/src/main/scala/scalation/modeling/forecasting_old/RegressionTreeRF4TS.scala b/src/main/scala/scalation/modeling/forecasting/reg_trees/RegressionTreeRF4TS.scala similarity index 90% rename from src/main/scala/scalation/modeling/forecasting_old/RegressionTreeRF4TS.scala rename to src/main/scala/scalation/modeling/forecasting/reg_trees/RegressionTreeRF4TS.scala index a0dd5fc12..d53dbf5b2 100644 --- a/src/main/scala/scalation/modeling/forecasting_old/RegressionTreeRF4TS.scala +++ 
b/src/main/scala/scalation/modeling/forecasting/reg_trees/RegressionTreeRF4TS.scala @@ -10,9 +10,8 @@ package scalation package modeling -package forecasting_old - -import scala.math.max +package forecasting +package reg_trees import scalation.mathstat._ @@ -28,7 +27,7 @@ import scalation.mathstat._ * * @param x the input/predictor matrix built out of lags of y * (and optionally from exogenous variables ex) - * @param yy the output/response vector trimmed to match x.dim (@see ARX object) + * @param yy the output/response vector trimmed to match x.dim * @param lags the maximum lag included (inclusive) * @param fname the feature/variable names * @param use_fb whether to use feature bagging (select subsets of the features) @@ -42,7 +41,7 @@ class RegressionTreeRF4TS (x: MatrixD, yy: VectorD, lags: Int, fname: Array [Str private val debug = debugf ("RegressionTreeRF4TS", false) // debug function private val flaw = flawf ("RegressionTreeRF4TS") // flaw function - modelName = s"RegressionTreeRF4TS_$lags" + _modelName = s"RegressionTreeRF4TS_$lags" debug ("init", s"$modelName: x.dims = ${x.dims}, yy.dim = ${yy.dim}") @@ -171,19 +170,16 @@ object RegressionTreeRF4TS: * @param h the forecasting horizon (1, 2, ... 
h) * @param intercept whether to add a column of all ones to the matrix (intercept) * @param hparam the hyper-parameters (use RegressionTree.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) */ def exo (y: VectorD, lags: Int, ex: MatrixD, h: Int, - intercept: Boolean = true, hparam: HyperParameter = RegressionTree.hp) - (elag1: Int = max (1, lags / 5), - elag2: Int = max (1, lags)): RegressionTreeRF4TS = + intercept: Boolean = true, hparam: HyperParameter = RegressionTree.hp): RegressionTreeRF4TS = val (x_, yy) = buildMatrix4TS (y, lags, h) // column for each lag var x = if intercept then VectorD.one (yy.dim) +^: x_ else x_ // add first column of all ones val endoCols = x.dim2 println (s"endogenous: columns = $endoCols") - x = x ++^ ARX.makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var +// x = x ++^ ARX.makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var + x = x ++^ MakeMatrix4TS.makeMatrix4EXO (ex, lags, 1) // add columns for each lagged exo var println (s"exogenous: columns = ${x.dim2 - endoCols}") val y_ = yy(?, 0) // use first column @@ -234,7 +230,7 @@ object RegressionTreeRF4TS: banner (s"rollValidate: Evaluate ${mod.modelName}'s QoF for the horizons: 1 to $hh") println ("fitMap qof = ") - println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) + println (Fit.showFitMap (ftMat.ᵀ)) end rollValidate */ @@ -245,7 +241,7 @@ end RegressionTreeRF4TS /** The `regressionTreeRF4TSTest` main function tests the `RegressionTreeRF4TS` class. * This test is used to CHECK that the `buildMatrix4TS` function is working correctly. * May get NaN for some maximum lags (p) due to multi-collinearity. 
- * > runMain scalation.modeling.forecasting.regressionTreeRF4TSTest + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeRF4TSTest */ @main def regressionTreeRF4TSTest (): Unit = @@ -271,7 +267,7 @@ end regressionTreeRF4TSTest /** The `regressionTreeRF4TSTest2` main function tests the `RegressionTreeRF4TS` class * on real data: Forecasting lake levels. * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.regressionTreeRF4TSTest2 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeRF4TSTest2 */ @main def regressionTreeRF4TSTest2 (): Unit = @@ -302,7 +298,7 @@ import forecasting.Example_Covid.{loadData, NO_EXO, response} * on real data: Forecasts COVID-19 Weekly Data using endogenous variable only. * Does In-Sample Testing (In_ST). * Determines the terms to include in the model using Feature Selection. - * > runMain scalation.modeling.forecasting.regressionTreeRF4TSTest3 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeRF4TSTest3 */ @main def regressionTreeRF4TSTest3 (): Unit = @@ -316,12 +312,12 @@ import forecasting.Example_Covid.{loadData, NO_EXO, response} banner ("Test In-Sample RegressionTreeRF4TS on COVID-19 Weekly Data") val mod = RegressionTreeRF4TS (yy, LAGS, hh) // create model for time series data // val mod = RegressionTreeRF4TS.rescale (yy, LAGS, hh) // create model for time series data - scaling - val (yp, qof) = mod.trainNtest ()() // train on full and test on full + val yp = mod.trainNtest ()()._1 // train on full and test on full val yy_ = yy.drop (1) // can't forecast first point new Plot (null, yy_, yp, s"${mod.modelName}, yy_ vs. 
yp @ h = 1", lines = true) - val y_yp = MatrixD (yy_, yp).transpose + val y_yp = MatrixD (yy_, yp).ᵀ println (s"y_yp = $y_yp") val xx = mod.getX @@ -333,8 +329,7 @@ import forecasting.Example_Covid.{loadData, NO_EXO, response} val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeRF4TS with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RegressionTreeRF4TS with tech", lines = true) // println (mod.summary ()) banner ("Feature Importance") @@ -350,7 +345,7 @@ end regressionTreeRF4TSTest3 /** The `regressionTreeRF4TSTest4` main function tests the `RegressionTreeRF4TS` class * on real data: Forecasts COVID-19 Weekly Data using endogenous variables. * Does Train-n-Test (TnT) Split testing on the model. - * > runMain scalation.modeling.forecasting.regressionTreeRF4TSTest4 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeRF4TSTest4 */ @main def regressionTreeRF4TSTest4 (): Unit = @@ -380,7 +375,7 @@ end regressionTreeRF4TSTest4 * on real data: Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. * Does In-Sample Testing (In-ST). * Determines the terms to include in the model using Feature Selection. 
- * > runMain scalation.modeling.forecasting.regressionTreeRF4TSTest5 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeRF4TSTest5 */ @main def regressionTreeRF4TSTest5 (): Unit = @@ -392,8 +387,8 @@ end regressionTreeRF4TSTest4 println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") banner ("Test In-Sample RegressionTreeRF4TS.exo on COVID-19 Weekly Data") - val mod = RegressionTreeRF4TS.exo (y, LAGS, ex, hh)(1, LAGS+1) // create model for time series data - with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full + val mod = RegressionTreeRF4TS.exo (y, LAGS, ex, hh) // create model for time series data - with exo + val yp = mod.trainNtest ()()._1 // train on full and test on full val yy_ = y(LAGS until y.dim) new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. yp", lines = true) @@ -402,11 +397,10 @@ end regressionTreeRF4TSTest4 val tech = SelectionTech.Stepwise banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val (cols, rSq) = mod.selectFeatures (tech, cross = "none") // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeRF4TS with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RegressionTreeRF4TS with tech", lines = true) // println (mod.summary ()) banner ("Feature Importance") @@ -423,7 +417,7 @@ end regressionTreeRF4TSTest5 * Does In-Sample Testing (In-ST). * Determines the terms to include in the model using Feature Selection. * Run Train-n-Test (TnT) Split testing on best model. 
- * > runMain scalation.modeling.forecasting.regressionTreeRF4TSTest6 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeRF4TSTest6 */ @main def regressionTreeRF4TSTest6 (): Unit = @@ -435,8 +429,8 @@ end regressionTreeRF4TSTest5 println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") banner ("Test In-Sample RegressionTreeRF4TS.exo on COVID-19 Weekly Data") - val mod = RegressionTreeRF4TS.exo (y, LAGS, ex, hh)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full + val mod = RegressionTreeRF4TS.exo (y, LAGS, ex, hh) // create model for time series data with exo + val yp = mod.trainNtest ()()._1 // train on full and test on full val yy_ = y(LAGS until y.dim) new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. yp", lines = true) @@ -445,11 +439,10 @@ end regressionTreeRF4TSTest5 val tech = SelectionTech.Stepwise banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val (cols, rSq) = mod.selectFeatures (tech, cross = "none") // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeRF4TS with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RegressionTreeRF4TS with tech", lines = true) // println (mod.summary ()) banner ("Feature Importance") @@ -461,7 +454,7 @@ end regressionTreeRF4TSTest5 // val bmod = mod.getBest._4 // get the best model from feature selection val bmod = mod.getBest.mod.asInstanceOf [RegressionTreeRF4TS] // get the best model from feature selection val (x_, y_, xtest, ytest) = ForecasterX.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) + val yptest = bmod.trainNtest (x_, y_)(xtest, ytest)._1 // train on (x_, y_) and 
test on (xtest, ytest) new Plot (null, ytest, yptest, s"${mod.modelName}, ytest vs. yptest", lines = true) end regressionTreeRF4TSTest6 @@ -473,7 +466,7 @@ end regressionTreeRF4TSTest6 * Does In-Sample Testing (In-ST). * Determines the terms to include in the model using Feature Selection. * Run Train-n-Test (TnT) Split testing on best model using Rolling Validation. - * > runMain scalation.modeling.forecasting.regressionTreeRF4TSTest7 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeRF4TSTest7 */ @main def regressionTreeRF4TSTest7 (): Unit = @@ -489,8 +482,8 @@ end regressionTreeRF4TSTest6 println (s"te_size = $te_size") banner ("Test In-Sample RegressionTreeRF4TS.exo on COVID-19 Weekly Data") - val mod = RegressionTreeRF4TS.exo (y, LAGS, ex, hh)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full + val mod = RegressionTreeRF4TS.exo (y, LAGS, ex, hh) // create model for time series data with exo + val yp = mod.trainNtest ()()._1 // train on full and test on full val yy_ = y(LAGS until y.dim) new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. 
yp", lines = true) @@ -499,11 +492,10 @@ end regressionTreeRF4TSTest6 val tech = SelectionTech.Stepwise banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val (cols, rSq) = mod.selectFeatures (tech, cross = "none") // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeRF4TS with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RegressionTreeRF4TS with tech", lines = true) // println (mod.summary ()) banner ("Feature Importance") diff --git a/src/main/scala/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS.scala b/src/main/scala/scalation/modeling/forecasting/reg_trees/RegressionTreeRF_MT4TS.scala similarity index 90% rename from src/main/scala/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS.scala rename to src/main/scala/scalation/modeling/forecasting/reg_trees/RegressionTreeRF_MT4TS.scala index 758aad440..e114b47a5 100644 --- a/src/main/scala/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS.scala +++ b/src/main/scala/scalation/modeling/forecasting/reg_trees/RegressionTreeRF_MT4TS.scala @@ -10,9 +10,8 @@ package scalation package modeling -package forecasting_old - -import scala.math.max +package forecasting +package reg_trees import scalation.mathstat._ @@ -28,7 +27,7 @@ import scalation.mathstat._ * * @param x the input/predictor matrix built out of lags of y * (and optionally from exogenous variables ex) - * @param yy the output/response vector trimmed to match x.dim (@see ARX object) + * @param yy the output/response vector trimmed to match x.dim * @param lags the maximum lag included (inclusive) * @param fname the feature/variable names * @param use_fb whether to use feature bagging (select subsets of the features) @@ -42,7 +41,7 @@ class RegressionTreeRF_MT4TS (x: MatrixD, yy: 
VectorD, lags: Int, fname: Array [ private val debug = debugf ("RegressionTreeRF_MT4TS", true) // debug function private val flaw = flawf ("RegressionTreeRF_MT4TS") // flaw function - modelName = s"RegressionTreeRF_MT4TS_$lags" + _modelName = s"RegressionTreeRF_MT4TS_$lags" debug ("init", s"$modelName: x.dims = ${x.dims}, yy.dim = ${yy.dim}") @@ -171,19 +170,16 @@ object RegressionTreeRF_MT4TS: * @param h the forecasting horizon (1, 2, ... h) * @param intercept whether to add a column of all ones to the matrix (intercept) * @param hparam the hyper-parameters (use RegressionTree.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) */ def exo (y: VectorD, lags: Int, ex: MatrixD, h: Int, - intercept: Boolean = true, hparam: HyperParameter = RegressionTree.hp) - (elag1: Int = max (1, lags / 5), - elag2: Int = max (1, lags)): RegressionTreeRF_MT4TS = + intercept: Boolean = true, hparam: HyperParameter = RegressionTree.hp): RegressionTreeRF_MT4TS = val (x_, yy) = buildMatrix4TS (y, lags, h) // column for each lag var x = if intercept then VectorD.one (yy.dim) +^: x_ else x_ // add first column of all ones val endoCols = x.dim2 println (s"endogenous: columns = $endoCols") - x = x ++^ ARX.makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var +// x = x ++^ ARX.makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var + x = x ++^ MakeMatrix4TS.makeMatrix4EXO (ex, lags, 1) // add columns for each lagged exo var println (s"exogenous: columns = ${x.dim2 - endoCols}") val y_ = yy(?, 0) // use first column @@ -234,7 +230,7 @@ object RegressionTreeRF_MT4TS: banner (s"rollValidate: Evaluate ${mod.modelName}'s QoF for the horizons: 1 to $hh") println ("fitMap qof = ") - println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) + println (Fit.showFitMap (ftMat.ᵀ)) end rollValidate */ @@ -245,7 +241,7 @@ end RegressionTreeRF_MT4TS /** The 
`regressionTreeRF_MT4TSTest` main function tests the `RegressionTreeRF_MT4TS` class. * This test is used to CHECK that the `buildMatrix4TS` function is working correctly. * May get NaN for some maximum lags (p) due to multi-collinearity. - * > runMain scalation.modeling.forecasting.regressionTreeRF_MT4TSTest + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeRF_MT4TSTest */ @main def regressionTreeRF_MT4TSTest (): Unit = @@ -271,7 +267,7 @@ end regressionTreeRF_MT4TSTest /** The `regressionTreeRF_MT4TSTest2` main function tests the `RegressionTreeRF_MT4TS` class * on real data: Forecasting lake levels. * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.regressionTreeRF_MT4TSTest2 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeRF_MT4TSTest2 */ @main def regressionTreeRF_MT4TSTest2 (): Unit = @@ -302,7 +298,7 @@ import forecasting.Example_Covid.{loadData, NO_EXO, response} * on real data: Forecasts COVID-19 Weekly Data using endogenous variable only. * Does In-Sample Testing (In_ST). * Determines the terms to include in the model using Feature Selection. - * > runMain scalation.modeling.forecasting.regressionTreeRF_MT4TSTest3 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeRF_MT4TSTest3 */ @main def regressionTreeRF_MT4TSTest3 (): Unit = @@ -316,12 +312,12 @@ import forecasting.Example_Covid.{loadData, NO_EXO, response} banner ("Test In-Sample RegressionTreeRF_MT4TS on COVID-19 Weekly Data") val mod = RegressionTreeRF_MT4TS (yy, LAGS, hh) // create model for time series data // val mod = RegressionTreeRF_MT4TS.rescale (yy, LAGS, hh) // create model for time series data - scaling - val (yp, qof) = mod.trainNtest ()() // train on full and test on full + val yp = mod.trainNtest ()()._1 // train on full and test on full val yy_ = yy.drop (1) // can't forecast first point new Plot (null, yy_, yp, s"${mod.modelName}, yy_ vs. 
yp @ h = 1", lines = true) - val y_yp = MatrixD (yy_, yp).transpose + val y_yp = MatrixD (yy_, yp).ᵀ println (s"y_yp = $y_yp") val xx = mod.getX @@ -333,8 +329,7 @@ import forecasting.Example_Covid.{loadData, NO_EXO, response} val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeRF_MT4TS with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RegressionTreeRF_MT4TS with tech", lines = true) // println (mod.summary ()) banner ("Feature Importance") @@ -350,7 +345,7 @@ end regressionTreeRF_MT4TSTest3 /** The `regressionTreeRF_MT4TSTest4` main function tests the `RegressionTreeRF_MT4TS` class * on real data: Forecasts COVID-19 Weekly Data using endogenous variables. * Does Train-n-Test (TnT) Split testing on the model. - * > runMain scalation.modeling.forecasting.regressionTreeRF_MT4TSTest4 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeRF_MT4TSTest4 */ @main def regressionTreeRF_MT4TSTest4 (): Unit = @@ -380,7 +375,7 @@ end regressionTreeRF_MT4TSTest4 * on real data: Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. * Does In-Sample Testing (In-ST). * Determines the terms to include in the model using Feature Selection. 
- * > runMain scalation.modeling.forecasting.regressionTreeRF_MT4TSTest5 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeRF_MT4TSTest5 */ @main def regressionTreeRF_MT4TSTest5 (): Unit = @@ -392,8 +387,8 @@ end regressionTreeRF_MT4TSTest4 println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") banner ("Test In-Sample RegressionTreeRF_MT4TS.exo on COVID-19 Weekly Data") - val mod = RegressionTreeRF_MT4TS.exo (y, LAGS, ex, hh)(1, LAGS+1) // create model for time series data - with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full + val mod = RegressionTreeRF_MT4TS.exo (y, LAGS, ex, hh) // create model for time series data - with exo + val yp = mod.trainNtest ()()._1 // train on full and test on full val yy_ = y(LAGS until y.dim) new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. yp", lines = true) @@ -402,11 +397,10 @@ end regressionTreeRF_MT4TSTest4 val tech = SelectionTech.Stepwise banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val (cols, rSq) = mod.selectFeatures (tech, cross = "none") // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeRF_MT4TS with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RegressionTreeRF_MT4TS with tech", lines = true) // println (mod.summary ()) banner ("Feature Importance") @@ -423,7 +417,7 @@ end regressionTreeRF_MT4TSTest5 * Does In-Sample Testing (In-ST). * Determines the terms to include in the model using Feature Selection. * Run Train-n-Test (TnT) Split testing on best model. 
- * > runMain scalation.modeling.forecasting.regressionTreeRF_MT4TSTest6 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeRF_MT4TSTest6 */ @main def regressionTreeRF_MT4TSTest6 (): Unit = @@ -435,8 +429,8 @@ end regressionTreeRF_MT4TSTest5 println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") banner ("Test In-Sample RegressionTreeRF_MT4TS.exo on COVID-19 Weekly Data") - val mod = RegressionTreeRF_MT4TS.exo (y, LAGS, ex, hh)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full + val mod = RegressionTreeRF_MT4TS.exo (y, LAGS, ex, hh) // create model for time series data with exo + val yp = mod.trainNtest ()()._1 // train on full and test on full val yy_ = y(LAGS until y.dim) new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. yp", lines = true) @@ -445,11 +439,10 @@ end regressionTreeRF_MT4TSTest5 val tech = SelectionTech.Stepwise banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val (cols, rSq) = mod.selectFeatures (tech, cross = "none") // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeRF_MT4TS with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RegressionTreeRF_MT4TS with tech", lines = true) // println (mod.summary ()) banner ("Feature Importance") @@ -461,7 +454,7 @@ end regressionTreeRF_MT4TSTest5 // val bmod = mod.getBest._4 // get the best model from feature selection val bmod = mod.getBest.mod.asInstanceOf [RegressionTreeRF_MT4TS] // get the best model from feature selection val (x_, y_, xtest, ytest) = ForecasterX.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) + val yptest = bmod.trainNtest (x_, y_)(xtest, 
ytest)._1 // train on (x_, y_) and test on (xtest, ytest) new Plot (null, ytest, yptest, s"${mod.modelName}, ytest vs. yptest", lines = true) end regressionTreeRF_MT4TSTest6 @@ -473,7 +466,7 @@ end regressionTreeRF_MT4TSTest6 * Does In-Sample Testing (In-ST). * Determines the terms to include in the model using Feature Selection. * Run Train-n-Test (TnT) Split testing on best model using Rolling Validation. - * > runMain scalation.modeling.forecasting.regressionTreeRF_MT4TSTest7 + * > runMain scalation.modeling.forecasting.reg_trees.regressionTreeRF_MT4TSTest7 */ @main def regressionTreeRF_MT4TSTest7 (): Unit = @@ -489,8 +482,8 @@ end regressionTreeRF_MT4TSTest6 println (s"te_size = $te_size") banner ("Test In-Sample RegressionTreeRF_MT4TS.exo on COVID-19 Weekly Data") - val mod = RegressionTreeRF_MT4TS.exo (y, LAGS, ex, hh)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full + val mod = RegressionTreeRF_MT4TS.exo (y, LAGS, ex, hh) // create model for time series data with exo + val yp = mod.trainNtest ()()._1 // train on full and test on full val yy_ = y(LAGS until y.dim) new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. 
yp", lines = true) @@ -499,11 +492,10 @@ end regressionTreeRF_MT4TSTest6 val tech = SelectionTech.Stepwise banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA + val (cols, rSq) = mod.selectFeatures (tech, cross = "none") // R^2, R^2 bar, sMAPE, NA val k = cols.size println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeRF_MT4TS with tech", lines = true) + new PlotM (null, rSq.ᵀ, Regression.metrics, s"R^2 vs n for RegressionTreeRF_MT4TS with tech", lines = true) // println (mod.summary ()) banner ("Feature Importance") diff --git a/src/main/scala/scalation/modeling/forecasting_old/RollingValidation.scala b/src/main/scala/scalation/modeling/forecasting/reg_trees/RollingValidation.scala similarity index 82% rename from src/main/scala/scalation/modeling/forecasting_old/RollingValidation.scala rename to src/main/scala/scalation/modeling/forecasting/reg_trees/RollingValidation.scala index f606c3d7c..c5bb44441 100644 --- a/src/main/scala/scalation/modeling/forecasting_old/RollingValidation.scala +++ b/src/main/scala/scalation/modeling/forecasting/reg_trees/RollingValidation.scala @@ -8,9 +8,12 @@ * @note Model Framework: Rolling Validation for Forecasters */ +// FIX - convert the regression trees so the `RollingValidation` object is not needed and can be eliminated + package scalation package modeling -package forecasting_old +package forecasting +package reg_trees import scala.math.{max, round} @@ -66,7 +69,7 @@ object RollingValidation: * @param mod the forecasting model being used (e.g., `ARIMA`) * @param rc the retraining cycle (number of forecasts until retraining occurs) */ - def rollValidate (mod: Forecaster & Fit, rc: Int): Unit = + def rollValidate (mod: ForecasterX & Fit, rc: Int): Unit = val y = mod.getY // get (expanded) response/output vector val te_size = teSize (y.dim) // size of 
testing set val tr_size = y.dim - te_size // size of initial training set @@ -77,11 +80,15 @@ object RollingValidation: val t = tr_size + i // next time point to forecast // if i % rc == 0 then mod.train (null, y(0 until t)) // retrain on sliding training set (growing set) if i % rc == 0 then mod.train (null, y(i until t)) // retrain on sliding training set (fixed size set) - yp(i) = mod.predict (t-1, y) // predict the next value +// yp(i) = mod.predict (t-1, y) // predict the next value + yp(i) = mod.predict (t-1, MatrixD (y)) // predict the next value - FIX end for val (t, yy) = align (tr_size, y) // align vectors - val df = max (1, mod.parameter.size - 1) // degrees of freedom for model + val pars = mod.parameter + val psize = if pars.isInstanceOf [VectorD] then pars.asInstanceOf [VectorD].dim + else pars.asInstanceOf [MatrixD].dim2 + val df = max (1, psize - 1) // degrees of freedom for model mod.resetDF (df, te_size - df) // reset degrees of freedom new Plot (t, yy, yp, "Plot yy, yp vs. 
t", lines = true) println (FitM.fitMap (mod.diagnose (yy, yp), qoF_names)) @@ -97,13 +104,13 @@ object RollingValidation: * @param rc the retraining cycle (number of forecasts until retraining occurs) * @param h the forecasting horizon (h-steps ahead) */ - def rollValidate (mod: Forecaster & Fit, rc: Int, h: Int): MatrixD = + def rollValidate (mod: ForecasterX & Fit, rc: Int, h: Int): MatrixD = val ftMat = new MatrixD (h, Fit.N_QoF) banner (s"rollValidate: Evaluate ${mod.modelName}'s QoF for horizons 1 to $h:") val y = mod.getY // get (expanded) response/output vector // val yf = mod.makeForecastMatrix (y, mod.getYp, h) // build forecast matrix - val yf = mod.forecastAll (y, h) // get the full in-sample forecast matrix + val yf = mod.forecastAll (y, mod.getX, h) // get the full in-sample forecast matrix val te_size = teSize (y.dim) // size of testing set val tr_size = y.dim - te_size // size of initial training set debug ("rollValidate", s"y.dim = ${y.dim}, train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") @@ -113,15 +120,19 @@ object RollingValidation: val t = tr_size + i // next time point to forecast // if i % rc == 0 then mod.train (null, y(0 until t)) // retrain on sliding training set (growing set) if i % rc == 0 then mod.train (null, y(i until t)) // retrain on sliding training set (fixed size set) - yp(i) = mod.predict (t-1, y) // predict the next value (only for h=1) - val yd = mod.forecast (t-1, yf, y, h) // forecast the next h-values +// yp(i) = mod.predict (t-1, y) // predict the next value (only for h=1) + yp(i) = mod.predict (t-1, MatrixD (y)) // predict the next value - FIX +// val yd = mod.forecast (t-1, yf, y, h) // forecast the next h-values // yf is updated down its diagonals - println (s"yp(i) = ${yp(i)}, yd = $yd") - assert (yp(i) =~ yd(0)) // make sure h=1 forecasts agree with predictions +// println (s"yp(i) = ${yp(i)}, yd = $yd") +// assert (yp(i) =~ yd(0)) // make sure h=1 forecasts agree with predictions end for // yf is 
updated down its diagonals val (t, yy) = align (tr_size, y) // align vectors - val df = max (1, mod.parameter.size - 1) // degrees of freedom for model + val pars = mod.parameter + val psize = if pars.isInstanceOf [VectorD] then pars.asInstanceOf [VectorD].dim + else pars.asInstanceOf [MatrixD].dim2 + val df = max (1, psize - 1) // degrees of freedom for model mod.resetDF (df, te_size - df) // reset degrees of freedom new Plot (t, yy, yp, "Plot yy, yp vs. t", lines = true) @@ -134,7 +145,7 @@ object RollingValidation: // println (FitM.fitMap (qof, qoF_names)) end for println ("fitMap qof = ") - println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) + println (Fit.showFitMap (ftMat.ᵀ)) yf_ end rollValidate @@ -147,9 +158,9 @@ object RollingValidation: * @param rc the retraining cycle (number of forecasting until retraining occurs) * @param h the forecasting horizon (h-steps ahead) */ - def testValidate (mod: Forecaster & Fit, rc: Int, h: Int): Unit = + def testValidate (mod: ForecasterX & Fit, rc: Int, h: Int): Unit = banner (s"testValidate: in-sample on full dataset for ${mod.modelName}") - val (yp, qof) = mod.trainNtest ()() + mod.trainNtest (mod.getX, mod.getY)(mod.getX, mod.getY) banner (s"testValidate: out-of-sample predict rolling validation for ${mod.modelName}") RollingValidation.rollValidate (mod, rc) @@ -175,7 +186,7 @@ end RollingValidation //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `rollingValidationTest` main function is used to test the rollValidate method * in the `RollingValidation` object. 
- * > runMain scalation.modeling.forecasting.rollingValidationTest + * > runMain scalation.modeling.forecasting.reg_trees.rollingValidationTest */ @main def rollingValidationTest (): Unit = @@ -190,22 +201,22 @@ end RollingValidation val p = 3 // order of the model val h = 2 // forecasting horizon, try changing - val rc = 2 // retrain cycle +// val rc = 2 // retrain cycle println (s"y.min = ${y.min}, y.max = ${y.max}") banner (s"AR($p) full dataset results at forecasting horizon h = $h") SARIMA.hp("p") = p -// val mod = new AR (y) // create an AR(p) model - val mod = new ARMA (y) // create an ARMA(p, 0) model - val (yp, qof) = mod.trainNtest ()() // train-test model on full dataset +// val mod = new AR (y, h) // create an AR(p) model + val mod = new ARMA (y, h) // create an ARMA(p, 0) model + mod.trainNtest ()() // train-test model on full dataset banner (s"AR($p) one-step ahead rolling validation results") - RollingValidation.rollValidate (mod, rc) +// RollingValidation.rollValidate (mod, rc) banner (s"AR($p) $h-steps rolling validation results") - RollingValidation.rollValidate (mod, rc, h) +// RollingValidation.rollValidate (mod, rc, h) end rollingValidationTest @@ -213,7 +224,7 @@ end rollingValidationTest //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `rollingValidationTest2` main function is used to test the rollValidate method * in the `RollingValidation` object. 
- * > runMain scalation.modeling.forecasting.rollingValidationTest2 + * > runMain scalation.modeling.forecasting.reg_trees.rollingValidationTest2 */ @main def rollingValidationTest2 (): Unit = @@ -221,25 +232,25 @@ end rollingValidationTest val p = 3 // order of the model val h = 2 // forecasting horizon, try changing - val rc = 2 // retrain cycle +// val rc = 2 // retrain cycle println (s"y.min = ${y.min}, y.max = ${y.max}") banner (s"AR($p) full dataset results at forecasting horizon h = $h") SARIMA.hp("p") = p -// val mod = new AR (y) // create an AR(p) model - val mod = new ARMA (y) // create an ARMA(p, 0) model - val (yp, qof) = mod.trainNtest ()() // train-test model on full dataset +// val mod = new AR (y, h) // create an AR(p) model + val mod = new ARMA (y, h) // create an ARMA(p, 0) model + val yp = mod.trainNtest ()()._1 // train-test model on full dataset val t = VectorD.range (49 until 97) // note original y must be shifted new Plot (t, y(50 until 98), yp(49 until 97), "y, yp vs t 2nd half", lines = true) banner (s"AR($p) one-step ahead rolling validation results") - RollingValidation.rollValidate (mod, rc) +// RollingValidation.rollValidate (mod, rc) banner (s"AR($p) $h-steps rolling validation results") - RollingValidation.rollValidate (mod, rc, h) +// RollingValidation.rollValidate (mod, rc, h) end rollingValidationTest2 @@ -248,8 +259,8 @@ end rollingValidationTest2 /** The `rollingValidationTest3` main function is used to test the rollValidate method * in the `RollingValidation` object. * Compares baseline models on in-sample and out-of-sample assessment. 
- * > runMain scalation.modeling.forecasting.rollingValidationTest3 - */ + * > runMain scalation.modeling.forecasting.reg_trees.rollingValidationTest3 + * @main def rollingValidationTest3 (): Unit = import forecasting.Example_LakeLevels.y @@ -257,20 +268,21 @@ end rollingValidationTest2 val h = 2 // forecasting horizon, try changing val rc = 2 // retrain cycle - RollingValidation.testValidate (new RandomWalk (y), rc, h) + RollingValidation.testValidate (new RandomWalk (y, h), rc, h) - RollingValidation.testValidate (new NullModel (y), rc, h) + RollingValidation.testValidate (new NullModel (y, h), rc, h) - RollingValidation.testValidate (new TrendModel (y), rc, h) + RollingValidation.testValidate (new TrendModel (y, h), rc, h) end rollingValidationTest3 + */ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `rollingValidationTest4` main function is used to test the rollValidate method * in the `RollingValidation` object. * Random Walk is used to make structure of the yf matrix clear. 
- * > runMain scalation.modeling.forecasting.rollingValidationTest4 + * > runMain scalation.modeling.forecasting.reg_trees.rollingValidationTest4 */ @main def rollingValidationTest4 (): Unit = @@ -278,22 +290,22 @@ end rollingValidationTest3 val h = 2 // forecasting horizon, try changing banner (s"RW full dataset results at forecasting horizon h = $h") - val mod = new RandomWalk (y) // create an RW model + val mod = new RandomWalk (y, h) // create an RW model mod.train (null, y) // train the model on full dataset val (yp, qof) = mod.test (null, y) // test the model on full dataset println (mod.report (qof)) // report on Quality of Fit (QoF) println (s"yp = $yp") // print prediction matrix - val yf = mod.forecastAll (y, h) // produce all forecasts up horizon h + val yf = mod.forecastAll (y) // produce all forecasts up horizon h println (s"yf = $yf") // print forecast matrix - val rc = 2 // retrain cycle +// val rc = 2 // retrain cycle banner ("RW one-step ahead rolling validation results") - RollingValidation.rollValidate (mod, rc) +// RollingValidation.rollValidate (mod, rc) banner (s"RW $h-steps rolling validation results") - RollingValidation.rollValidate (mod, rc, h) +// RollingValidation.rollValidate (mod, rc, h) end rollingValidationTest4 diff --git a/src/main/scala/scalation/modeling/forecasting_old/AR.scala b/src/main/scala/scalation/modeling/forecasting_old/AR.scala deleted file mode 100644 index c5a0fe787..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/AR.scala +++ /dev/null @@ -1,435 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller, Michael Cotterell - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). 
- * - * @note Model: Auto-Regressive (AR) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting_old - -import scalation.mathstat._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` class provides basic time series analysis capabilities for Auto-Regressive - * (AR) models. In an AR(p) model, p refers to the order of the Auto-Regressive - * components of the model. AR models are often used for forecasting. - * Given time series data stored in vector y, its next value y_t+1 = y(t+1) - * may be predicted based on past values of y: - * - * y_t+1 = δ + Σ[φ_j y_t-j] + e_t+1 - * - * where δ is a constant, φ is the auto-regressive coefficient vector, - * and e_t+1 is the new residual/error term. 
- * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class AR (y: VectorD, tt: VectorD = null, hparam: HyperParameter = SARIMA.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = hparam("p").toDouble, df = y.dim - hparam("p").toDouble): - - private val debug = debugf ("AR", true) // debug function - private val flaw = flawf ("AR") // flaw function - private val p = hparam("p").toInt // p-th order Auto-Regressive model - private var φ = VectorD.nullv // AR(p) parameters/coefficients - private var δ = NO_DOUBLE // drift/intercept/constant term - private val pnq = p // sum of # parameters - private var calPhi = true // calculate phi vector - not externally supplied - - if p > MAX_LAGS then flaw ("init", s"p = $p must not be greater than MAX_LAGS = $MAX_LAGS") - - modelName = s"AR($p)" - - debug ("init", s"$modelName") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Allow users to set the parameter/coefficient vector the values they want, e.g., - * custom values or from another optimizer. - * @param phi the custom values for the parameter vector - */ - def setPhi (phi: VectorD): Unit = - φ = phi - calPhi = false - end setPhi - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `AR` model to the times-series data in vector y_. - * Estimate the coefficient vector φ for a p-th order Auto-Regressive AR(p) model. - * Uses Durbin-Levinson Algorithm (in `Correlogram`) to determine the coefficients. - * The φ vector is p-th row of psi matrix (ignoring the first (0th) column). 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - m = y_.dim // length of relevant time series - resetDF (pnq, m - pnq) // reset the degrees of freedom - makeCorrelogram (y_) // correlogram computes psi matrix - if calPhi then φ = psiM(p)(1 until p+1) // coefficients = p-th row, columns 1, 2, ... p - δ = statsF.mu * (1 - φ.sum) // compute drift/intercept - debug ("train", s"parameters for AR($p) model: φ = $φ, δ = $δ") - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an AR forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the testing/full response/output vector - */ - override def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yp, no_qof) = super.test (null, y_) // call super.test for predictions - resetDF (pnq, y_.dim - pnq) // reset the degrees of freedom - (yp, diagnose (y_, yp)) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an AR forecasting model y_ = f(lags (y_)) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, h) // get and align actual and forecasted values - resetDF (pnq, yy.dim - pnq) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// Forecaster.differ (yy, yfh) // uncomment for debugging - (yy, yfh, diagnose (yy, yfh)) // return aligned actual, forecasted and QoF vectors - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the AR(p) model. - */ - override def parameter: VectorD = φ :+ δ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * - * y_t+1 = δ + φ_0 y_t + φ_1 y_t-1 + ... + φ_p-1 y_t-(p-1) - * - * When t-j is negative, use y_0. - * @see `predictAll` method in `Forecaster` trait. - * @see `rdot` in Forecaster.scala for reverse dot product implementation. - * @param i the time series index from which to make prediction - * @param y_ the actual time series values to use in making predictions (has one backcast) - */ - def predict (i: Int, y_ : VectorD): Double = δ + rdot (φ, y_, i) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. 
- * Intended to work with rolling validation (analog of predict method) - * @param t the time point from which to make forecasts - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making predictions - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - val yd = new VectorD (h) // hold forecasts for each horizon - for k <- 1 to h do - val t1 = t + k - 1 // time point prior to horizon - val sum = δ + rdot (φ, yf, t1, k-1) - yf(t+k, k) = sum // forecast down the diagonal - yd(k-1) = sum // record diagonal values - end for - yd // return forecasts for each horizon - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to FORECAST MATRIX yf and return h-step ahead forecast. - * Use y_0 for y_t when t < 0 (i.e., assume first value repeats back in time). - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. 
- * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - val h1 = h - 1 // start pulling values for column h1 - - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max0 (h1-1-j), max0 (h1-j)) - yf(h1, h) = sum // first forecast is special case - - for i <- y_.indices do // make forecasts over all time points for horizon h - val t1 = i + h1 // time point prior to horizon - yf(i+h, h) = δ + rdot (φ, yf, t1, h1) // forecast down the diagonal - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end AR - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` companion object provides factory methods for the `AR` class. - * Use `SARIMA` for hyper-parameters. - */ -object AR: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `AR` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = SARIMA.hp): AR = - new AR (y, tt, hparam) - end apply - -end AR - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest` main function tests the `AR` class on simulated data. - * Test predictions (one step ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest - */ -@main def aRTest (): Unit = - - val y = makeTSeries () // create simulated time series (see `Stationary`) - - banner (s"Test Predictions: AR(1) on simulated time series") - val mod = new AR (y) // create model for time series data AR(1) - mod.trainNtest ()() // train and test on full dataset - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRTest - -import forecasting.Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest2` main function tests the `AR` class on real data: Forecasting lake levels. - * Test predictions (one step ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest2 - */ -@main def aRTest2 (): Unit = - - banner (s"Test Predictions: AR(1) on LakeLevels Dataset") - val mod = new AR (y) // create model for time series data AR(1) - mod.trainNtest ()() // train and test on full dataset - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest3` main function tests the `AR` class on real data: Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest3 - */ -@main def aRTest3 (): Unit = - - val hh = 2 // maximum forecasting horizon - - banner (s"Test Forecasts: AR(1) on LakeLevels Dataset") - val mod = new AR (y) // create model for time series data AR(1) - val (yp, qof) = mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll (y, hh) // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, y, hh) // same as code below, except MASE - - for h <- 1 to hh do - val (yy, yfh, qof) = mod.testF (h, y) // h-steps ahead forecast and its QoF - println (s"Evaluate QoF for horizon $h:") - println (FitM.fitMap (qof, qoF_names)) // evaluate h-steps ahead forecasts - println (s"Fit.mae (y, yfh, h) = ${Fit.mae (y, yfh, h)}") // evaluate h-steps ahead forecasts with MAE - println (s"Fit.mae_n (y, 1) = ${Fit.mae_n (y, 1)}") // evaluate h-steps ahead forecasts with MAE_n - println (s"Fit.mase (y, yfh, h) = ${Fit.mase (y, yfh, h)}") // evaluate h-steps ahead forecasts with MASE - - val (low, up) = mod.forecastAtI (yy, yfh, h) // prediction interval forecasts - val qof_all = mod.diagnose_ (yy, yfh, low, up) // fully evaluate h-steps ahead forecasts - mod.show_interval_forecasts (yy, yfh, low, up, qof_all, h) - end for - -end aRTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest4` main function tests the `AR` class on real data: Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts). Try multiple values for p. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest4 - */ -@main def aRTest4 (): Unit = - - val hh = 2 // maximum forecasting horizon - - var mod: AR = null - for p <- 1 to 7 do // autoregressive hyper-parameter p - SARIMA.hp("p") = p // set p hyper-parameter - banner (s"Test: AR($p) on LakeLevels Dataset") - mod = new AR (y) // create model for time series data - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll (y, hh) // forecast h-steps ahead for all y - Forecaster.evalForecasts (mod, y, hh) - end for - -end aRTest4 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest5` main function tests the `AR` class on small dataset. - * Test forecasts (h = 1 step ahead forecasts). - * > runMain scalation.modeling.forecasting.aRTest5 - */ -@main def aRTest5 (): Unit = - - val y = VectorD (1, 3, 4, 2, 5, 7, 9, 8, 6, 3) - - val mod = new AR (y) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on a Small Dataset") - mod.trainNtest ()() // train and test on full dataset - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - -end aRTest5 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest6` main function tests the `AR` class on real data: Forecasting Weekly Covid-19. - * Test forecasts (1 to h steps ahead forecasts). Try multiple values for p. 
- * > runMain scalation.modeling.forecasting.aRTest6 - */ -@main def aRTest6 (): Unit = - - val y = forecasting.Example_Covid.loadData_y ("new_deaths") - val hh = 4 // maximum forecasting horizon - - println (s"y.dim = ${y.dim}") - - var mod: AR = null - for p <- 1 to 12 do // autoregressive hyper-parameter p - SARIMA.hp("p") = p // set p hyper-parameter - banner (s"Test: AR($p) on Covid-19 Weekly Dataset") - mod = new AR (y) // create model for time series data - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll (y, hh) // forecast h-steps ahead for all y - Forecaster.evalForecasts (mod, y, hh) - end for - -end aRTest6 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest7` main function tests the `AR` class on simulated data. - * Test predictions (one step ahead forecasts). - * > runMain scalation.modeling.forecasting.aRTest7 - */ -@main def aRTest7 (): Unit = - - import scala.math.sqrt - - import ActivationFun.f_sigmoid.{fM, dM} - import neuralnet.{NeuralNet_3L, Optimizer} - - val y = VectorD (1, 2, 4, 7, 9, 8, 6, 5, 3) // create a time series by hand - - val m = y.dim - val mu_y = y.mean // mean for full series - - def rho (k: Int): Double = - var s = 0.0 - var q = 0.0 - for t <- 0 until y.dim-k do - s += (y(t) - mu_y) * (y(t+k) - mu_y) - for t <- 0 until y.dim do - q += (y(t) - mu_y)~^2 - s / q - end rho - - val yB1 = y(1 until m) // apply back-shift operator - val yy = y(0 until m-1) // y clipped to match the size of yB1 - val zz = yy - yy.mean - val zB1 = yB1 - yB1.mean - val r1 = (zz dot zB1) / sqrt ((zz dot zz) * (zB1 dot zB1)) // lag-1 auto-correlation - println (s"mu_y = $mu_y") - println (s"zz = $zz") - println (s"zB1 = $zB1") - println (s"r1 = $r1") - println (s"rho1 = ${rho(1)}") - println (s"rho2 = ${rho(2)}") - - banner (s"Test Predictions: AR(1) on hand created time series") - var mod = new AR (y) // create model for time series data AR(1) - mod.setPhi (VectorD (0.6)) // 
allows coefficients to be user specified - mod.trainNtest ()() // train and test on full dataset - - banner (s"Test Predictions: AR(2) on hand created time series") - SARIMA.hp("p") = 2 - mod = new AR (y) // create model for time series data AR(2) - mod.trainNtest ()() // train and test on full dataset - - val x = MatrixD ((9, 3), 1, 1, 8, - 1, 2, 7, - 1, 3, 6, - 1, 4, 5, - 1, 5, 5, - 1, 6, 4, - 1, 7, 4, - 1, 8, 3, - 1, 9, 2) - - banner (s"Test Predictions: Regression on hand created time series") - val reg = new Regression (x, y) - val (yp, qof) =reg.trainNtest ()() // train and test on full dataset - println (reg.summary ()) - new Plot (null, y, yp, "Regression", lines = true) - - banner (s"Test Predictions: NeuralNet_3L on hand created time series") - val x_ = x(?, 1 until 3) - val y_ = MatrixD.fromVector (y) - val a = MatrixD.fill (2, 2, 0.1) // weight matrix A - val b = MatrixD.fill (2, 1, 0.1) // weight matrix B - val ab = VectorD.fill (2)(0.1) // bias vector alpha - val bb = VectorD.fill (1)(0.1) // bias vector beta - val u = x_ * a + ab // hidden layer pre-activation - val z = fM (u) // hidden layer (use sigmoid) - val v = z * b + bb // output layer pre-activation - val yp_ = v // output layer (use id) - val e = yp_ - y_ // negative error - val d1 = e *~ dM (v) // delta 1: output -> hidden - val d0 = (d1 * b.transpose) *~ dM (z) // delta 0: hidden -> input - - println (s"u = $u, z = $z, v = $v, yp_ = $yp_, e = $e, d1 = $d1, d0 = $d0") - - Optimizer.hp ("eta") = 1.0 - val nn3 = new NeuralNet_3L (x(?, 1 until 3), MatrixD.fromVector (y), nz = 2) - val (yq, q0f) = nn3.trainNtest ()() // train and test on full dataset -// val (yq, q0f) = nn3.trainNtest2 ()() // train and test on full dataset - auto eta - nn3.opti.plotLoss ("NeuralNet_3L") - new Plot (null, y, yq(?, 0), "NeuralNet_3L", lines = true) - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial 
Auto-Correlation Function (PACF) - -end aRTest7 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/AR1MA.scala b/src/main/scala/scalation/modeling/forecasting_old/AR1MA.scala deleted file mode 100644 index fc02302bd..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/AR1MA.scala +++ /dev/null @@ -1,417 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Hao Peng - * @version 2.0 - * @date Thu May 26 18:06:08 EDT 2022 - * @see LICENSE (MIT style license file). - * - * @note Model: Auto-Regressive, Integrated (0 or 1), Moving Average (AR1MA) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting_old - -import scalation.mathstat._ - -import ARIMA_diff._ -import Forecaster.differ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR1MA` class provides basic time-series analysis capabilities for Auto- - * Regressive 'AR' Integrated 'I' Moving-Average 'MA' models. In an - * AR1MA(p, q) model, p and q refer to the order of the Auto-Regressive and - * Moving-Average components of the model; d=1 refers to the order of differencing. - * Works by taking the first difference and delegating to the `ARMA` class. - * Also works for d=0 (no differencing). 
- * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param diffr whether to take a first difference (defaults to true) - */ -class AR1MA (y: VectorD, tt: VectorD = null, hparam: HyperParameter = SARIMA.hp, - diffr: Boolean = true) - extends Forecaster (y, tt, hparam) - with Fit (dfm = hparam("p").toDouble, df = y.dim - pq (hparam)): - - private val debug = debugf ("AR1MA", true) // debug function - private val p = hparam("p").toInt // p-th order Auto-Regressive model - private val q = hparam("q").toInt // q-th order Moving-Average model - private val v = if diffr then Δ(y) else y // first difference of the full time-series - val arma = new ARMA (v, tt, hparam) // delegate to the `ARMA` class - - arma.modelName = s"AR1MA($p, $q)" // rename delegate ARMA to match - modelName = arma.modelName // use same name for AR1MA - - debug ("init", s"$modelName: diffr = $diffr") - - new Plot (null, y, null, s"Plot $modelName: y vs. t", lines = true) - if diffr then new Plot (null, v, null, s"Plot $modelName: v = Δ(y) vs. t", lines = true) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Pick one of the following vectors: v full first difference, u differenced, or u itself. - * @param u the input time-series vector - */ - def pick (u: VectorD): VectorD = - if u == y then v // passed in original full time-series - else if diffr then Δ(u) // sub-series differenced - else u // sub-series as is - end pick - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `AR1MA` model to the times-series data in vector y_. - * Estimate the coefficient vectors φ and θ for (p, q)-th order AR1MA(p, q) model. 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - override def train (x_null: MatrixD, y_ : VectorD): Unit = - arma.train (x_null, pick (y_)) - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an AR1MA forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/testing/full response/output vector - */ - override def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - arma.test (x_null, pick (y_)) - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train and test the forecasting model y_ = f(y-past) + e and report its QoF - * and plot its predictions. Return the predictions and QoF. - * @param y_ the training/full response/output vector (defaults to full y) - * @param yy the testing/full response/output vector (defaults to full y) - */ - override def trainNtest (y_ : VectorD = y)(yy: VectorD = y): (VectorD, VectorD) = - arma.trainNtest (pick (y_))(pick (yy)) - end trainNtest - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an AR1MA forecasting model y_ = f(lags (y_)) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = - arma.testF (h, pick (y_)) // return aligned observed, forecasted and qof vectors - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the AR1MA(p, q) model. - */ - override def parameter: VectorD = arma.parameter - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * y_t+1 = f (y_t, ...) - * @param t the time point from which to make prediction - * @param y_ the observed values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - arma.predict (t, pick (y_)) - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict all values corresponding to the given vector y_. - * @param y_ the observed values to use in making predictions - */ -// override def predictAll (y_ : VectorD): VectorD = arma.predictAll (pick (y_)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. 
- * Intended to work with rolling validation (analog of predict method) - * @param t the time point from which to make forecasts - * @param yf the forecast matrix (time x horizons) - * @param y_ the observed values to use in making predictions - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, y_ : VectorD, h: Int): VectorD = - arma.forecast (t, yf, pick (y_), h) - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to FORECAST MATRIX and return h-step ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. - * @param yf the forecast matrix (time x horizons) - * @param y_ the observed values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - arma.forecastAt (yf, pick (y_), h) - end forecastAt - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the FORECAST MATRIX yf, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the observed time-series values). - * last column, yf(?, h+1), is set to t (the time values, for reference). - * Forecast recursively down diagonals in the yf forecast matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. 
- * @param y_ the observed values to use in making forecasts - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - */ - override def forecastAll (y_ : VectorD, h: Int): MatrixD = - debug ("forecastAll", s"y_.dim = ${y_.dim}, e.dim = ${e.dim}") - arma.forecastAll (pick (y_), h) - end forecastAll - - //////////////////////////////////////////////////////////////////////////////// - // Make predictions/forecasts on the original scale time-series (not differenced). - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * y_t+1 = f (y_t, ...) + e_t+1 - * @param t the time point from which to make prediction - * @param y_ the observed values to use in making predictions - */ - def predict2 (t: Int, y_ : VectorD): Double = - arma.predict (t, pick (y_)) + (if diffr then y_(t) else 0.0) - end predict2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict all values corresponding to the given vector y_. - * @param y_ the observed values to use in making predictions - */ - def predictAll2 (y_ : VectorD, show: Boolean = true): VectorD = - val yp = new VectorD (y_.dim) - yp(0) = y_(0) - for t <- 0 until y_.dim-1 do yp(t+1) = arma.predict (t, v) + y_(t) -// for t <- 0 until y_.dim-1 do yp(t+1) = predict2 (t, y_) - if show then - println (s"nparams = $nparams") - resetDF (nparams - 1, y_.dim - nparams) - println (report (diagnose (y_, yp))) // report on Quality of Fit (QoF) - println (s"mase = ${Fit.mase (y, yp)}") // Means Absolute Scaled Error - new Plot (null, y_, yp, "Plot y, yp vs. t", lines = true) - end if - yp - end predictAll2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to FORECAST MATRIX and return h-step ahead forecast. 
- * @param yf the forecast matrix (time x horizons) - * @param y_ the observed values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * - def forecastAt2 (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - val yfh = arma.forecastAt (yf, pick (y_), h) - println (s"h = $h, yf = $yf") - if diffr then yfh + y_ else yfh - end forecastAt2 - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the observed time-series values). - * Forecast recursively down diagonals in the yf forecast matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. - * @param y_ the observed values to use in making forecasts - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - * - def forecastAll2 (y_ : VectorD, h: Int): MatrixD = - debug ("forecastAll2", s"y_.dim = ${y_.dim}, e.dim = ${e.dim}") - yf = new MatrixD (y_.dim+h, h+2) // forecasts for all time points t & horizons to h - for t <- y_.indices do yf(t, 0) = y_(t) // first column is the timestep (e.g., logical day) - for k <- 1 to h do forecastAt2 (yf, y_, k) // forecast k-steps into the future - for t <- yf.indices do yf(t, h+1) = t // last column is time (logical day) - yf // return matrix of forecasted values - end forecastAll2 - */ - -end AR1MA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aR1MATest` main function tests the `AR1MA` class on simulated data. - * Test predictions (one step ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aR1MATest - */ -@main def aR1MATest (): Unit = - - import SARIMA.hp - - println (s"hp = $hp") - - val y = makeTSeries () // create simulated time-series (see `Stationary`) - - banner (s"Test Predictions: AR1MA(1, 1) on simulated time-series") - var mod = new AR1MA (y) // create model for time-series data AR1MA(1, 1) - mod.trainNtest ()() // train and test on full dataset - - banner (s"Test Predictions: AR1MA(1, 0) on simulated time-series") - hp("q") = 0 - mod = new AR1MA (y) // create model for time-series data AR1MA(1, 0) - mod.trainNtest ()() // train and test on full dataset - - banner ("Select model based on ACF and PACF") - mod.arma.plotFunc (mod.arma.acF, "ACF") // Auto-Correlation Function (ACF) - mod.arma.plotFunc (mod.arma.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aR1MATest - -import forecasting.Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aR1MATest2` main function tests the `AR1MA` class on real data: Forecasting lake levels. 
- * Test predictions (one step ahead forecasts) with no differencing - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * @see `aRMATest2` - * > runMain scalation.modeling.forecasting.aR1MATest2 - */ -@main def aR1MATest2 (): Unit = - - import SARIMA.hp - - // d = 0 (no differencing) => should give same results as ARMA (@see `aRMATest2`) - - for p <- 1 to 5; q <- 0 to 2 do - hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters - val mod = new AR1MA (y, diffr = false) // create model for time-series data AR1MA(p, q) - banner (s"Test Predictions: ${mod.modelName} (d=0) on LakeLevels Dataset") - mod.trainNtest ()() // train and test the model on full dataset - end for - -end aR1MATest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aR1MATest3` main function tests the `AR1MA` class on real data: Forecasting lake levels. - * Test predictions (one step ahead forecasts) taking one difference. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aR1MATest3 - */ -@main def aR1MATest3 (): Unit = - - import SARIMA.hp - - val v = Δ (y) // take the first difference of time-series y - differ (y, backform (v, y)) // verify recovery of original times-series - differ (y, undiff (v, y(0))) // verify recovery of original times-series - - for p <- 2 to 2; q <- 0 to 0 do - hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters - val mod = new AR1MA (y) // create model for time-series data AR1MA(p, q) - banner (s"Test Predictions: ${mod.modelName} (d=1) on LakeLevels Dataset") - val (vp_, qof) = mod.trainNtest ()() // test and test the model on full dataset - val vp = v(0) +: vp_ // want v and vp to have the same size - val yp = mod.predictAll2 (y) // results on original scale - val yp2 = backform (vp, y) // results on original scale - println (MatrixD (y, yp, yp2).transpose) - - println (new TestFit (y.dim).testDiagnose (y, yp2)) - - new Plot (null, y, yp, 
s"Plot: ${mod.modelName} predictAll2: y, yp vs t", lines = true) - new Plot (null, y, yp2, s"Plot: ${mod.modelName} backform: y, yp2 vs t", lines = true) - end for - -end aR1MATest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aR1MATest4` main function tests the `AR1MA` class on real data: Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts) for several values of p and q. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * @see `aRMATest5` - * > runMain scalation.modeling.forecasting.aR1MATest4 - */ -@main def aR1MATest4 (): Unit = - - import SARIMA.hp - - val hh = 2 // maximum forecasting horizon - - val v = Δ (y) // velocity series (first differences) - - for p <- 2 to 2; q <- 0 to 0 do - hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters - val mod = new AR1MA (y) // create model for time series data - banner (s"Test: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest ()() // train and test the model on full dataset - - val yf_ = mod.forecastAll (y, hh) // forecast using differenced values h-steps ahead for all y - val yf = transformBack (yf_, y, hh) // transform back to original scale - println (s"yf_ = $yf_") // forecast matrix on differenced values - println (s"yf = $yf") // forecast matrix on original scale - println (s"y = $y") // observed values on original scale - - val tf = new TestFit (y.dim) - val vh1 = yf_(?, 1)(0 until v.dim) // test on differenced scale - val vh2 = yf_(?, 2)(1 until v.dim) - println (tf.testDiagnose (v, vh1)) - println (tf.testDiagnose (v.drop(1), vh2)) - new Plot (null, v, vh1, "v, vh1 vs. t", lines = true) - new Plot (null, v.drop(1), vh2, "v, vh2 vs. t", lines = true) - - val yh1 = yf(?, 1)(0 until y.dim) // test on original scale - val yh2 = yf(?, 2)(1 until y.dim) - println (tf.testDiagnose (y, yh1)) - println (tf.testDiagnose (y.drop(1), yh2)) - new Plot (null, y, yh1, "y, yh1 vs. 
t", lines = true) - new Plot (null, y.drop(1), yh2, "y, yh2 vs. t", lines = true) - - val yp = mod.predictAll2 (y) // one-step predictions on original scale - println (tf.testDiagnose (y, yp)) - - differ (yp, yh1) // FIX - should be the same - -// Forecaster.checkForecastMatrix (yf, y, yp) // FIX - differences & un-differenced -// Forecaster.evalForecasts (mod, y, hh) - end for - -end aR1MATest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aR1MATest5` main function tests the `AR1MA` class on real data: - * Forecasting COVID-19. - * > runMain scalation.modeling.forecasting.aR1MATest5 - */ -@main def aR1MATest5 (): Unit = - - import SARIMA.hp - - val data = MatrixD.load ("covid_19.csv", 1, 1) // skip first row (header) and first column - val yy = data(?, 4) // column 4 is daily deaths -// val yy = data(?, 5) // column 5 is daily deaths smoothed - val is = yy.indexWhere (_ >= 2.0) // find day of first death with at least 2 deaths - println (s"is = $is is first day with at least 2 deaths") - val y = yy(is until yy.dim) // slice out days before is - -// val h = 2 // forecasting horizon - for p <- 1 to 5; q <- 1 to 3 do // AR1MA hyper-parameter settings - hp("p") = p; hp("q") = q - val mod = new AR1MA (y) // create an AR1MA model - val (vp, qof) = mod.trainNtest ()() // train and the model on full dataset - val yp = mod.predictAll2 (y) // one-step predictions on original scale - println (s"yp = $yp") - -/* - val yfa = mod.forecastAll (y, h) - val yf = yfa(?, h) // forecasted values - h steps ahead - new Plot (null, y, yf, s"Plot of y & yf, forecasted (h = $h) ${mod.modelName} vs. 
t", true) -*/ - - end for - -end aR1MATest5 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/ARIMA.scala b/src/main/scala/scalation/modeling/forecasting_old/ARIMA.scala deleted file mode 100644 index 9551bd76a..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/ARIMA.scala +++ /dev/null @@ -1,384 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). - * - * @note Model: Auto-Regressive, Integrated, Moving Average (ARIMA) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting_old - -import scalation.mathstat._ - -import ARIMA_diff._ -import Forecaster.differ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMA` class provides basic time series analysis capabilities for Auto- - * Regressive 'AR' Integrated 'I' Moving-Average 'MA' models. In an - * ARIMA(p, d, q) model, p and q refer to the order of the Auto-Regressive - * and Moving-Average components of the model; d refers to the order of - * differencing. Given time series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * y_t = δ + Σ(φ_i y_t-i) + Σ(θ_i e_t-i) + e_t - * where δ is a constant, φ is the auto-regressive coefficient vector, - * θ is the moving-average coefficient vector, and e is the noise vector. 
- *------------------------------------------------------------------------------ - * If d > 0, then the time series must be differenced first before applying - * the above model. - *------------------------------------------------------------------------------ - * @param y the original input vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class ARIMA (y: VectorD, tt: VectorD = null, hparam: HyperParameter = SARIMA.hp) - extends Forecaster (y, tt, hparam) - with Fit (dfm = hparam("p").toDouble, df = y.dim - pq (hparam)): - - private val debug = debugf ("ARIMA", true) // debug function - private val flaw = flawf ("ARIMA") // flaw function - private val p = hparam("p").toInt // p-th order Auto-Regressive model - private val d = hparam("d").toInt // the number of differences to take - private val q = hparam("q").toInt // q-th order Moving-Average model - private val v = diff (y, d) // d-th difference of the full time-series - val arma = new ARMA (v, tt, hparam) // delegate to the `ARMA` class - - if d out (0, 3) then flaw ("init", s"difference d = $d must be in (0, 3)") - - arma.modelName = s"ARIMA($p, $d, $q)" // rename delegate ARMA to match - modelName = arma.modelName // use same name for ARIMA - - debug ("init", s"$modelName") - - new Plot (null, y, null, s"Plot $modelName: y vs. t", lines = true) - if d > 0 then new Plot (null, v, null, s"Plot $modelName: v = diff (y, d) vs. t", lines = true) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Pick one of the following vectors: v full first difference, u differenced, or u itself. 
- * @param u the input time-series vector - */ - def pick (u: VectorD): VectorD = - if u == y then v // passed in original full time-series - else if d > 0 then diff (u, d) // sub-series differenced - else u // sub-series as is - end pick - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARIMA` model to the times-series data in vector y_. - * Estimate the coefficient vectors φ and θ for (p, q)-th order ARIMA(p, q) model. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - override def train (x_null: MatrixD, y_ : VectorD): Unit = - arma.train (x_null, pick (y_)) - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an ARIMA forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/testing/full response/output vector - */ - override def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - arma.test (x_null, pick (y_)) - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train and test the forecasting model y_ = f(y-past) + e and report its QoF - * and plot its predictions. Return the predictions and QoF. 
- * @param y_ the training/full response/output vector (defaults to full y) - * @param yy the testing/full response/output vector (defaults to full y) - */ - override def trainNtest (y_ : VectorD = y)(yy: VectorD = y): (VectorD, VectorD) = - arma.trainNtest (pick (y_))(pick (yy)) - end trainNtest - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an ARIMA forecasting model y_ = f(lags (y_)) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = - arma.testF (h, pick (y_)) // return aligned actual, forecasted and qof vectors - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the ARIMA(p, q) model. - */ - override def parameter: VectorD = arma.parameter - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * y_t+1 = f (y_t, ...) - * @param t the time point from which to make prediction - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - arma.predict (t, pick (y_)) - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict all values corresponding to the given vector y_. 
- * @param y_ the actual values to use in making predictions - */ -// override def predictAll (y_ : VectorD): VectorD = arma.predictAll (pick (y_)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. - * Intended to work with rolling validation (analog of predict method) - * @param t the time point from which to make forecasts - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making predictions - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, y_ : VectorD, h: Int): VectorD = - arma.forecast (t, yf, pick (y_), h) - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to FORECAST MATRIX and return h-step ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - arma.forecastAt (yf, pick (y_), h) - end forecastAt - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the FORECAST MATRIX yf, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * last column, yf(?, h+1), is set to t (the time values, for reference). 
- * Forecast recursively down diagonals in the yf forecast matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. - * @param y_ the actual values to use in making forecasts - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - */ - override def forecastAll (y_ : VectorD, h: Int): MatrixD = - debug ("forecastAll", s"y_.dim = ${y_.dim}, e.dim = ${e.dim}") - arma.forecastAll (pick (y_), h) - end forecastAll - - //////////////////////////////////////////////////////////////////////////////// - // Make predictions/forecasts on the original scale time-series (not differenced). - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * y_t+1 = f (y_t, ...) + e_t+1 - * @param t the time point from which to make prediction - * @param y_ the actual values to use in making predictions - */ - def predict2 (t: Int, y_ : VectorD): Double = - arma.predict (t, pick (y_)) + (if d > 0 then y_(t) else 0.0) // FIX - surely it's incorrect? - end predict2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict all values corresponding to the given vector y_. - * @param y_ the actual values to use in making predictions - */ - def predictAll2 (y_ : VectorD, show: Boolean = true): VectorD = - val yp = new VectorD (y_.dim) - yp(0) = y_(0) - for t <- 0 until y_.dim-1 do yp(t+1) = predict2 (t, y_) - if show then -// println (FitM.fitMap (diagnose (y_, yp), qoF_names)) - println (s"nparams = $nparams") - resetDF (nparams - 1, y_.dim - nparams) - println (report (diagnose (y_, yp))) // report on Quality of Fit (QoF) - println (s"mase = ${Fit.mase (y, yp)}") // Means Absolute Scaled Error - new Plot (null, y_, yp, "Plot y, yp vs. 
t", lines = true) - end if - yp - end predictAll2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to FORECAST MATRIX and return h-step ahead forecast. - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * - def forecastAt2 (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - val yfh = arma.forecastAt (yf, pick (y_), h) - if d > 0 then yfh + y_ else yfh - end forecastAt2 - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * Forecast recursively down diagonals in the yf forecast matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. - * @param y_ the actual values to use in making forecasts - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - * - def forecastAll2 (y_ : VectorD, h: Int): MatrixD = - debug ("forecastAll2", s"y_.dim = ${y_.dim}, e.dim = ${e.dim}") - yf = new MatrixD (y_.dim+h, h+2) // forecasts for all time points t & horizons to h - for t <- y_.indices do yf(t, 0) = y_(t) // first column is the timestep (e.g., logical day) - for k <- 1 to h do forecastAt2 (yf, y_, k) // forecast k-steps into the future - for t <- yf.indices do yf(t, h+1) = t // last column is time (logical day) - yf // return matrix of forecasted values - end forecastAll2 - */ - -end ARIMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest` main function tests the `ARIMA` class on simulated data. 
- * Test predictions (one step ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRIMATest - */ -@main def aRIMATest (): Unit = - - import SARIMA.hp - - println (s"hp = $hp") - - val y = makeTSeries () // create simulated time-series (see `Stationary`) - - banner (s"Test Predictions: ARIMA(1, 1) on simulated time-series") - var mod = new ARIMA (y) // create model for time-series data ARIMA(1, 1) - mod.trainNtest ()() // train and test on full dataset - - banner (s"Test Predictions: ARIMA(1, 0) on simulated time-series") - hp("q") = 0 - mod = new ARIMA (y) // create model for time-series data ARIMA(1, 0) - mod.trainNtest ()() - - banner ("Select model based on ACF and PACF") - mod.arma.plotFunc (mod.arma.acF, "ACF") // Auto-Correlation Function (ACF) - mod.arma.plotFunc (mod.arma.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRIMATest - -import forecasting.Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest2` main function tests the `ARIMA` class on real data: Forecasting lake levels. 
- * Test predictions (one step ahead forecasts) with no differencing - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRIMATest2 - */ -@main def aRIMATest2 (): Unit = - - import SARIMA.hp - - hp("d") = 0 // (no differencing) => should give same results as ARMA (@see `aRMATest2`) - println (s"hp = $hp") - - for p <- 1 to 5; q <- 0 to 2 do - hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters - val mod = new ARIMA (y) // create model for time-series data ARIMA(p, q) - banner (s"Test Predictions: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest ()() // trainb and test the model on full dataset - - val yp = mod.predictAll2 (y) // results on original scale - new Plot (null, y, yp, s"Plot: ${mod.modelName} predictAll2: y, yp vs t", lines = true) - end for - -end aRIMATest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest3` main function tests the `ARIMA` class on real data: Forecasting lake levels. - * Test predictions (one step ahead forecasts) taking one difference. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRIMATest3 - */ -@main def aRIMATest3 (): Unit = - - import SARIMA.hp - - val d = 1 - hp("d") = d // (first differencing) => should give same results as ARIMA (@see `aR1MATest3`) - - val v = diff (y, d) // take the first difference of time-series y - differ (y, backform (v, y)) // verify recovery of original times-series - - for p <- 1 to 5; q <- 0 to 2 do - hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters - val mod = new ARIMA (y) // create model for time-series data ARIMA(p, q) - banner (s"Test Predictions: ${mod.modelName} on LakeLevels Dataset") - val (vp, qof) = mod.trainNtest ()() // test and test the model on full dataset - val yp = mod.predictAll2 (y) // results on original scale - - new Plot (null, y, yp, s"Plot: ${mod.modelName} predictAll2: y, yp vs t", lines = true) - end for - -end aRIMATest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest4` main function tests the `ARIMA` class on real data: Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts) for several values of p and q. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * @see `aRMATest5` - * > runMain scalation.modeling.forecasting.aRIMATest4 - */ -@main def aRIMATest4 (): Unit = - - import SARIMA.hp - - val hh = 2 // maximum forecasting horizon - - val v = Δ (y) // velocity series (first differences) - - for p <- 2 to 2; q <- 0 to 0 do - hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters - val mod = new ARIMA (y) // create model for time series data - banner (s"Test: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest ()() // train and test the model on full dataset - - val yf_ = mod.forecastAll (y, hh) // forecast using differenced values h-steps ahead for all y - val yf = transformBack (yf_, y, hh) // transform back to original scale - println (s"yf_ = $yf_") // forecast matrix on differenced values - println (s"yf = $yf") // forecast matrix on original scale - println (s"y = $y") // observed values on original scale - - val tf = new TestFit (y.dim) - val vh1 = yf_(?, 1) // test on differenced scale - val vh2 = yf_(?, 2) - println (tf.testDiagnose (v, vh1)) - println (tf.testDiagnose (v, vh2)) - new Plot (null, v, vh1, "v, vh1 vs. t", lines = true) - new Plot (null, v, vh2, "v, vh2 vs. t", lines = true) - - val yh1 = yf(?, 1) // test on original scale - val yh2 = yf(?, 2) - println (tf.testDiagnose (y, yh1)) - println (tf.testDiagnose (y, yh2)) - new Plot (null, y, yh1, "y, yh1 vs. t", lines = true) - new Plot (null, y, yh2, "y, yh2 vs. 
t", lines = true) - -// Forecaster.checkForecastMatrix (yf, y, yp) // FIX - differences & un-differenced -// Forecaster.evalForecasts (mod, y, hh) - end for - -end aRIMATest4 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/ARIMA_diff.scala b/src/main/scala/scalation/modeling/forecasting_old/ARIMA_diff.scala deleted file mode 100644 index b5c3908e3..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/ARIMA_diff.scala +++ /dev/null @@ -1,173 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). - * - * @note Model: Differencing methods for ARIMA models. - */ - -package scalation -package modeling -package forecasting_old - -import scalation.mathstat._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMA_diff` object provides methods for taking first and second order - * differences, as well as transforming back to the original scale. - * - * diff: position y --> velocity v --> acceleration a (actual) - * | | | - * backform, undiff: position yp <-- velocity vp <-- acceleration ap (predicted) - * - * @see www.jstatsoft.org/article/view/v027i03/v27i03.pdf - * @see stats.stackexchange.com/questions/32634/difference-time-series-before-arima-or-within-arima - */ -object ARIMA_diff: - - private val debug = debugf ("ARIMA_diff", true) // debug function - private val flaw = flawf ("ARIMA_diff") // flaw function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Take the d'th difference of the position time series for d in {0, 1, 2}. - * A new vector (of length y.dim-d) is returned even when there is no difference - * taken (d = 0), to ensure the original is preserved. - * E.g., for d = 1: Position y --> Velocity v. 
- * @param y the actual position time series to be differenced - * @param d the order of simple differencing (defaults to 1) - */ - def diff (y: VectorD, d: Int = 1): VectorD = - debug ("diff", s"y.dim = ${y.dim}, d = $d") - d match - case 0 => y.copy - case 1 => VectorD (for i <- 0 until y.dim-1 yield y(i+1) - y(i)) - case 2 => VectorD (for i <- 0 until y.dim-2 yield y(i+2) - 2*y(i+1) + y(i)) - case _ => flaw ("diff", s"does not support differencing higher than 2"); null - end diff - - inline def Δ (y: VectorD): VectorD = diff (y) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Undifference the velocity time series by adding the difference to the previous value. - * Velocity v -> Position y. - * @param v the differenced time series (velocity) - * @param y0 the first value in the original time series - */ - def undiff (v: VectorD, y0: Double): VectorD = - debug ("undiff", s"v.dim = ${v.dim}, y0 = $y0") - val y = new VectorD (v.dim + 1) - y(0) = y0 - for t <- 1 until y.dim do y(t) = v(t-1) + y(t-1) - y - end undiff - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backform by transforming back the predicted values of a differenced time series - * to the original scale using actual values y. - * E.g., for d = 1: Velocity vp -> Position yp. 
- * @param vp the predicted differenced (velocity/acceleration) time series - * @param y the actual position time series vector (first d values needed) - * @param d the order of simple differencing (defaults to 1) - */ - def backform (vp: VectorD, y: VectorD, d: Int = 1): VectorD = - debug ("backform", s"vp.dim = ${vp.dim}, y.dim = ${y.dim}, d = $d") - val yp = new VectorD (y.dim) - d match - case 0 => vp - case 1 => yp(0) = y(0) - for t <- 0 until y.dim-1 do yp(t+1) = vp(t) + y(t) - yp - case 2 => yp(0) = y(0); yp(1) = y(1) - for t <- 0 until y.dim-2 do yp(t+2) = vp(t) + 2*y(t+1) - y(t) - yp - case _ => flaw ("backform", "does not support differencing higher than 2"); null - end backform - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Transform the forecast values in the FORECAST MATRIX of a differenced time series - * back to the original scale for all horizons (1 to h). - * @param vf the matrix of all multi-horizon forecasted values (differenced) - * @param y the original actual time series vector (undifferenced) - * @param d the order of simple differencing - */ - def transformBack (vf: MatrixD, y: VectorD, d: Int): MatrixD = - val h = vf.dim2 - 2 - val yy = y ++ VectorD(0) - val yf = new MatrixD (vf.dim , vf.dim2) - yf(?, 0) = yy - for k <- 1 to h do yf(?, k) = backform (vf(?, k), yy, d) - yf(?, h+1) = VectorD.range (0 until vf.dim) - yf - end transformBack - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Transform the forecast values from time point t of a differenced time series back - * to the original scale for all horizons (1 to h). - * @param vh the vector of forecasted differenced values for times t+1, ... 
t+h - * @param y the original actual time series vector (undifference) - * @param d the order of simple differencing - * @param t the time point being forecasted (@see the `forecast` method) - */ - def transformBack (vh: VectorD, y: VectorD, d: Int, t: Int): VectorD = - d match - case 0 => vh - case 1 => val yh = y(t - 1 to t) ++ vh - for i <- 1 until yh.dim do yh(i) += yh(i-1) - yh(1 to yh.dim) - case 2 => val yh = y(t-2 to t) ++ vh - for i <- 2 until yh.dim do yh(i) += (2*yh(i-1) - yh(i-2)) - yh(2 to yh.dim) - case _ => flaw ("transformBack", "does not support differencing higher than 2"); null - end transformBack - -end ARIMA_diff - -import ARIMA_diff._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMA_diffTest` main function tests the `ARIMA_diff` object on real data: - * Forecasting lake levels comparing ARMA, AR1MA, Differenced ARMA, Transformed-Back - * Differenced ARMA. Observe that `backform` is better than `undiff` on predictions. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRIMA_diffTest - */ -@main def aRIMA_diffTest (): Unit = - - import forecasting.Example_LakeLevels.y - import SARIMA.hp - val tf = new TestFit (y.dim) - - banner ("Test ARMA (2, 0) on Lake Level Dataset") - hp("p") = 2; hp("q") = 0 - val (yp, qof) = new ARMA (y).trainNtest ()() - - banner ("Test Differenced ARMA (2, 0) on Lake Level Dataset") - val v = diff (y) // first difference on y (size of v one less than y) - val yy = undiff (v, y(0)) // reverse the diff - Forecaster.differ (y, yy) // verify recovery of original time series - val (vp_, qofv) = new ARMA (v).trainNtest ()() // predictions skip the first value (no past) - val vp = v(0) +: vp_ // prepend the first actual value (want same size as v) - - banner ("Test Transformed-Back Differenced ARMA (2, 0) on Lake Level Dataset") - println (s"predictAll: y.dim = ${y.dim}, vp.dim = ${vp.dim}") - val yp1 = undiff (vp, y(0)) // transform vp back to original (y) scale using undiff - val yp2 = backform (vp, y) // transform vp back to original (y) scale using backform - - println (tf.testDiagnose (y, yp1)) // determine the quality of fit for yp1 - println (tf.testDiagnose (y, yp2)) // determine the quality of fit for yp2 - new Plot (null, y, yp1, "y and yp1 vs. time", lines = true) - new Plot (null, y, yp2, "y and yp2 vs. time", lines = true) - new Plot (null, yp1, yp2, "yp1 and yp2 vs. time", lines = true) - - banner ("Test AR1MA (2, 0) on Lake Level Dataset") // AR1MA automatically takes first differences - val mod = new AR1MA (y) - val (vp3_, qof3) = mod.trainNtest ()() - val vp3 = v(0) +: vp3_ // prepend the first actual value (want same size as v) - val yp3 = backform (vp3, y) // transform vp back to original (y) scale using backform - println (tf.testDiagnose (y, yp3)) // determine the quality of fit for yp3 - new Plot (null, y, yp3, "y and yp3 vs. 
time", lines = true) - -end aRIMA_diffTest - diff --git a/src/main/scala/scalation/modeling/forecasting_old/ARMA.scala b/src/main/scala/scalation/modeling/forecasting_old/ARMA.scala deleted file mode 100644 index cc3c81f53..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/ARMA.scala +++ /dev/null @@ -1,458 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller, Michael Cotterell - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). - * - * @note Model: Auto-Regressive, Moving-Average (ARMA) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting_old - -import scala.math.max - -import scalation.mathstat._ -import scalation.optimization.quasi_newton.{BFGS => Optimizer} // change import to change optimizer -//import scalation.optimization.quasi_newton.{LBFGS => Optimizer} -import scalation.random.NormalVec_c - -def pq (hpar: HyperParameter): Int = hpar("p").toInt + hpar("q").toInt - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARMA` class provides basic time series analysis capabilities for Auto-Regressive, - * Moving-Average (ARMA) models. In an ARMA(p, q) model, p refers to the order of the - * Auto-Regressive components and q refers to the Moving-Average compoenest of the model. 
- * Given time series data stored in vector y, its next value y_t+1 = y(t+1) - * may be predicted based on past values of y and past shocks (differences in actual and - * forecasted values): - * - * y_t+1 = δ + Σ[φ_j y_t-j] + Σ[θ_j e_t-j] + e_t+1 - * - * where δ is a constant, φ is the auto-regressive coefficient vector, - * θ is the moving-average vector, and e_t+1 is the new residual/error/shock term. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class ARMA (y: VectorD, tt: VectorD = null, hparam: HyperParameter = SARIMA.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = pq (hparam), df = y.dim - pq (hparam)): - - private val debug = debugf ("ARMA", true) // debug function - private val flaw = flawf ("ARMA") // flaw function - protected val p = hparam("p").toInt // p-th order Auto-Regressive part - protected val q = hparam("q").toInt // q-th order Moving-Average part - protected var φ = NormalVec_c (p, 0.0, 0.1).gen // AR(p) parameters/coefficients - protected var θ = NormalVec_c (q, 0.0, 0.1).gen // MA(q) parameters/coefficients - protected var δ = NO_DOUBLE // drift/intercept/constant term - protected val pnq = pq (hparam) // combined order - - modelName = s"ARMA($p, $q)" - - debug ("init", s"$modelName") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the maximum lag used by this model (its capacity to look into the past). - */ - override def cap: Int = max (p, q) // max order - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARMA` model to the times-series data in vector y_. - * Estimate the coefficient vectors φ and θ for (p, q)-th order ARMA(p, q) model. 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - m = y_.dim // length of relevant time-series - println (s"e.dim = ${e.dim}") - resetDF (pnq, m - pnq) // reset the degrees of freedom - makeCorrelogram (y_) // correlogram computes psi matrix, gives ACF and PACF - - val mu = y_.mean // sample mean of y_ - val z = y_ - mu // optimization works better using zero-centered data - δ = 0.0 // drift/intercept for z (should end up close to zero) - val b = φ ++ θ :+ δ // combine all parameters -> vector to optimize - - def csse (b: VectorD): Double = // objective function - conditional sum of squared errors - φ = b(0 until p); θ = b(p until p+q); δ = b(b.dim-1) // pull parameters out of b vector - ssef (z, predictAll (z)) // compute loss function - end csse - -/* - def nll (b: VectorD): Double = // objective function - negative log-likelihood (MLE) - 0.0 // FIX - implement - end nll -*/ - - val optimizer = new Optimizer (csse) // apply Quasi-Newton optimizer - val (fb, bb) = optimizer.solve (b, 0.5) // optimal solution for the objective function and parameters - - φ = bb(0 until p); θ = bb(p until p+q); δ = bb(b.dim-1) // recover parameters for z - δ += mu * (1 - φ.sum) // uncenter - debug ("train", s"parameters for ARMA($p, $q) model: φ = $φ, θ = $θ, δ = $δ") - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an ARMA forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the actual testing/full response/output vector - */ - override def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yp, no_qof) = super.test (null, y_) // call super.test for predictions - resetDF (pnq, y_.dim - pnq) // reset the degrees of freedom - (yp, diagnose (y_, yp)) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an ARMA forecasting model y_ = f(lags (y_)) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, h) // get and align actual and forecasted values - resetDF (pnq, yy.dim - pnq) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// Forecaster.differ (yy, yfh) // uncomment for debugging - (yy, yfh, diagnose (yy, yfh)) // return aligned actual, forecasted and QoF vectors - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the ARMA(p, q) model. - */ - override def parameter: VectorD = φ ++ θ :+ δ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using 1-step ahead forecasts (p' = p-1). - * - * y_t+1 = δ + φ_0 y_t + φ_1 y_t-1 + ... + φ_p' y_t-p' + - * θ_0 e_t + θ_1 e_t-1 + ... + θ_q' e_t-q' - * - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time), - * but do not assume errors repeat. Note, column 1 of yf (yf(?, 1) holds yp. 
- * Must be executed in time order, so errors are properly recorded in vector e - * @see `predictAll` method in `Forecaster` trait. - * @see `rdot` in Forecaster.scala for reverse dot product implementation. - * @param i the time series index from which to make prediction - * @param y_ the actual time series values to use in making predictions (has one backcast) - */ - override def predict (i: Int, y_ : VectorD): Double = - if i == 0 then e(0) = 0 // t = -1 (from backcast), assume no error - if i == 1 then e(1) = y_(1) - yf(0, 1) // t = 0 (first real point) - - var sum = δ + rdot (φ, y_, i) // intercept + AR terms - for j <- 0 until q if i-j >= 0 do sum += θ(j) * e(i-j) // add MA terms (shocks) -// yf(i, 1) = sum // yp(i) = yf(i, 1) - - if i < y_.dim-1 then e(i+1) = y_(i+1) - sum // update the error vector - sum // prediction yp - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. 
- * Intended to work with rolling validation (analog of predict method) - * FIX - not updated - * @param t the time point from which to make forecasts - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making predictions - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - val m1 = y_.dim - 1 - val yd = new VectorD (h) // hold forecasts for each horizon - for k <- 1 to h do - val t1 = t + k - 1 // time point prior to horizon - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t1-j), max (0, k-1-j)) - for j <- 0 until q do - if t1-j in (0, m1) then sum += θ(j) * e(t1-j) - end for - yf(t+k, k) = sum // forecast down the diagonal - yd(k-1) = sum // record diagonal values - if h == 1 && t < m1 then e(t+1) = y_(t+1) - sum // update the next element in the error vector - end for - yd // return forecasts for each horizon - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to FORECAST MATRIX yf and return h-step ahead forecast. - * Use y_0 for y_t when t < 0 (i.e., assume first value repeats back in time). - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. 
- * FIX -- replace m1 cutoff with e values from pseudo-shocks (differences per horizon forecasts) - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - override def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - val h1 = h - 1 // start pulling values for column h1 - val m1 = e.dim - 1 - - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max0 (h1-1-j), max0 (h1-j)) - for j <- 0 until q if h1-j >= 0 do sum += θ(j) * e(h1-j) - yf(h1, h) = sum // first forecast is special case - - for i <- y_.indices do // make forecasts over all time points for horizon k - val t1 = i + h1 // time point prior to horizon - var sum = δ + rdot (φ, yf, t1, h1) // intercept + AR terms - for j <- 0 until q if t1-j in (0, m1) do sum += θ(j) * e(t1-j) // add MA terms (shocks) - yf(i+h, h) = sum // forecast down the diagonal - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end ARMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARMA` companion object provides factory methods for the `ARMA` class. - */ -object ARMA: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARMA` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = SARIMA.hp): ARMA = - new ARMA (y, tt, hparam) - end apply - -end ARMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest` main function tests the `ARMA` class on simulated data. - * Test predictions (one step ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest - */ -@main def aRMATest (): Unit = - - import SARIMA.hp - - println (s"hp = $hp") - - val y = makeTSeries () // create simulated time-series (see `Stationary`) - - banner (s"Test Predictions: ARMA(1, 1) on simulated time-series") - var mod = new ARMA (y) // create model for time series data ARMA(1, 1) - mod.trainNtest ()() // train and test on full dataset - - banner (s"Test Predictions: AR1MA(1, 0) on simulated time-series") - hp("q") = 0 - mod = new ARMA (y) // create model for time-series data AR1MA(1, 0) - mod.trainNtest ()() // train and test on full dataset - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRMATest - -import forecasting.Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest2` main function tests the `ARMA` class on real data: Forecasting lake levels. - * Test predictions (one step ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * @see `aR1MATest2` - * > runMain scalation.modeling.forecasting.aRMATest2 - */ -@main def aRMATest2 (): Unit = - - import SARIMA.hp - - for p <- 1 to 5; q <- 0 to 2 do - hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters - val mod = new ARMA (y) // create model for time-series data AR1MA(p, q) - banner (s"Test Predictions: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest ()() // train and test the model on full dataset - end for - -end aRMATest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest3` main function tests the `ARMA` class on real data: Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRMATest3 - */ -@main def aRMATest3 (): Unit = - - val hh = 2 // maximum forecasting horizon - - banner (s"Test Forecasts: ARMA(1, 1) on LakeLevels Dataset") - val mod = new ARMA (y) // create model for time series data ARMA(1, 1) - val (yp, qof) = mod.trainNtest ()() // train and test the model on full dataset - - mod.forecastAll (y, hh) // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, y, hh, true) // prediction interval forecasts - -end aRMATest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest4` main function tests the `ARMA` class on real data: Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts) for several values of p and q. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRMATest4 - */ -@main def aRMATest4 (): Unit = - - import SARIMA.hp - -// val hh = 2 // maximum forecasting horizon - - for p <- 1 to 5; q <- 0 to 2 do - hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters - val mod = new ARMA (y) // create model for time series data - banner (s"Test: ${mod.modelName} on LakeLevels Dataset") - val (yp, qof) = mod.trainNtest ()() // train and test the model on full dataset - -/* - val yf = mod.forecastAll (y, hh) // forecast h-steps ahead for all y - Forecaster.evalForecasts (mod, y, hh) -*/ - end for - -end aRMATest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest5` main function tests the `ARMA` class on real data: Forecasting lake levels. - * This test looks at the velocity series (first differences). - * Test predictions (one step ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * @see `aR1MATest4` - * > runMain scalation.modeling.forecasting.aRMATest5 - */ -@main def aRMATest5 (): Unit = - - import SARIMA.hp - import ARIMA_diff.{Δ, backform} - - val hh = 2 // maximum forecasting horizon - - val v = Δ (y) // velocity series (first differences) - val t = VectorD.range (0 until v.dim) - val t2 = VectorD.range (1 until v.dim) - val tf = new TestFit (v.dim) - - var mod: ARMA = null - for p <- 2 to 2; q <- 0 to 0 do - hp("p") = p; hp("q") = q - mod = new ARMA (v) // create model for time series data ARMA(1, 0) - banner (s"Test: ${mod.modelName} on Differenced LakeLevels Dataset") - val (vp, qof) = mod.trainNtest ()() // train the model on full dataset - - banner ("Diagnose prediction vp") - println (tf.testDiagnose (v, vp)) // diagnose v-predicted - new Plot (t, v, vp, "v, vp vs. t", lines = true) - - val vf = mod.forecastAll (v, hh) // forecast h-steps ahead for all v - println (s"vf.dims = ${vf.dims}, v.dim = ${v.dim}, vp.dim = ${vp.dim}") - println (s"vf = $vf") // forecast matrix on v-values - println (s"v = $v") // actual v-values - println (s"vp = $vp") // one-step predicted v-values - - banner ("Diagnose forecasts vh1, vh2") - val vh1 = vf(?, 1)(0 until v.dim) - val vh2 = vf(?, 2)(1 until v.dim) - println (tf.testDiagnose (v, vh1)) // diagnose v-forecast @ h = 1 -- vh1 - println (tf.testDiagnose (v.drop (1), vh2)) // diagnose v-forecast @ h = 2 -- vh2 - new Plot (t, v, vh1, "v, vh1 vs. t", lines = true) - new Plot (t2, v.drop (1), vh2, "v, vh2 vs. t", lines = true) - - banner ("Diagnose prediction on original scale yp") - val yp = backform (vp, y) // predictions on original scale - println (MatrixD (y, yp).transpose) - println (tf.testDiagnose (y, yp)) // diagnose y-predicted - new Plot (null, y, yp, "y, yp vs. 
t", lines = true) - -// Forecaster.evalForecasts (mod, v, hh) - end for - -end aRMATest5 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest6` main function tests the `ARMA` class on real data: - * Forecasting COVID-19. - * > runMain scalation.modeling.forecasting.aRMATest6 - */ -@main def aRMATest6 (): Unit = - - import SARIMA.hp - - val data = MatrixD.load ("covid_19.csv", 1, 1) // skip first row (header) and first column - val yy = data(?, 4) // column 4 is daily deaths -// val yy = data(?, 5) // column 5 is daily deaths smoothed - val is = yy.indexWhere (_ >= 2.0) // find day of first death with at least 2 deaths - println (s"is = $is is first day with at least 2 deaths") - val y = yy(is until yy.dim) // slice out days before is - -// val h = 1 // forecasting horizon - for p <- 1 to 4; q <- 0 to 2 do // ARMA hyper-parameter settings - hp("p") = p; hp("q") = q - val mod = new ARMA (y) // create an ARMA model - val (yp, qof) = mod.trainNtest ()() // train and the model on full dataset - -/* - val yfa = mod.forecastAll (y, h) - val yf = yfa(?, h) // forecasted values - h steps ahead - new Plot (null, y, yf, s"Plot of y & yf, forecasted (h = $h) ${mod.modelName} vs. t", true) -*/ - - end for - -end aRMATest6 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest6` main function tests the `AR` class on real data: Forecasting Weekly Covid-19. - * Test forecasts (1 to h steps ahead forecasts). Try multiple values for p. 
- * > runMain scalation.modeling.forecasting.aRMATest7 - */ -@main def aRMATest7 (): Unit = - - val y = forecasting.Example_Covid.loadData_y ("new_deaths") - val hh = 3 // maximum forecasting horizon - - println (s"y.dim = ${y.dim}") - - var mod: ARMA = null - for p <- 1 to 12 do // autoregressive hyper-parameter p - SARIMA.hp("p") = p // set p hyper-parameter - banner (s"Test: ARMA($p) on Covid-19 Weekly Dataset") - mod = new ARMA (y) // create model for time series data - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll (y, hh) // forecast h-steps ahead for all y - Forecaster.evalForecasts (mod, y, hh) - end for - -end aRMATest7 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/ARX.scala b/src/main/scala/scalation/modeling/forecasting_old/ARX.scala deleted file mode 100644 index 0deb4f160..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/ARX.scala +++ /dev/null @@ -1,498 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Tue Feb 22 23:14:31 EST 2022 - * @see LICENSE (MIT style license file). - * - * @note Model: AutoRegressive with eXogenous Variables (Time Series Regression) - */ - -package scalation -package modeling -package forecasting_old - -import scala.math.{max, min} - -import scalation.mathstat._ - -import scalation.modeling.{Regression => REGRESSION} -//import scalation.modeling.{RidgeRegression => REGRESSION} -//import scalation.modeling.{LassoRegression => REGRESSION} - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX` class supports regression for Time Series data. - * Multi-horizon forecasting supported via the RECURSIVE method. - * Given a response vector y, a predictor matrix x is built that consists of - * lagged y vectors, - * - * y_t = b dot x - * where x = [1, y_{t-1}, y_{t-2}, ... 
y_{t-lags}] - * - * @param x the input/predictor matrix built out of lags of y - * (and optionally from exogenous variables ex) - * @param yy the output/response vector trimmed to match x.dim (@see ARX object) - * @param lags the maximum lag included (inclusive) - * @param fname the feature/variable names - * @param hparam the hyper-parameters (use REGRESSION.hp for default) - */ -class ARX (x: MatrixD, yy: VectorD, lags: Int, fname: Array [String] = null, - hparam: HyperParameter = REGRESSION.hp) - extends REGRESSION (x, yy, fname, hparam) - with ForecasterX (lags): - - private val debug = debugf ("ARX", true) // debug function - private val flaw = flawf ("ARX") // flaw function - - modelName = s"ARX_$lags" - - debug ("init", s"$modelName: x.dims = ${x.dims}, yy.dim = ${yy.dim}") - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the internally row trimed and column expanded input matrix and response vector. - */ - def getXY: (MatrixD, VectorD) = (x, yy) // (getX, getY) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * y_t+1 = f (y_t, ...) + e_t+1 - * @param t the time point from which to make prediction - * @param yx the matrix of endogenous y and exogenous x values - */ - def predict (t: Int, yx: MatrixD): Double = ??? -/* - // FIX - prints for debugging assertion failure yp(i) != yd(0) - println (yx) - println (s"t-1: ${yx(min (t-1, yx.dim-1))} --> ${b dot yx(min (t-1, yx.dim-1))}") - println (s"t: ${yx(min (t, yx.dim-1))} --> ${b dot yx(min (t, yx.dim-1))}") - b dot yx(min (t-1, yx.dim-1)) -*/ - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. 
- * Intended to work with rolling validation (analog of predict method) - * Must call `forecastAll` first. - * @param t the time point from which to make forecasts - * @param yf the forecast matrix (time x horizons) - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - VectorD (for k <- 1 to h yield yf(t+k, k)) // get yf diagonal from time t - end forecast - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to FORECAST MATRIX and return h-step ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. - * @param yf the forecast matrix for the endogenous variable y (time x horizons) - * @param yx the matrix of endogenous y and exogenous x values - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, yx: MatrixD, h: Int): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - - for t <- yx.indices do // make forecasts over all time points for horizon h - yf(t+h-1, h) = b dot yx(min (t, yx.dim-1)) // forecast down the diagonal ?? - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a `ARX` forecasting model y_ = f(x) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - * @param yx the matrix of endogenous y and exogenous x values - */ - def testF (h: Int, y_ : VectorD, yx: MatrixD): (VectorD, VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, yx, h) // get and align actual and forecasted values - val params = x.dim2 - resetDF (params, yy.dim - params) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// differ (yy, yfh) // uncomment for debugging - (yy, yfh, diagnose (yy, yfh)) // return aligned actual, forecasted and QoF vectors - end testF - -end ARX - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX` companion object provides factory methods. - */ -object ARX: - - private val debug = debugf ("ARX", true) // debug function - - private var TREND = false // include quadratic trend - private val DAY = false // include day of the week effect - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set whether to include a simple linear (in time) trend. - * @param trend flag indicating whether to include a trend - */ - def setTrend (trend: Boolean): Unit = TREND = trend - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. 
- * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @param hparam the hyper-parameters (use REGRESSION.hp for default) - */ - def apply (y: VectorD, lags: Int, hparam: HyperParameter = REGRESSION.hp): ARX = - val (x_, yy) = buildMatrix4TS (y, lags) // column for each lag; yy is y trimmed - var x = VectorD.one (yy.dim) +^: x_ // add first column of all ones - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - if DAY then - val day = VectorI (for t <- yy.indices yield t % 7) - x = day.toDouble +^: x // add DAY of week as ordinal var - -// val dum = Variable.dummyVars (day) -// x = x ++^ dum // add DAY of week as dummy vars - - debug ("apply", s"x.dims = ${x.dims}, yy.dim = ${yy.dim}") - debug ("apply", "x = $x \n yy = $yy") - new ARX (x, yy, lags, null, hparam) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * In addition, lagged exogenous variables are added. 
- * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @parax ex the input matrix for exogenous variables (one per column) - * @param hparam the hyper-parameters (use REGRESSION.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def exo (y: VectorD, lags: Int, ex: MatrixD, hparam: HyperParameter = REGRESSION.hp) - (elag1: Int = max (1, lags / 5), elag2: Int = max (1, lags)): ARX = - val (x_, yy) = buildMatrix4TS (y, lags) // column for each lag - var x = VectorD.one (yy.dim) +^: x_ // add first column of all ones - val endoCols = x.dim2 - println (s"endogenous: columns = $endoCols") - - x = x ++^ makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var - println (s"exogenous: columns = ${x.dim2 - endoCols}") - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - end if - if DAY then - val day = VectorI (for t <- yy.indices yield t % 7) - val dum = Variable.dummyVars (day) - x = x ++^ dum // add DAY of week as dummy vars - end if - - debug ("exo", s"x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// debug ("exo", s"x = $x \n yy = $yy") - new ARX (x, yy, lags, null, hparam) - end exo - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Make a matrix whose columns are lagged exogenous variables to be added to a data matrix. 
- * @param lags the maximum lag included (inclusive) for checking purposes - * @param ex the matrix of data for the exogenous variables - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def makeExoCols (lags: Int, ex: MatrixD, elag1: Int, elag2: Int): MatrixD = - var xx: MatrixD = buildMatrix4TS_exo (ex(?, 0), lags, elag1, elag2) - for j <- 1 until ex.dim2 do - xx = xx ++^ buildMatrix4TS_exo (ex(?, j), lags, elag1, elag2) - end for - println (s"addExoVars: collects lags of ${ex.dim2} exo variables into ${xx.dim2} columns") - xx - end makeExoCols - -end ARX - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest` main function tests the `ARX` class. - * This test is used to CHECK that the buildMatrix4TS function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. - * > runMain scalation.modeling.forecasting.aRXTest - */ -@main def aRXTest (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - - for p <- 1 to 9 do // autoregressive hyper-parameter p - banner (s"Test: ARX with $p lags") - val mod = ARX (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yp = mod.predict (mod.getX) - new Plot (null, mod.getY, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - end for - -end aRXTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest2` main function tests the `ARX` class on real data: - * Forecasting lake levels. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRXTest2 - */ -@main def aRXTest2 (): Unit = - - import forecasting.Example_LakeLevels.y - val h = 2 // the forecasting horizon - ARX.setTrend (true) - - for p <- 1 to 7 do // autoregressive hyper-parameter p - banner (s"Test: ARX with $p lags") - val mod = ARX (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) // parameter/coefficient statistics - - banner ("Predictions") - val (yx, yy) = mod.getXY // trimmed, input matrix and actual response vector - println (s"y.dim = ${y.dim}, yy.dim = ${yy.dim}, yx.dims = ${yx.dims}") - println (s"y = $y") - println (s"yy = $yy") - val yp = mod.predict (yx) // predicted response vector - new Plot (null, yy, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - println (s"yp = $yp") - - banner ("Forecasts") -// val yf = mod.forecast (yp, h) // forecasted response matrix - val yf = mod.forecastAll (y, yx, h) // forecasted response matrix - for k <- yf.indices2 do - new Plot (null, yy, yf(?, k), s"yy vs. yf_$k for ${mod.modelName} with $p lags", lines = true) - -// mod.testHorizons (h, y, yx) // calls testF for horizons 1 to h - ForecasterX.evalForecasts (mod, y, yx, h) - -// banner ("Forecast QoF") -// println (testForecast (mod, y, yf, p)) // QoF -// println (Fit.fitMap (mod.testf (k, y))) // evaluate k-units ahead forecasts - end for - -end aRXTest2 - -// val iskip = yy.indexWhere (_ >= 6.0) // find week with at least 6 deaths -// println (s"iskip = $iskip is first week with at least 6 deaths") - -import forecasting.Example_Covid.{loadData, NO_EXO, response} - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest3` main function tests the `ARX` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous variable only. - * Does In-Sample Testing (In_ST). 
- * Determines the terms to include in the model using Feature Selection. - * > runMain scalation.modeling.forecasting.aRXTest3 - */ -@main def aRXTest3 (): Unit = - - val LAGS = 10 // number of lags of y - val h = 6 // forecasting horizon - - val (ex, y) = loadData (NO_EXO, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX on COVID-19 Weekly Data") - val mod = ARX (y, LAGS) // create ARX model for time series data - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - - banner (s"Multi-horizon forecasting using the recursive method") - val yx = mod.getX - val yf = mod.forecastAll (y, yx, h) // forecasted response matrix - for k <- 0 to h do - new Plot (null, y, yf(?, k), s"y vs. yf_$k for ${mod.modelName} with $LAGS lags @ horizon $k", lines = true) - -// mod.testHorizons (h, y, yx) // calls testF for horizons 1 to h - ForecasterX.evalForecasts (mod, y, yx, h) - - banner (s"Feature Selection Technique: Stepwise") - val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"Stepwise: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end aRXTest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest4` main function tests the `ARX` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Does In-Sample Testing (In-ST). - * Determines the terms to include in the model using Feature Selection. 
- * > runMain scalation.modeling.forecasting.aRXTest4 - */ -@main def aRXTest4 (): Unit = - - val LAGS = 10 // number of lags of y - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX.exo on COVID-19 Weekly Data") - val mod = ARX.exo (y, LAGS, ex)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end aRXTest4 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest5` main function tests the `ARX` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Does In-Sample Testing (In-ST). - * Determines the terms to include in the model using Feature Selection. - * Run Train-n-Test (TnT) Split testing on best model. 
- * > runMain scalation.modeling.forecasting.aRXTest5 - */ -@main def aRXTest5 (): Unit = - - val LAGS = 10 // number of lags of y - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX.exo on COVID-19 Weekly Data") - val mod = ARX.exo (y, LAGS, ex)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run TnT on Best model") -// val bmod = mod.getBest._4 // get the best model from feature selection - val bmod = mod.getBest.mod.asInstanceOf [ARX] // get the best model from feature selection - val (x_, y_, xtest, ytest) = ForecasterX.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) - new Plot (null, ytest, yptest, s"${mod.modelName}, ytest vs. 
yptest", lines = true) - -end aRXTest5 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest6` main function tests the `ARX` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Does In-Sample Testing (In-ST). - * Determines the terms to include in the model using Feature Selection. - * Run Train-n-Test (TnT) Split testing on best model using Rolling Validation. - * > runMain scalation.modeling.forecasting.aRXTest6 - */ -@main def aRXTest6 (): Unit = - - val LAGS = 10 // number of lags (values from past) - val rc = 1 // retraining cycle - val h = 6 // forecasting horizon - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX.exo on COVID-19 Weekly Data") - val mod = ARX.exo (y, LAGS, ex)(1, LAGS+1) // create model for time series data with exo - - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run Rolling Validation on ARX Best model") - val bmod = mod.getBest._3.asInstanceOf [ARX] // get the best model from feature selection - ForecasterX.rollValidate (bmod, rc, h) - -end aRXTest6 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest7` main function tests the `ARX` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Preliminary investigation of Symbolic Regression. - * > runMain scalation.modeling.forecasting.aRXTest7 - */ -@main def aRXTest7 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Plot Variables on COVID-19 Weekly Data") - for lag <- 10 to 10 do - val xx_ = ex(lag until y.dim) - val yy_ = y(0 until y.dim - lag) -// new Plot (xx_, yy_, null, s"deaths vs. 
exo-vars @ lag = $lag") - - val mod = SymbolicRegression (xx_, yy_, null, collection.mutable.Set (1.0), cross = false) - mod.trainNtest ()() - println (mod.summary ()) - end for - -end aRXTest7 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/ARX_MV.scala b/src/main/scala/scalation/modeling/forecasting_old/ARX_MV.scala deleted file mode 100644 index f290986cb..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/ARX_MV.scala +++ /dev/null @@ -1,422 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Feb 13 16:22:21 EST 2022 - * @see LICENSE (MIT style license file). - * - * @note Model: AutoRegressive with eXogenous Variables (Time Series Multi-Variate Regression) - */ - -package scalation -package modeling -package forecasting_old - -import scala.math.max - -import scalation.mathstat._ -import scalation.modeling.neuralnet.{PredictorMV, RegressionMV} - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_MV` object supports regression for Time Series data. - * Multi-horizon forecasting supported via the DIRECT method. - * Given a response vector y, a predictor matrix x is built that consists of - * lagged y vectors. Additional future response vectors are built for training. - * - * y_t = b dot x - * - * where x = [y_{t-1}, y_{t-2}, ... y_{t-lags}]. - */ -object ARX_MV: - - private val debug = debugf ("ARX_MV", true) // debug function - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionMV` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @param h the forecasting horizon (1, 2, ... 
h) - * @param intercept whether to add a column of all ones to the matrix (intercept) - * @param hparam the hyper-parameters (use Regression.hp for default) - */ - def apply (y: VectorD, lags: Int, h: Int, intercept: Boolean = true, - hparam: HyperParameter = Regression.hp): RegressionMV = - val (x_, yy) = buildMatrix4TS (y, lags, h) // column for each lag - val x = if intercept then VectorD.one (yy.dim) +^: x_ else x_ // add first column of all ones - debug ("apply", s"x.dims = ${x.dims}, yy.dims = ${yy.dims}") - - val mod = new RegressionMV (x, yy, null, hparam) - mod.modelName = s"ARX_MV$lags" - mod - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionMV` object from a response matrix. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * This method provides data rescaling. - * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @param h the forecasting horizon (1, 2, ... h) - * @param intercept whether to add a column of all ones to the matrix (intercept) - * @param hparam the hyper-parameters (use Regression.hp for default) - */ - def rescale (y: VectorD, lags: Int, h: Int, intercept: Boolean = true, - hparam: HyperParameter = Regression.hp): RegressionMV = - val (x_, yy) = buildMatrix4TS (y, lags, h) // column for each lag - var x = scale (extreme (x_), (1.0, 5.0))(x_) // rescale vector x matrix to [1, 5] - if intercept then x = VectorD.one (yy.dim) +^: x // add first column of all ones - debug ("rescale", s"x.dims = ${x.dims}, yy.dims = ${yy.dims}") - - val mod = new RegressionMV (x, yy, null, hparam) - mod.modelName = s"ARX_MV$lags" - mod - end rescale - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionMV` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. 
- * In addition, lagged exogenous variables are added. - * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @parax ex the input matrix for exogenous variables (one per column) - * @param h the forecasting horizon (1, 2, ... h) - * @param intercept whether to add a column of all ones to the matrix (intercept) - * @param hparam the hyper-parameters (use Regression.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def exo (y: VectorD, lags: Int, ex: MatrixD, h: Int, - intercept: Boolean = true, hparam: HyperParameter = Regression.hp) - (elag1: Int = max (1, lags / 5), - elag2: Int = max (1, lags)): RegressionMV = - val (x_, yy) = buildMatrix4TS (y, lags, h) // column for each lag - var x = if intercept then VectorD.one (yy.dim) +^: x_ else x_ // add first column of all ones - val endoCols = x.dim2 - println (s"endogenous: columns = $endoCols") - - x = x ++^ ARX.makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var - println (s"exogenous: columns = ${x.dim2 - endoCols}") - - println (s"exo: x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// println (s"exo: x = $x \n yy = $yy") - val mod = new RegressionMV (x, yy, null, hparam) - mod.modelName = s"ARX_MV.exo_$lags" - mod - end exo - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split the x matrix and y matrix into training and testing sets. 
- * @param x the x data/input matrix - * @param y the y response/output matrix - * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) - */ - def split_TnT (x: MatrixD, y: MatrixD, ratio: Double = 0.20): (MatrixD, MatrixD, MatrixD, MatrixD) = - val n = x.dim - val tr_size = (n * (1.0 - ratio)).toInt - println (s"ARX_MV.split_TnT: tr_size = $tr_size, te_size = ${n - tr_size}") - (x(0 until tr_size), y(0 until tr_size), x(tr_size until n), y(tr_size until n)) - end split_TnT - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Align (for the testing set) the actual response vector for comparison with - * the predicted/forecasted response vector, returning a time vector and sliced - * response vectors. - * @param tr_size the size of the intial training set - * @param y the actual response for the full dataset (to be sliced) - * @param yp the predicted response for the full dataset (to be sliced) - * @param h_ the current forecasting horizon - 1 - */ - def align (tr_size: Int, y: VectorD, yp: VectorD, h_ : Int): (VectorD, VectorD, VectorD) = - debug ("align:", s"y.dim = ${y.dim}, yp.dim = ${yp.dim}, h_ = $h_") - (VectorD.range (tr_size, y.dim - h_), y(tr_size until y.dim - h_), yp(0 until yp.dim - h_)) - end align - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TRAINING SET (tr) and a TESTING SET (te) - * as follows: [ <-- tr_size --> | <-- te_size --> ] - * This version calls predict (DIRECT) for h-steps ahead out-of-sample forecasts. 
- * @see `RollingValidation` - * @param mod the forecasting model being used (e.g., `ARX_MV`) - * @param rc the retraining cycle (number of forecasts until retraining occurs) - * @param te_size_ the given size of the testing set (negative => take ratio) - */ - def rollValidate (mod: PredictorMV & Fit, rc: Int, te_size_ : Int = -1): MatrixD = - val x = mod.getX // get data/input matrix - val y = mod.getYY // get response/output matrix - val h = y.dim2 - val ftMat = new MatrixD (h, Fit.N_QoF) - banner (s"rollValidate: Evaluate ${mod.modelName}'s QoF for the horizons: 1 to $h") - - val te_size = if te_size_ < 0 then RollingValidation.teSize (y.dim) else te_size_ // size of test set - val tr_size = y.dim - te_size // size of initial training set - debug ("rollValidate", s"y.dims = ${y.dims}, train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") - - val yp = new MatrixD (te_size, y.dim2) // y-predicted over testing set - for i <- 0 until te_size do // iterate through testing set - val t = tr_size + i // next time point to forecast -// if i % rc == 0 then mod.train (x(0 until t), y(0 until t)) // retrain on sliding training set (growing set) - if i % rc == 0 then mod.train (x(i until t), y(i until t)) // retrain on sliding training set (fixed size set) - yp(i) = mod.predict (x(t-1)) // predict the next value - end for - - val yte0 = y(tr_size until y.dim, 0) - println (s"y_yp = ${yte0 +^: yp}") - - val df = max0 (mod.parameter.dim - 1) // degrees of freedom for model - mod.resetDF (df, te_size - df) // reset degrees of freedom - - for k <- y.indices2 do // move thru each horizon 1 to h - val (t, yk, ypk) = align (tr_size, y(?, k), yp(?, k), k) // clip ending zeros (0.0 or -0.0) - debug ("rollValidate", s"horizon $k: yk.dim = ${yk.dim}, ypk.dim = ${ypk.dim}") - new Plot (t, yk, ypk, s"Plot yy, yp vs. 
t for horizon ${k+1}", lines = true) - val qof = mod.diagnose (yk, ypk) - ftMat(k) = qof -// println (FitM.fitMap (qof, qoF_names)) - end for - println ("fitMap qof = ") - println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) - yp - end rollValidate - -end ARX_MV - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_MVTest` main function tests the `ARX_MV` class. - * This test is used to CHECK that the `buildMatrix4TS` function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. - * > runMain scalation.modeling.forecasting.aRX_MVTest - */ -@main def aRX_MVTest (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - val h = 3 // the forecasting horizon - - for p <- 5 to 5 do // autoregressive hyper-parameter p - banner (s"Test: ARX_MV with $p lags") - val mod = ARX_MV (y, p, h) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yy = mod.getYY - val yp = mod.predict (mod.getX) - for k <- yp.indices2 do - new Plot (null, yy(?, k), yp(?, k), s"yy_$k vs. yp_$k for ${mod.modelName} (h=${k+1}) with $p lags", lines = true) - end for - -end aRX_MVTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_MVTest2` main function tests the `ARX_MV` class on real data: - * Forecasting lake levels. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRX_MVTest2 - */ -@main def aRX_MVTest2 (): Unit = - - import forecasting.Example_LakeLevels.y - val h = 2 // the forecasting horizon - - for p <- 1 to 7 do // autoregressive hyper-parameter p - banner (s"Test: ARX_MV with $p lags") - val mod = ARX_MV (y, p, h) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - banner ("Predictions/Forecasts") // direct forecasting technique - val yy = mod.getYY - val yf = mod.predict (mod.getX) - for k <- yf.indices2 do - new Plot (null, yy(?, k), yf(?, k), s"yy_$k vs. yf$k for ${mod.modelName} (h=${k+1}) with $p lags", lines = true) - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - end for - -end aRX_MVTest2 - -import forecasting.Example_Covid.{loadData, NO_EXO, response} - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_MVTest3` main function tests the `ARX_MV` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous variable only. - * Does In-Sample Testing (In_ST). - * Determines the terms to include in the model using Feature Selection. - * > runMain scalation.modeling.forecasting.aRX_MVTest3 - */ -@main def aRX_MVTest3 (): Unit = - - val LAGS = 10 // number of lags - val h = 6 // forecasting horizon - - val (ex, y) = loadData (NO_EXO, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX_MV on COVID-19 Weekly Data") - val mod = ARX_MV (y, LAGS, h) // create model for time series data -// val mod = ARX_MV.rescale (y, LAGS, h) // create model for time series data - scaling - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(LAGS until y.dim) - - for k <- 0 until h do - new Plot (null, yy_, yp(?, k), s"${mod.modelName}, yy vs. 
yp @ h = $k", lines = true) - - banner (s"Feature Selection Technique: Stepwise") - val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX_MV with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"Stepwise: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end aRX_MVTest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_MVTest4` main function tests the `ARX_MV` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Does In-Sample Testing (In-ST). - * Determines the terms to include in the model using Feature Selection. - * > runMain scalation.modeling.forecasting.aRX_MVTest4 - */ -@main def aRX_MVTest4 (): Unit = - - val LAGS = 10 // number of lags - val h = 6 // forecasting horizon - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX_MV.exo on COVID-19 Weekly Data") - val mod = ARX_MV.exo (y, LAGS, ex, h)(1, LAGS+1) // create model for time series data - with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(LAGS until y.dim) - new Plot (null, yy_, yp(?, 0), s"${mod.modelName}, yy vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX_MV with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end aRX_MVTest4 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_MVTest5` main function tests the `ARX_MV` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Does In-Sample Testing (In-ST). - * Determines the terms to include in the model using Feature Selection. - * Run Train-n-Test (TnT) Split testing on best model. - * > runMain scalation.modeling.forecasting.aRX_MVTest5 - */ -@main def aRX_MVTest5 (): Unit = - - val LAGS = 10 // number of lags - val h = 6 // forecasting horizon - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX_MV.exo on COVID-19 Weekly Data") - val mod = ARX_MV.exo (y, LAGS, ex, h)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(LAGS until y.dim) - new Plot (null, yy_, yp(?, 0), s"${mod.modelName}, yy vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX_MV with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run TnT on Best model") -// val bmod = mod.getBest._3 // get the best model from feature selection - val bmod = mod.getBest.mod.asInstanceOf [RegressionMV] // get the best model from feature selection - val (x_, y_, xtest, ytest) = ARX_MV.split_TnT (bmod.getX, bmod.getYY) - val (yptest, qoftest) = bmod.asInstanceOf [PredictorMV].trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) - new Plot (null, ytest(?, 0), yptest(?, 0), s"${mod.modelName}, ytest vs. yptest", lines = true) - -end aRX_MVTest5 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_MVTest6` main function tests the `ARX_MV` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Does In-Sample Testing (In-ST). - * Determines the terms to include in the model using Feature Selection. - * Run Train-n-Test (TnT) Split testing on best model using Rolling Validation. 
- * > runMain scalation.modeling.forecasting.aRX_MVTest6 - */ -@main def aRX_MVTest6 (): Unit = - - val LAGS = 10 // number of lags (values from past) - val rc = 1 // retraining cycle - val h = 6 // forecasting horizon - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX_MV.exo on COVID-19 Weekly Data") - val mod = ARX_MV.exo (y, LAGS, ex, h)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(LAGS until y.dim) - new Plot (null, yy_, yp(?, 0), s"${mod.modelName}, yy vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX_MV with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run Rolling Validation on ARX_MV Best model") -// val bmod = mod.getBest._3 // get the best model from feature selection - val bmod = mod.getBest.mod.asInstanceOf [RegressionMV] // get the best model from feature selection - ARX_MV.rollValidate (bmod.asInstanceOf [PredictorMV & Fit], rc) - -end aRX_MVTest6 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/ARX_Quad.scala b/src/main/scala/scalation/modeling/forecasting_old/ARX_Quad.scala deleted file mode 100644 index 4034c9444..000000000 --- 
a/src/main/scala/scalation/modeling/forecasting_old/ARX_Quad.scala +++ /dev/null @@ -1,594 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Tue Feb 22 23:14:31 EST 2022 - * @see LICENSE (MIT style license file). - * - * @note Model: Quadratic AutoRegressive with eXogenous Variables (Quadratic Time Series Regression) - */ - -package scalation -package modeling -package forecasting_old - -import scala.math.{max, min} - -import scalation.mathstat._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Quad` class supports quadratic regression for Time Series data. - * Given a response vector y, a predictor matrix x is built that consists of - * lagged y vectors, - * - * y_t = b dot x - * where x = [ 1, y_{t-1}, y_{t-2}, ... y_{t-lag}, y_{t-1}^2, ...]. - * - * @param x the input/predictor matrix built out of lags of y - * @param yy the output/response vector trimmed to match x.dim - * @param lags the maximum lag included (inclusive) - * @param fname the feature/variable names - * @param hparam the hyper-parameters (use Regression.hp for default) - */ -class ARX_Quad (x: MatrixD, yy: VectorD, lags: Int, fname: Array [String] = null, - hparam: HyperParameter = Regression.hp) - extends Regression (x, yy, fname, hparam) - with ForecasterX (lags): - - private val debug = debugf ("ARX_Quad", true) // debug function - private val flaw = flawf ("ARX_Quad") // flaw function - - modelName = s"ARX_Quad$lags" - - debug ("init", s"$modelName: x.dims = ${x.dims}, yy.dim = ${yy.dim}") - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the internally row trimed and column expanded input matrix and response vector. 
- */ - def getXY: (MatrixD, VectorD) = (x, yy) // (getX, getY) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * y_t+1 = f (y_t, ...) + e_t+1 - * @param t the time point from which to make prediction - * @param yx the matrix of endogenous y and exogenous x values - */ - def predict (t: Int, yx: MatrixD): Double = ??? - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast h steps ahead using the recursive method, returning forecasts in - * matrix yf with columns: [1-step, 2-steps, ... h-steps]. - * @param yp the predicted response vector (horizon 1 forecasts) - * @param h the forecasting horizon - * - def forecast (yp: VectorD, h: Int): MatrixD = - val yf = new MatrixD (yp.dim, h) // matrix to hold forecasts - yf(?, 0) = yp // column 0 is predicted values - for k <- 1 until h do // forecast into future: columns 1 to h-1 - for i <- yf.indices do - val xi = x(i) - val yi = yf(i) - var sum = b(0) - var l = 0 - for j <- 1 until b.dim-1 by 2 do // add terms in an interleaved fashion - if j+k+1 < b.dim then - sum += b(j) * xi(j+k) // linear terms - sum += b(j+1) * xi(j+k+1) // add quadratic terms - else - sum += b(j) * yi(l) - sum += b(j+1) * yi(l)~^2 - l += 1 - end if - end for - yf(i, k) = sum // record forecasted value - end for - end for - yf - end forecast - */ - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. - * Intended to work with rolling validation (analog of predict method) - * Must call `forecastAll` first. 
- * @param t the time point from which to make forecasts - * @param yf the forecast matrix (time x horizons) - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - VectorD (for k <- 1 to h yield yf(t+k, k)) // get yf diagonal from time t - end forecast - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to FORECAST MATRIX and return h-step ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. - * @param yf the forecast matrix for the endogenous variable y (time x horizons) - * @param yx the matrix of endogenous y and exogenous x values - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, yx: MatrixD, h: Int): VectorD = - if h < 1 then flaw ("forecastAt", s"horizon h = $h must be at least 1") - for t <- yx.indices do // make forecasts over all time points for horizon h - val t1 = t + h - 1 // time point prior to horizon - yf(t+h, h) = b dot yx(min (t1, yx.dim-1)) // forecast down the diagonal ?? - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * Forecast recursively down diagonals in the yf forecast matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. 
- * @param y_ the actual values to use in making forecasts - * @param yx the matrix of endogenous y and exogenous x values - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - */ - override def forecastAll (y_ : VectorD, yx: MatrixD, h: Int): MatrixD = - debug ("forecastAll", s"y_.dim = ${y_.dim}, yx.dims = ${yx.dims}") - yf = new MatrixD (y_.dim+h, h+2) // forecasts for all time points t & horizons to h - for t <- y_.indices do yf(t, 0) = y_(t) // first column is the actual endogenous y values - for t <- yf.indices do yf(t, h+1) = t // last column is time (logical day) - - for k <- 1 to h do - if k > 1 then - val prev = yf(?, k-1) // the previous forecasted vales - yx.insert (1, lags, prev) // insert previous forecasts for endogenous variable - yx.insert (1+lags, lags+lags, prev~^2) // insert previous forecasts^2 for endogenous variable - // FIX - must insert at the right position; maybe rescaling - end if - forecastAt (yf, yx, k) // forecast k-steps into the future - end for - yf // return matrix of forecasted values - end forecastAll - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a `ARX_Quad` forecasting model y_ = f(x) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - * @param yx the matrix of endogenous y and exogenous x values - */ - def testF (h: Int, y_ : VectorD, yx: MatrixD): (VectorD, VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, yx, h) // get and align actual and forecasted values - val params = x.dim2 - resetDF (params, yy.dim - params) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// differ (yy, yfh) // uncomment for debugging - (yy, yfh, diagnose (yy, yfh)) // return aligned actual, forecasted and QoF vectors - end testF - -end ARX_Quad - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Quad` companion object provides factory methods. - */ -object ARX_Quad: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX_Quad` object to fit a quadratic surface from a response vector. - * The input/data matrix x is formed from the lagged y vectors as columns in matrix x. - * surface to Time Series data. 
- * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @param hparam the hyper-parameters (use Regression.hp for default) - */ - def apply (y: VectorD, lags: Int, - hparam: HyperParameter = Regression.hp): ARX_Quad = - val (x, yy) = buildMatrix4TS (y, lags) // column for each lag - val xx = new MatrixD (x.dim, 2*x.dim2+1) - xx(?, 0) = VectorD.one (yy.dim) // add first column of all ones - for j <- x.indices2 do // add terms in an interleaved fashion - xx(?, 2*j+1) = x(?, j) // linear terms - xx(?, 2*j+2) = x(?, j)~^2 // add quadratic terms - end for - - println (s"apply: xx.dims = ${xx.dims}, yy.dim = ${yy.dim}") -// println (s"apply: xx = $xx \n yy = $yy") - new ARX_Quad (xx, yy, lags, null, hparam) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX_Quad` object to fit a quadratic surface from a response vector. - * The input/data matrix x is formed from the lagged y vectors as columns in matrix x. - * In addition, lagged exogenous variables are added. 
- * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @parax ex the input matrix for exogenous variables (one per column) - * @param hparam the hyper-parameters (use Regression.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def exo (y: VectorD, lags: Int, ex: MatrixD, hparam: HyperParameter = Regression.hp) - (elag1: Int = max (1, lags / 5), - elag2: Int = max (1, lags)): ARX_Quad = - val (x, yy) = buildMatrix4TS (y, lags) // column for each lag - var xx = new MatrixD (x.dim, 2*x.dim2+1) - xx(?, 0) = VectorD.one (yy.dim) // add first column of all ones - for j <- x.indices2 do // add terms in an interleaved fashion - xx(?, 2*j+1) = x(?, j) // linear terms - xx(?, 2*j+2) = x(?, j)~^2 // add quadratic terms - end for - val endoCols = xx.dim2 - println (s"exo: endogenous: columns = $endoCols") - - xx = xx ++^ ARX.makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var - println (s"exogenous: columns = ${xx.dim2 - endoCols}") - - println (s"exo: xx.dims = ${xx.dims}, yy.dim = ${yy.dim}") -// println (s"exo: xx = $xx \n yy = $yy") - new ARX_Quad (xx, yy, lags, null, hparam) - end exo - -end ARX_Quad - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_QuadTest` main function tests the `ARX_Quad` class. - * This test is used to CHECK that the buildMatrix4TS function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. 
- * > runMain scalation.modeling.forecasting.ARX_QuadTest - */ -@main def ARX_QuadTest (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - - for p <- 1 to 10 do // autoregressive hyper-parameter p - banner (s"Test: ARX_Quad with $p lags") - val mod = ARX_Quad (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yp = mod.predict (mod.getX) - new Plot (null, mod.getY, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - end for - -end ARX_QuadTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_QuadTest2` main function tests the `ARX_Quad` class on real data: - * Forecasting lake levels. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.ARX_QuadTest2 - */ -@main def ARX_QuadTest2 (): Unit = - - import forecasting.Example_LakeLevels.y - val h = 2 // the forecasting horizon - - for p <- 1 to 8 do // autoregressive hyper-parameter p - banner (s"Test: ARX_Quad with $p lags") - val mod = ARX_Quad (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) // parameter/coefficient statistics - - banner ("Predictions") - val yy = mod.getY // trimmed actual response vector - println (s"y.dim = ${y.dim}, yy.dim = ${yy.dim}") - println (s"y = $y") - println (s"yy = $yy") - val yx = mod.getX - val yp = mod.predict (yx) // predicted response vector - new Plot (null, yy, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - println (s"yp = $yp") - - banner ("Forecasts") -// val yf = mod.forecast (yp, h) // forecasted response matrix - val yf = mod.forecastAll (yy, yx, h) // forecasted response matrix - for k <- yf.indices2 do - new Plot (null, yy, yf(?, k), s"yy vs. 
yf_$k for ${mod.modelName} with $p lags", lines = true) - -// mod.testHorizons (h, y, yx) // calls testF for horizons 1 to h - ForecasterX.evalForecasts (mod, y, yx, h) - -// banner ("Forecast QoF") -// println (testForecast (mod, y, yf, p)) // QoF -// println (Fit.fitMap (mod.testf (k, y))) // evaluate k-units ahead forecasts - end for - -end ARX_QuadTest2 - -/* - val mod = ARX (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) // parameter/coefficient statistics - - banner ("Predictions") - val yy = mod.getY // trimmed actual response vector - val yx = mod.getX - val yp = mod.predict (yx) // predicted response vector - new Plot (null, yy, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - println (s"yp = $yp") - - banner ("Forecasts") -// val yf = mod.forecast (yp, h) // forecasted response matrix - val yf = mod.forecastAll (yy, yx, h) // forecasted response matrix - for k <- yf.indices2 do - new Plot (null, yy, yf(?, k), s"yy vs. yf_$k for ${mod.modelName} with $p lags", lines = true) - -// mod.testHorizons (h, y, yx) // calls testF for horizons 1 to h - ForecasterX.evalForecasts (mod, y, yx, h) -*/ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_QuadTest3` main function tests the `ARX_Quad` class on real data: - * Forecasting COVID-19. Does In-Sample Testing on Endogenous variable. 
- * > runMain scalation.modeling.forecasting.ARX_QuadTest3 - */ -@main def ARX_QuadTest3 (): Unit = - - val LAGS = 10 // number of lags of y - val h = 6 // forecasting horizon - - val exo_vars = Array.ofDim [String] (0) // no exogenous variable in this case - val (xx, yy) = forecasting.Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test ARX_Quad on COVID-19 Weekly Data") - val mod = ARX_Quad (y, LAGS) // create model for time series data - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - - banner (s"Multi-horizon forecasting using the recursive method") - val yx = mod.getX - val yf = mod.forecastAll (y, yx, h) // forecasted response matrix - for k <- 0 to h do - new Plot (null, y, yf(?, k), s"y vs. 
yf_$k for ${mod.modelName} with $LAGS lags", lines = true) - - for k <- 1 to h do - val (yy, yfh, qof) = mod.testF (k, y, yx) // k-steps ahead forecast and its QoF - println (s"Evaluate QoF for horizon $k:") - println (FitM.fitMap (qof, qoF_names)) // evaluate k-steps ahead forecasts - end for - - banner (s"Feature Selection Technique: stepwise") - val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX_Quad with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"Stepwise: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${ox_fname(c)}, \t importance = $r") - -end ARX_QuadTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_QuadTest4` main function tests the `ARX_Quad` class on real data: - * Forecasting COVID-19 Weekly Data. Does In-Sample Testing on endogenous and exogenous variables. 
- * Stepsise gives: 0, 19, 40, 37, 60, 17, 15, 53, 5, 20, 50, 49, 48, 47, 18, 9, 6 best R^2-bar - * Stepsise gives: 0, 19, 40, 37, 60, 17, 15, 53, 5, 20, 50, 49, 48, 47 best sMAPE - * > runMain scalation.modeling.forecasting.ARX_QuadTest4 - */ -@main def ARX_QuadTest4 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = forecasting.Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX_Quad.exo on COVID-19 Weekly Data") - val mod = ARX_Quad.exo (y, 10, ex)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, yy vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX_Quad with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${Example_Covid.header(c)}, \t importance = $r") - -end ARX_QuadTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_QuadTest5` main function tests the `ARX_Quad` class on real data: - * Forecasting COVID-19 Weekly Data. Does TnT Testing on endogenous and exogenous variables. - * Determine the terms to include in the model for TnT from using Stepwise on In-Sample. 
- * Stepsise gives: 0, 19, 40, 37, 60, 17, 15, 53, 5, 20, 50, 49, 48, 47, 18, 9, 6 best R^2-bar - * Stepsise gives: 0, 19, 40, 37, 60, 17, 15, 53, 5, 20, 50, 49, 48, 47 best sMAPE - * > runMain scalation.modeling.forecasting.ARX_QuadTest5 - */ -@main def ARX_QuadTest5 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = forecasting.Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX_Quad.exo on COVID-19 Weekly Data") - val mod = ARX_Quad.exo (y, 10, ex)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, yy vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX_Quad with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${Example_Covid.header(c)}, \t importance = $r") - - banner ("Run TnT on Best model") -// val bmod = mod.getBest._3 // get the best model from feature selection - val bmod = mod.getBest.mod.asInstanceOf [ARX_Quad] // get the best model from feature selection - val (x_, y_, xtest, ytest) = ForecasterX.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) - new Plot (null, ytest, yptest, s"${mod.modelName}, ytest vs. yptest", lines = true) - -end ARX_QuadTest5 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_QuadTest6` main function tests the `ARX_Quad` class on real data: - * Forecasting COVID-19 Weekly Data. Does Rolling Validation on endogenous and exogenous variables. - * Determine the terms to include in the model HOW? 
- * > runMain scalation.modeling.forecasting.ARX_QuadTest6 - */ -@main def ARX_QuadTest6 (): Unit = - - val LAGS = 10 // number of lags (values from past) - val rc = 1 // retraining cycle - val hh = 6 // forecasting horizon - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = forecasting.Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX_Quad.exo on COVID-19 Weekly Data") - val mod = ARX_Quad.exo (y, LAGS, ex)(1, LAGS+1) // create model for time series data with exo - - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run Rolling Validation on ARX_Quad Best model") -// val bmod = mod.getBest._3 - val bmod = mod.getBest.mod.asInstanceOf [ARX_Quad] // get the best model from feature selection - ForecasterX.rollValidate (bmod, rc, hh) - -end ARX_QuadTest6 - -/* -Results for ARX_QuadTest4 
-key: -0 intercept - forced in the model -1..10 new_deaths -11..20 new_deaths^2 -21..30 icu_patients -31..40 hosp_patients -41..50 new_tests -51..60 people_vaccinated --------------------------------------------------------- -Stepwise Results - stops after adding 16 terms out of 61 --------------------------------------------------------- -1. 0 intercept -2. 19 new-deaths^2 @ lag 9 94.4965, 94.4614, 14.4024 -3. 40 new-test @ lag 10 96.7542, 96.7126, 12.5111 -4. 37 hosp_patients @ lag 7 97.5908, 97.5442, 10.8841, -5. 60 people_vaccinated @ lag 10 97.7216, 97.6624, 10.7154 -6. 17 new-deaths^2 @ lag 7 97.8073, 97.7356, 10.5200 -7. 15 new-deaths^2 @ lag 5 97.9416, 97.8603, 10.3548 -8. 53 people_vaccinated @ lag 3 98.0055, 97.9130, 10.3473 -9. 5 new_deaths @ lag 5 98.0713, 97.9685, 9.87575 -10. 20 new-deaths^2 @ lag 10 98.1416, 98.0294, 10.1097 -11. 50 new_tests @ lag 10 98.1658, 98.0419, 9.72346 -12. 49 new_tests @ lag 9 98.2019, 98.0673, 9.44431 -13. 48 new_tests @ lag 8 98.2456, 98.1014, 9.28453 -14. 47 new_tests @ lag 7 98.2596, 98.1036, 9.24678 ** -15. 18 new-deaths^2 @ lag 8 98.2764, 98.1088, 9.30558 -16. 9 new_deaths @ lag 9 98.2891, 98.1096, 9.40272 -17. 6 new_deaths @ lag 6 98.2805, 98.1133, 9.53115 --------------------------------------------------------- -Backward Elimination Results - truncated --------------------------------------------------------- -1. 0 intercept -2. 19 new-deaths^2 @ lag 9 94.4965, 94.4614, 14.4024 -3. 30 icu-patients @ lag 10 96.4830, 96.4379, 12.0952 -4. 29 icu-patients @ lag 9 97.4180, 97.3680, 10.8648 -5. 39 hosp_patients @ lag 9 97.5852, 97.5225, 10.6671 -6. 8 new-deaths @ lag 8 97.6205, 97.5427, 10.5974 -7. 15 new-deaths^2 @ lag 5 97.7520, 97.6632, 10.5282 -8. 7 new_deaths @ lag 7 97.8385, 97.7383, 10.7373 -9. 9 new_deaths @ lag 9 97.9804, 97.8727, 10.4594 -10. 60 people_vaccinated @ lag 10 98.1069, 97.9925, 10.5465 -11. 56 people_vaccinated @ lag 6 98.1850, 98.0624, 10.0418 -12. 
37 hosp_patients @ lag 7 98.2482, 98.1172, 9.78951 -13. 28 icu-patients @ lag 8 98.2995, 98.1597, 9.66914 -14. 26 icu-patients @ lag 6 98.3131, 98.1619, 9.65001 -15. 27 icu-patients @ lag 7 98.3450, 98.1841, 9.55611 -16. 35 hosp_patients @ lag 5 98.3621, 98.1903, 9.48847 ** -17. 24 icu-patients @ lag 4 98.3989, 98.2185, 9.49409 --------------------------------------------------------- -Forward Selection Results - truncated --------------------------------------------------------- -1. 0 intercept -2. 19 new-deaths^2 @ lag 9 94.4965, 94.4614, 14.4024 -3. 40 new-test @ lag 10 96.7542, 96.7126, 12.5111 -4. 37 hosp_patients @ lag 7 97.5908, 97.5442, 10.8841, -5. 60 people_vaccinated @ lag 10 97.7216, 97.6624, 10.7154 -6. 17 new-deaths^2 @ lag 7 97.8073, 97.7356, 10.5200 -7. 10 new_deaths @ lag 10 97.9416, 97.8603, 10.3548 -8. 15 new-deaths^2 @ lag 5 98.0055, 97.9130, 10.3473 -9. 53 people_vaccinated @ lag 3 98.0713, 97.9685, 9.87575 -10. 5 new_deaths @ lag 5 98.1416, 98.0294, 10.1097 -11. 20 new-deaths^2 @ lag 10 98.1658, 98.0419, 9.72346 -12. 50 new_tests @ lag 10 98.2019, 98.0673, 9.44431 -13. 45 new_tests @ lag 5 98.2456, 98.1014, 9.28453 -14. 49 new_tests @ lag 9 98.2596, 98.1036, 9.24678 ** -15. 48 new_tests @ lag 8 98.2764, 98.1088, 9.30558 -16. 47 new_tests @ lag 7 98.2891, 98.1096, 9.40272 -17. 18 new-deaths^2 @ lag 8 98.3041, 98.1131, 9.24220 -*/ - diff --git a/src/main/scala/scalation/modeling/forecasting_old/ARX_Quad_MV.scala b/src/main/scala/scalation/modeling/forecasting_old/ARX_Quad_MV.scala deleted file mode 100644 index 971795c19..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/ARX_Quad_MV.scala +++ /dev/null @@ -1,373 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Feb 13 16:22:21 EST 2022 - * @see LICENSE (MIT style license file). 
- * - * @note Model: Quadratic AutoRegressive with eXogenous Variables - * (Time Series Generalized Quadratic Multi-Variate Regression) - */ - -package scalation -package modeling -package forecasting_old - -import scala.math.max - -import scalation.mathstat._ - -import neuralnet.RegressionMV - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Quad_MV` object supports quadratic regression for Time Series data. - * Multi-horizon forecasting supported via the DIRECT method. - * Given a response vector y, a predictor matrix x is built that consists of - * lagged y vectors. Additional future response vectors are built for training. - * y_t = b dot x - * where x = [y_{t-1}, y_{t-2}, ... y_{t-lags}]. - * Matrix x includes constant, linear and generalized quadratic terms (x^pw where pw defaults to 2.0). - * For example, a lower power such as 1.5 may work better for longer horizons (pw may be tuned). - */ -object ARX_Quad_MV: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionMV` object from a Time Series response vector y. - * The input/data matrix x is formed from the lagged y vectors as columns in matrix x. - * Quadratic terms are added to the model, one for each lag. - * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @param h the forecasting horizon (1, 2, ... 
h) - * @param pw the power to raise the variables to (x_j ^ pw) defaults to 2.0 - * @param intercept whether to add a column of all ones to the matrix (intercept) - * @param hparam the hyper-parameters ((use Regression.hp for default) - */ - def apply (y: VectorD, lags: Int, h: Int, pw: Double = 2.0, intercept: Boolean = true, - hparam: HyperParameter = Regression.hp): RegressionMV = - val (x_, yy) = buildMatrix4TS (y, lags, h) // column for each lag - var x = x_ ++^ x_ ~^2 // add quadratic-ish terms - if intercept then x = VectorD.one (yy.dim) +^: x // add first column of all ones - - val mod = new RegressionMV (x, yy, null, hparam) - mod.modelName = s"ARX_Quad_MV_$lags" - mod - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionMV` object from a response matrix. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * This method provides data rescaling. - * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @param h the forecasting horizon (1, 2, ... 
h) - * @param pw the power to raise the variables to (x_j ^ pw) defaults to 2.0 - * @param intercept whether to add a column of all ones to the matrix (intercept) - * @param hparam the hyper-parameters (use Regression.hp for default) - */ - def rescale (y: VectorD, lags: Int, h: Int, pw: Double = 2.0, intercept: Boolean = true, - hparam: HyperParameter = Regression.hp): RegressionMV = - val (x_, yy) = buildMatrix4TS (y, lags, h) // column for each lag - var x = x_ ++^ x_ ~^ pw // add quadratic-ish terms - x = scale (extreme (x), (1.0, 5.0))(x) // rescale vector x matrix to [1, 5] - if intercept then x = VectorD.one (yy.dim) +^: x // add first column of all ones - - val mod = new RegressionMV (x, yy, null, hparam) - mod.modelName = s"ARX_Quad_MV_$lags" - mod - end rescale - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionMV` object from a response vector to fit a quadratic - * surface to Time Series data. The input/data matrix x is formed from the - * lagged y vectors as columns in matrix x. - * In addition, lagged exogenous variables are added. - * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @parax ex the input matrix for exogenous variables (one per column) - * @param h the forecasting horizon (1, 2, ... 
h) - * @param pw the power to raise the variables to (x_j ^ pw) defaults to 2.0 - * @param intercept whether to add a column of all ones to the matrix (intercept) - * @param hparam the hyper-parameters (use Regression.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def exo (y: VectorD, lags: Int, ex: MatrixD, h: Int, pw: Double = 2.0, - intercept: Boolean = true, hparam: HyperParameter = Regression.hp) - (elag1: Int = max (1, lags / 5), - elag2: Int = max (1, lags)): RegressionMV = - val (x_, yy) = buildMatrix4TS (y, lags, h) // column for each lag - var x = x_ ++^ x_ ~^ pw // add quadratic-ish terms - if intercept then x = VectorD.one (yy.dim) +^: x // add first column of all ones - val endoCols = x.dim2 - println (s"endogenous: columns = $endoCols") - - val z = ARX.makeExoCols (lags, ex, elag1, elag2) // columns for exo vars - x = x ++^ z ++^ z ~^ pw // add linear and quad-ish terms for exo vars - println (s"exogenous: columns = ${x.dim2 - endoCols}") - - println (s"exo: x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// println (s"exo: x = $x \n yy = $yy") - mathstat.diagnoseMat (x) - val mod = new RegressionMV (x, yy, null, hparam) - mod.modelName = s"ARX_Quad_MV.exo$lags" - mod - end exo - -end ARX_Quad_MV - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_Quad_MVTest` main function tests the `ARX_Quad_MV` object. - * This test is used to CHECK that the buildMatrix4TS function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. 
- * > runMain scalation.modeling.forecasting.aRX_Quad_MVTest - */ -@main def aRX_Quad_MVTest (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - val h = 3 // the forecasting horizon - - for p <- 5 to 5 do // autoregressive hyper-parameter p - banner (s"Test: ARX_Quad_MV with $p lags") - val mod = ARX_Quad_MV (y, p, h) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yy = mod.getYY - val yp = mod.predict (mod.getX) - for k <- yp.indices2 do - new Plot (null, yy(?, k), yp(?, k), s"yy_$k vs. yp_$k for ${mod.modelName} (h=${k+1}) with $p lags", lines = true) - end for - end for - -end aRX_Quad_MVTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_Quad_MVTest2` main function tests the `ARX_Quad_MV` class on real data: - * Forecasting lake levels. Uses quadratic regression. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRX_Quad_MVTest2 - */ -@main def aRX_Quad_MVTest2 (): Unit = - - import forecasting.Example_LakeLevels.y - - val h = 2 // the forecasting horizon - - for p <- 1 to 7 do // autoregressive hyper-parameter p - banner (s"Test: ARX_Quad_MV with $p lags") - val mod = ARX_Quad_MV (y, p, h) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - banner ("Predictions/Forecasts") // direct forecasting technique - val yy = mod.getYY - val yf = mod.predict (mod.getX) - for k <- yf.indices2 do - new Plot (null, yy(?, k), yf(?, k), s"yy_$k vs. 
yf_$k for ${mod.modelName} (h=${k+1}) with $p lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - end for - -end aRX_Quad_MVTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_Quad_MVTest3` main function tests the `ARX_Quad_MV` class on real data: - * Forecasting COVID-19 Weekly Data. Uses quadratic regression, In-Sample Testing using - * endogenous variable. - * > runMain scalation.modeling.forecasting.aRX_Quad_MVTest3 - */ -@main def aRX_Quad_MVTest3 (): Unit = - - val LAGS = 10 // number of lags - val h = 6 // forecasting horizon - - val exo_vars = Array.ofDim [String] (0) // no exogenous variables in this case - val (xx, yy) = forecasting.Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX_Quad_MV on COVID-19 Weekly Data") - val mod = ARX_Quad_MV (y, LAGS, h) // create model for time series data -// val mod = ARX_Quad_MV.rescale (y, LAGS, h) // create model for time series data - scaling - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(LAGS until y.dim) - - for k <- 0 until h do - new Plot (null, yy_, yp(?, k), s"${mod.modelName}, yy vs. 
yp @ h = $k", lines = true) - end for - - banner (s"Feature Selection Technique: Stepwise") - val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX_Quad_MV with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"Stepwise: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end aRX_Quad_MVTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_Quad_MVTest4` main function tests the `ARX_Quad_MV` class on real data: - * Forecasting COVID-19 Weekly Data. Uses quadratic regression, In-Sample Testing using endogenous - * and exogeneous variables. - * > runMain scalation.modeling.forecasting.aRX_Quad_MVTest4 - */ -@main def aRX_Quad_MVTest4 (): Unit = - - val LAGS = 10 // number of lags - val h = 6 // forecasting horizon - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = forecasting.Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX_Quad_MV.exo on COVID-19 Weekly Data") - val mod = ARX_Quad_MV.exo (y, LAGS, ex, h, 0.5)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(LAGS until y.dim) - new Plot (null, yy_, yp(?, 0), s"${mod.modelName}, yy vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX_Quad_MV with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end aRX_Quad_MVTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_Quad_MVTest5` main function tests the `ARX_Quad_MV` class on real data: - * Forecasting COVID-19 Weekly Data. Uses Quadratic Regression. Does TnT Testing on endogenous - * and exogenous variables. Determine the terms to include in the model for TnT from using - * Stepwise on In-Sample. 
- * > runMain scalation.modeling.forecasting.aRX_Quad_MVTest5 - */ -@main def aRX_Quad_MVTest5 (): Unit = - - val LAGS = 10 // number of lags - val h = 6 // forecasting horizon - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = forecasting.Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX_Quad_MV.exo on COVID-19 Weekly Data") - val mod = ARX_Quad_MV.exo (y, LAGS, ex, h)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(LAGS until y.dim) - new Plot (null, yy_, yp(?, 0), s"${mod.modelName}, yy vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX_Quad_MV with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run TnT on Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - val (x_, y_, xtest, ytest) = ARX_MV.split_TnT (bmod.getX, bmod.getYY) - val (yptest, qoftest) = bmod.asInstanceOf [RegressionMV].trainNtest (x_, y_)(xtest, ytest) // train on 
(x_, y_) and test on (xtest, ytest) - new Plot (null, ytest(?, 0), yptest(?, 0), s"${mod.modelName}, ytest vs. yptest", lines = true) - -end aRX_Quad_MVTest5 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_Quad_MVTest6` main function tests the `ARX_Quad_MV` class on real data: - * Forecasting COVID-19 Weekly Data. Uses Quadratic Regression. Does TnT Testing on endogenous - * and exogenous variables. Determine the terms to include in the model for TnT from using - * Stepwise on In-Sample. - * > runMain scalation.modeling.forecasting.aRX_Quad_MVTest6 - */ -@main def aRX_Quad_MVTest6 (): Unit = - - val LAGS = 10 // number of lags - val rc = 1 // retraining cycle - val h = 6 // forecasting horizon - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = forecasting.Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX_Quad_MV.exo on COVID-19 Weekly Data") - val mod = ARX_Quad_MV.exo (y, LAGS, ex, h)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(LAGS until y.dim) - new Plot (null, yy_, yp(?, 0), s"${mod.modelName}, yy vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX_Quad_MV with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run Rolling Validation on ARX_Quad_MV Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - ARX_MV.rollValidate (bmod.asInstanceOf [RegressionMV], rc) - -end aRX_Quad_MVTest6 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/ForecastUtil.scala b/src/main/scala/scalation/modeling/forecasting_old/ForecastUtil.scala deleted file mode 100644 index c424b1809..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/ForecastUtil.scala +++ /dev/null @@ -1,233 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Feb 13 16:22:21 EST 2022 - * @see LICENSE (MIT style license file). - * - * @note Model Framework: Utilities for Time Series Forecasting - */ - -package scalation -package modeling -package forecasting_old - -import scala.math.max - -import scalation.mathstat._ - -// FIX - ForecastUtil make uniform across DIRECT vs. RECURSIVE - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Given a response vector y, build and return - * (1) an input/predictor MATRIX xx and - * (2) an output/single-horizon output/response VECTOR yy. 
- * Used by Single-Variate forecast models such as `ARX`. - * that use RECURSIVE multi-horizon forecasting. - * The first response can't be predicted due to missing past values. - * Therefore the number of rows in xx and yy is reduced to "y.dim-1" (time 0 cut out). - * @param y the given output/response vector - * @param lags the maximum lag included (inclusive) - */ -def buildMatrix4TS (y: VectorD, lags: Int): (MatrixD, VectorD) = - val mm = y.dim - 1 - val yb = WeightedMovingAverage.backcast (y) +: y // y prependined with one backcast value - val xx = new MatrixD (mm, lags) - val yy = new VectorD (mm) // day 0 cut out - for t <- 0 until mm do - for j <- xx.indices2 do xx(t, lags - 1 - j) = yb(max0 (t + 1 - j)) - yy(t) = y(t+1) - end for - println (s"buildMatrix4TS: xx.dims = ${xx.dims}, yy.dim = ${yy.dim}") - (xx, yy) -end buildMatrix4TS - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Given a response vector y, build and return - * (1) an input/predictor MATRIX xx and - * (2) an output/multi-horizon output/response MATRIX yy. - * Used by Multi-Variate (MV) forecast models such as `ARX_MV`, - * that use DIRECT multi-horizon forecasting. - * The first response can't be predicted as its inputs are only the backcast value. - * Therefore, the number of rows in xx and yy is reduced to "y.dim-1". - * @param y the given output/response vector, i.e., the time series - * @param lags the maximum lag included (inclusive) - * @param hh the maximum forecasting horizon (h = 1, 2, ... 
hh) - */ -def buildMatrix4TS (y: VectorD, lags: Int, hh: Int): (MatrixD, MatrixD) = - val mm = y.dim - 1 - val yb = WeightedMovingAverage.backcast (y) +: y // y prependined with one backcast value - val xx = new MatrixD (y.dim-1, lags) // input matrix: column for each lag - val yy = new MatrixD (y.dim-1, hh) // output matrix: column for each horizon - for t <- 0 until mm do // skip first row (all the same values) - for j <- xx.indices2 do xx(t, lags - 1 - j) = yb(max0 (t + 1 - j)) - for j <- yy.indices2 do yy(t, j) = if t + 1 + j >= y.dim then -0.0 else y(t + 1 + j) - end for - println (s"buildMatrix4TS: xx.dims = ${xx.dims}, yy.dims = ${yy.dims}") - (xx, yy) -end buildMatrix4TS - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Given a response vector y, build and return - * (1) an input/predictor MATRIX xx and - * (2) an output/multi-horizon output/response MATRIX yy. - * Used by Multi-Variate (MV) forecast models such as `ARX_MV`, - * that use DIRECT multi-horizon forecasting. - * The first response can't be predicted as its inputs are only the backcast value - * The last h-1 responses can't be predicted due to missing future values. - * Therefore, the number of rows in xx and yy is reduced to "y.dim-h". - * @param y the given output/response vector - * @param lags the maximum lag included (inclusive) - * @param h the forecasting horizon (1, 2, ... 
h) -def buildMatrix4TS (y: VectorD, lags: Int, h: Int): (MatrixD, MatrixD) = - val yb = WeightedMovingAverage.backcast (y) +: y // y prependined with one backcast value - val xx = new MatrixD (y.dim-h, lags) // input matrix: column for each lag - val yy = new MatrixD (y.dim-h, h) // output matrix: column for each horizon - for i <- 1 until xx.dim+1 do // skip first row (all the same values) - for j <- xx.indices2 do xx(i-1, lags - 1 - j) = yb(max0 (i - j)) - for j <- yy.indices2 do yy(i-1, j) = y(i + j) - end for - println (s"buildMatrix4TS: xx.dims = ${xx.dims}, yy.dims = ${yy.dims}") - (xx, yy) -end buildMatrix4TS - */ - -/* - val xx = new MatrixD (y.dim-h, lags) // input matrix: column for each lag - val yy = new MatrixD (y.dim-h, h) // output matrix: column for each horizon - for i <- 1 until xx.dim+1 do // skip first row (all the same values) - for j <- xx.indices2 do xx(i-1, lags - 1 - j) = yb(max0 (i - j)) - for j <- yy.indices2 do yy(i-1, j) = y(i + j) - end for - - val xx = new MatrixD (y.dim + 1 - lags - h, lags) - val yy = new MatrixD (y.dim + 1 - lags - h, h) - for i <- lags to y.dim - h do - for j <- xx.indices2 do xx(i-lags, lags - 1 - j) = y(i - 1 - j) - for j <- yy.indices2 do yy(i-lags, j) = if i + j >= y.dim then -0.0 else y(i + j) -// for j <- yy.indices2 do yy(i-lags, j) = y(i + j) - end for - println (s"buildMatrix4TS: xx.dims = ${xx.dims} \n yy.dims = ${yy.dims}") - (xx, yy) -*/ - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Given an exogenous variable vector ex corresponding to an endogenous response - * vector y, build and return an input/predictor MATRIX xx. - * The first lag responses can't be predicted due to missing past values. - * Therefore the number of rows in xx is reduced to ex.dim - elag1. 
- * @param ex the exogenous variable vector - * @param lags the maximum lag included (inclusive) for the endogenous variable - * @param elag1 the minimum lag included (inclusive) for the exogenous variable - * @param elag2 the maximum lag included (inclusive) for the exogenous variable - */ -def buildMatrix4TS_exo (ex: VectorD, lags: Int, elag1: Int, elag2: Int): MatrixD = - val flaw = flawf ("top") - val n = elag2 - elag1 - if n < 1 then flaw ("buildMatrix4TS_exo", "min exo lag must be smaller than max exo lag") -// if elag2 > lags then flaw ("buildMatrix4TS_exo", "exo lag cannot exceed endogenous lag") - - val xx = new MatrixD (ex.dim - elag1, n) - for i <- elag1 until ex.dim do - for j <- xx.indices2 do xx(i-elag1, n - 1 - j) = ex(max(i - elag1 - j, 0)) - end for -// println (s"buildMatrix4TS_exo: xx = $xx") - xx -end buildMatrix4TS_exo - -/* commented out - * Therefore the number of rows in xx is reduced to ex.dim - lags. -def buildMatrix4TS_exo (ex: VectorD, lags: Int, elag1: Int, elag2: Int): MatrixD = - val flaw = flawf ("top") - val n = elag2 - elag1 - if n < 1 then flaw ("buildMatrix4TS_exo", "min exo lag must be smaller than max exo lag") -// if elag2 > lags then flaw ("buildMatrix4TS_exo", "exo lag cannot exceed endogenous lag") - - val xx = new MatrixD (ex.dim - lags, n) - for i <- lags until ex.dim do - for j <- xx.indices2 do xx(i-lags, n - 1 - j) = ex(i - elag1 - j) - end for -// println (s"buildMatrix4TS_exo: xx = $xx") - xx -end buildMatrix4TS_exo -*/ - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Build a 3D tensor that collects and (lag) expands the endogenous variable and all - * exogenous variables into the form 'time x lags x variables' --> INPUT TENSOR. - * Lags for endogenous variable: 1, 2, ... lags - * Lags for exogenous variables: el, el+1, ... 
el-1+lags - * For models like SARIMAX with weekly data, exo variables are not forecasted, so it - * is not possible to use exogenous lag 1 to make week-two forecasts. - * Also build a matrix of target values for each forecasting horizon --> OUTPUT MATRIX. - * The number of rows m = y.dim - el, as forecasts cannot be made unless there is at - * least one endogenous and one exogenous lag (past value) available to the model. - * Model: yy_hat = f(xx) with loss function, e.g., || yy - yy_hat ||_F - * NOTE: for models not taking tensor input, flatten into a matrix. - * @param y the endogenous variable vector over time (e.g., new_deaths) - * @param ex the exogenous variable matrix over time x exo_vars (e.g., icu_patients, hosp_patient) - * @param lags the maximum lag included (inclusive) - * @param h the forecasting horizon (1, 2, ... h) - * @param el the first exogenous lag (may be larger than 1) - */ -def buildTensor4TS (y: VectorD, ex: MatrixD, lags: Int, h: Int = 1)(el: Int = h): (TensorD, MatrixD) = - val flaw = flawf ("top") - if y.dim != ex.dim then - flaw ("buildTensor4TS", s"endo and exo variable sizes do not match: y.dim = ${y.dim} != ex.dim = ${ex.dim}") - val m = y.dim - el // number of rows - - val xx = new TensorD (m, lags, 1 + ex.dim2) // input tensor from endo and exo vars - for i <- xx.indices; j <- xx.indices2; k <- xx.indices3 do // time x lags x variables - xx(i, j, k) = if k == 0 then y(max(i - el - j, 0)) - else ex(max(i - el - j, 0), k-1) - - val yy = new MatrixD (m, h) // output matrix from endo vars - for i <- yy.indices; j <- yy.indices2 do // time x horizons - yy(i, j) = y(max(i - el - j, 0)) - - println (s"buildTensor4TS: xx.dims = ${xx.dims}, yy.dims = ${yy.dims}") - (xx, yy) // tuple of (input tensor, output matrix) -end buildTensor4TS - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Test the actual response vector vs. forecasted matrix, returning the QoF - * for all forecasting horizons 1 to h. 
- * FIX - not agreeing with `ForecasterUtil.testHorizons` - * @param mod the fittable model (one that extends `Fit`) - * @param y the original actual response vector - * @param yf the forecasted response matrix - * @param p the number of variables/lags used in the model - */ -def testForecast (mod: Fit, y: VectorD, yf: MatrixD, p: Int): MatrixD = - MatrixD (for k <- 1 until yf.dim2 - 1 yield - val y_ = y(p + k until y.dim) - val yf_ = yf(?, k)(0 until y.dim - p - k) - println (s"y_.dim = ${y_.dim}, yf_.dim = ${yf_.dim}") - mod.resetDF (p, y.dim - p - (k+1)) // reset the degrees of freedom - mod.diagnose (y_, yf_)) // return the QoF of the forecasts -end testForecast - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `buildTensor4TSTest` main function tests `buildTensor4TS method of the - * Covid dataset. - * > runMain scalation.modeling.forecasting.buildTensor4TSTest - */ -@main def buildTensor4TSTest (): Unit = - - val yy = forecasting.Example_Covid.loadData_y (forecasting.Example_Covid.response) -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val zons = 6 // max forecasting horizon - val lags = 7 // the number of lags - - val (x_, y_) = buildMatrix4TS (y, lags, zons) - - println (s"y = $y \n x_ = $x_ \n y_ = $y_") - -end buildTensor4TSTest - diff --git a/src/main/scala/scalation/modeling/forecasting_old/Forecaster.scala b/src/main/scala/scalation/modeling/forecasting_old/Forecaster.scala deleted file mode 100644 index 8eda30d1d..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/Forecaster.scala +++ /dev/null @@ -1,493 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Hao Peng - * @version 2.0 - * @date Sat Dec 8 14:32:12 EST 2018 - * @see LICENSE (MIT style license file). 
- * - * @note Model Framework: Base Trait for Forecasters with Vector Input - * - * @see ruqinren.wordpress.com/2020/02/21/all-the-confusion-about-arima-arimax-transfer-function-dynamic-regression-models/ - * robjhyndman.com/hyndsight/arimax/ - * medium.com/@xwang222/forecasting-101-ep07-multivariate-models-9f3a11fbb374 - */ - -package scalation -package modeling -package forecasting_old - -import scala.collection.mutable.Set -import scala.math.abs -import scala.util.control.Breaks.{break, breakable} - -import scalation.mathstat._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Compute the sum of squares errors assuming the first 'skip' error is zero. - * @param y the actual response vector - * @param yp the predicted response vector (one-step ahead) - * @param skip skip this many elements at the beginning (defaults to 1) - */ -def ssef (y: VectorD, yp: VectorD, skip: Int = 1): Double = - var ss = 0.0 - for t <- skip until y.dim do ss += (y(t) - yp(t))~^2 - ss -end ssef - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Compute a reverse dot product of the parameter vector b and the most recent - * actual values in the time series y_, going backwards from y_t. - * Use max (0, ..) to avoid using negative indices into the y_ vector. - * @param b the parameter/coefficient vector (e.g., φ for AR) - * @param y_ the actual time series values to use in making predictions - * @param t the time point FROM WHICH to make prediction - */ -def rdot (b: VectorD, y_ : VectorD, t: Int): Double = - var sum = 0.0 - for j <- b.indices do sum += b(j) * y_(max0 (t-j)) // add φ_j y_t-j - sum -end rdot - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Compute a reverse dot product of the parameter vector b and a diagonal - * of the the yf matrix starting at element (r, c) and moving up and back. - * Use max (0, ..) to avoid using negative indices into the yf matrix. 
- * @param b the parameter/coefficient vector (e.g., φ for AR) - * @param yf the forecast matrix (time x horizons) - * @param r the starting row in the forecast matrix (time) - * @param c the starting column in the forecast matrix (horizon) - */ -def rdot (b: VectorD, yf: MatrixD, r: Int, c: Int): Double = - var sum = 0.0 - for j <- b.indices do sum += b(j) * yf(max0 (r-j), max0 (c-j)) - sum -end rdot - -/*---------------------------------------------------------------------------- - -The FORECASTING MATRIX yf: Example Calculation for AR(3) - move back the diagonal -and up after reaching column 0. - -yf | h=0 h=1 h=2 ------------------------ -t=0 | [1.0] 0.0 0.0 - | \ \ -t=1 | [2.0] 1.1 0.0 - | \ \ -t=2 | 3.0 [1.9] 0.9 - | \ \ -t=3 | 4.0 3.1 [2.1] - | \ \ -t=4 | 5.0 3.9 2.9 - | \ \ -t=4 | 6.0 5.1 2.9 - -yf(3, 2) = a + rdot = a + b(0) * yf(2, 1) + b(1) * yf(1, 0) + b(2) * yf(0, 0) - -Note: 'a' is the constant term and rdot multiplies the parameter vector 'b' times -elements in a diagonal in reverse. Also, the upper right triangle is unknowable -unless back-casting is used. - -Column h = 0: zeroth horizon forecasts are the actual (e.g., today's known) values in the time series -Column h = 1: horizon one forecasts are the one-step ahead (e.g., tomorrow's) forecasts -Column h = 2: horizon two forecasts are the two-steps ahead (e.g., day after tomorrow's) forecasts - -Row time t = 3: yf(3, 0) = 4.0 = the actual value for day 3, - yf(3, 1) = 3.1 = the one-step ahead forecast for day 3, made yesterday - yf(3, 2) = 2.1 = the two-steps ahead forecast for day 3, made two days ago - -----------------------------------------------------------------------------*/ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Forecaster` trait provides a common framework for several forecasters. - * Note, the train method must be called first followed by test. 
- * @param y the response vector (time series data) - * @param tt the time vector, if relevant (index as time may suffice) - * @param hparam the hyper-parameters for models extending this trait - */ -trait Forecaster (y: VectorD, tt: VectorD = null, hparam: HyperParameter = null) - extends Model: - - private val debug = debugf ("Forecaster", true) // debug function - private val flaw = flawf ("Forecaster") // flaw function - protected val e = new VectorD (y.dim+1) // residual/error vector [e_0, e_1, ... e_m] - protected var yf: MatrixD = null // forecasts for all time points t & horizons to h - - if tt != null then println (s"Forecaster: time parameter vector tt has size $tt.dim") - - /** As seen from class WeightedMovingAverage, the missing signatures are as follows. - * For convenience, these are usable as stub implementations. - */ - def crossValidate(k: Int, rando: Boolean): Array[scalation.mathstat.Statistic] = ??? - def getX: scalation.mathstat.MatrixD = ??? - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the maximum lag used by the model (its capacity to look into the past). - * Models that use more than one past value to make predictions/forecasts must - * override this method, e.g., ARMA (2, 3) should set the cap to max(p, q) = 3. - */ - def cap: Int = 1 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the used response vector y. Used by derived classes where y may be - * transformed, e.g., `ARX`. - */ - def getY: VectorD = y - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the prediction vector yp. - */ - def getYp: VectorD = - if yf == null then flaw ("getYp", "can't access, since yf is null, call `predictAll` first") - yf(?, 1) - end getYp - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the FORECAST MATRIX yf (initially allocated in `predictAll` method). 
- */ - def getYf: MatrixD = yf - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the feature/variable names. Override for models like SARIMAX. - */ - def getFname: Array [String] = Array ("no-x features") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given a time series y_, train the forecasting function y_ = f(lags (y_)) + e, - * where f(lags (y_)) is a function of the lagged values of y_, - * by fitting its parameters. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the testing/full response/output vector (e.g., full y) - */ - def train (x_null: MatrixD, y_ : VectorD): Unit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of a forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. - * Must override to get Quality of Fit (QoF). - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the actual testing/full response/output vector - */ - def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val yp = predictAll (y_) // make all predictions - assert (y_.dim == yp.dim) // make sure the vector sizes agree - new Plot (null, y_, yp, s"test: Plot of y_, yp for $modelName vs. t", true) - (yp, null) // override to get QoF - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train and test the forecasting model y_ = f(y-past) + e and report its QoF - * and plot its predictions. Return the predictions and QoF. 
- * @param y_ the training/full response/output vector (defaults to full y) - * @param yy the testing/full response/output vector (defaults to full y) - */ - def trainNtest (y_ : VectorD = y)(yy: VectorD = y): (VectorD, VectorD) = - train (null, y_) // train the model on training set - val (yp, qof) = test (null, yy) // test the model on testing set - println (report (qof)) // report on Quality of Fit (QoF) - println (s"mase = ${Fit.mase (y, yp)}") // Means Absolute Scaled Error - (yp, qof) - end trainNtest - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a forecasting model y_ = f(lags (y_)) + e - * and RETURN (1) aligned actual values, (2) its forecasts and (3) QoF vector. - * Testing may be in-sample (on the training set) or out-of-sample (on the testing set) - * as determined by the parameters passed in. Note: must call train and forecastAll - * before testF. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) // actual, forecasted, qof - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set up testing by making h-steps ahead FORECASTS, and then aligning actual - * and forecasted values. Helper method for implementations of testF method. - * DROP the first h elements. - * @param y_ the testing/full response/output vector - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param doPlot whether to plot predicted and actual values vs. 
time t - */ - protected def testSetupF (y_ : VectorD, h: Int, doPlot: Boolean = true): (VectorD, VectorD) = - if yf == null || yf.dim2 < h+2 then yf = forecastAll (y_, h) - val yh = yf(?, h) // get column h of yf (y-forecasted) - val yy = y_(h-1 until y_.dim) - val yfh = yh(h-1 until y_.dim) // align actual and forecasted vectors - assert (yy.dim == yfh.dim) // make sure the vector sizes agree - if doPlot then new Plot (null, yy, yfh, s"Plot of yy, yfh for $modelName (h = $h) vs. t", true) - (yy, yfh) - end testSetupF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the hyper-parameters. - */ - def hparameter: HyperParameter = hparam - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the vector of parameter/coefficient values (they are model specific). - * Override for models with parameters. - */ - def parameter: VectorD = new VectorD (0) // vector with no elements - def nparams: Int = parameter.dim // number of parameters - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the vector of residuals/errors. - */ - def residual: VectorD = { if e == null then flaw ("residual", "must call test method first"); e } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The standard signature for prediction does not apply to time series. - */ - def predict (z: VectorD): Double = - throw new UnsupportedOperationException ("predict (VectorD) use an alternative below") - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * y_t+1 = f (y_t, ...) 
+ e_t+1 - * @param t the time point from which to make prediction - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict all values corresponding to the given time series vector y_. - * @param y_ the actual values to use in making predictions - def predictAll (y_ : VectorD): VectorD = - val yp = new VectorD (y_.dim + 1) // can't predict first day (set to 0) - for t <- y_.indices do yp(t+1) = predict (t, y_) - yp - end predictAll - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict all values corresponding to the given time series vector y_. - * Create FORECAST MATRIX yf and return PREDICTION VECTOR yp as second (1) column - * of yf with last value removed. - * @see `forecastAll` to forecast beyond horizon h = 1. - * @param y_ the actual time series values to use in making predictions - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def predictAll (y_ : VectorD): VectorD = - yf = new MatrixD (y_.dim + 1, 3) // forecasts for all time points t & horizons to h - for t <- y_.indices do yf(t, 1) = y_(t) // first (0) column holds the actual time series values - for t <- yf.indices do yf(t, 2) = t // last (2) column holds time (logical day) - - val yy = WeightedMovingAverage.backcast (y_) +: y_ // prepend by adding one backcasted value - for t <- yy.indices do yf(t, 1) = predict (t, yy) // use model to make predictions -// debug ("predictAll", s"y_.dim = ${y_.dim}, yf.dims = ${yf.dim}") - yf(?, 1)(0 until y_.dim) // return yp: first horizon only - end predictAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. 
- * Note, must create the yf matrix before calling the forecast method. - * Intended to work with rolling validation (analog of predict method) - * @param t the time point from which to make forecasts - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making predictions - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, y_ : VectorD, h: Int): VectorD - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign into FORECAST MATRIX and return the h-steps ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast intervals for all y_.dim time points at horizon h (h-steps ahead). - * Create prediction intervals (two vectors) for the given time points at level p. - * Caveat: assumes errors follow a Normal distribution. Override this method - * to handle other cases. 
- * @param y_ the aligned actual values to use in making forecasts - * @param yfh the forecast vector at horizon h - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param p the level (1 - alpha) for the prediction interval - */ - def forecastAtI (y_ : VectorD, yfh: VectorD, h: Int, p: Double = 0.9): (VectorD, VectorD) = - debug ("forecastAtI", s"for h = $h: y_.dim = ${y_.dim}, yfh.dim = ${yfh.dim}") - val sig_h = (y_ - yfh).stdev // standard error of estimate at horizon h - val width = z_sigma (sig_h, p) // interval half width - (yfh - width, yfh + width) // return lower and upper bounds - end forecastAtI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the FORECAST MATRIX yf, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time series values). - * last column, yf(?, h+1), is set to t (the time values, for reference). - * Forecast recursively down diagonals in the yf forecast matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. - * FIX - merge the forecast matrices used by predictAll and forecastAll. 
- * @param y_ the actual values to use in making forecasts - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAll (y_ : VectorD, h: Int): MatrixD = -/* - val yp_ = yf(?, 1) // pull predictions via predictAll's yf column 1 - yf = new MatrixD (y_.dim + h, h + 2) // forecasts for all time points t & horizons to h - debug ("forecastAll", s"y_.dim = ${y_.dim}, yp_.dim = ${yp_.dim}, e.dim = ${e.dim}, yf.dims = ${yf.dims}") - - for t <- y_.indices do yf(t, 0) = y_(t) // first column (0) holds the actual time series values - for t <- yp_.indices do yf(t, 1) = yp_(t) // second column (1) holds the predictions (h = 1) - for t <- yf.indices do yf(t, h+1) = t // last column (h+1) holds time (logical day) -*/ - yf = makeForecastMatrix (y_, yf(?, 1), h) // make forecast matrix yf from prediction matrix yf - for k <- 2 to h do forecastAt (yf, y_, k) // forecast k-steps into the future - println (s"forcastAll: yf = $yf") - yf // return matrix of forecasted values - end forecastAll - - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Make the full FORECAST MATRIX from the prediction forecast matrix (built by `prodictAll`). - * Has has more columns and a few more rows and copies all contents from the prediction - * forecast matrix. 
- * @param y_ the actual values to use in making forecasts - * @param yp_ the predicted values (h=1) to use in making forecasts - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - */ - def makeForecastMatrix (y_ : VectorD, yp_ : VectorD, h: Int): MatrixD = - val yf_ = new MatrixD (y_.dim + h, h + 2) // forecasts for all time points t & horizons to h - debug ("makeForecastMatrix", s"forecast matrix: y_.dim = ${y_.dim}, yp_.dim = ${yp_.dim} --> yf_.dims = ${yf_.dims}") - - for t <- y_.indices do yf_(t, 0) = y_(t) // first column (0) holds the actual time series values - for t <- yp_.indices do yf_(t, 1) = yp_(t) // second column (1) holds the predictions (h = 1) - for t <- yf_.indices do yf_(t, h+1) = t // last column (h+1) holds time (logical day) - yf_ - end makeForecastMatrix - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform forward selection to find the most predictive variable to add the - * existing model, returning the variable to add and the new model. - * May be called repeatedly. - * Note, all lags up and including 'p|q' define the model. - * @see `Fit` for index of QoF measures. - * @param cols the lags/columns currently included in the existing model (currently ignored) - * @param idx_q index of Quality of Fit (QoF) to use for comparing quality - */ - def forwardSel (cols: Set [Int], idx_q: Int = QoF.rSqBar.ordinal): (Int, Forecaster) = ??? - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform forward selection to find the most predictive lags/variables to have - * in the model, returning the variables added and the new Quality of Fit (QoF) - * measures for all steps. - * @see `Fit` for index of QoF measures. 
- * @param idx_q index of Quality of Fit (QoF) to use for comparing quality - * @param cross whether to include the cross-validation QoF measure (currently ignored) - */ - def forwardSelAll (idx_q: Int = QoF.rSq.ordinal, cross: Boolean = false): (Set [Int], MatrixD) = - val rSq = new MatrixD (MAX_LAGS, 3) // R^2, R^2 Bar, R^2 cv - val cols = Set (1) // start with lag1 in model - - println (s"forwardSelAll (l = 0): cols = $cols") - breakable { - for l <- 2 until MAX_LAGS do - val (j, mod_j) = forwardSel (cols, idx_q) // add most predictive variable - if j == -1 then break () - cols += j // add variable x_j -// val fit_j = mod_j.fit - val fit_j = mod_j.test (null, y)._2 - rSq(l) = Fit.qofVector (fit_j, null) // use new model, mod_j, no cross - val k = cols.size - 1 - println (s"==> forwardSelAll (l = $l): add (#$k) variable $j, cols = $cols, qof = ${fit_j(idx_q)}") - end for - } // breakable - - (cols, rSq(0 until cols.size)) - end forwardSelAll - -end Forecaster - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Forecaster` companion object provides methods useful for classes extending - * the `Forecaster` trait, i.e., forecasting models with a single input variable. - */ -object Forecaster: - -// private val debug = debugf ("Forecaster", true) // debug function - private val flaw = flawf ("Forecaster") // flaw function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Point out the differences between two vectors/time series. 
- * @param u the first vector/time series - * @param v the second vector/time series - * @param scale the scale factor to set the tolerance 'tol' - * @param allow flag indicating whether allow (via assert) any differences - */ - def differ (u: VectorD, v: VectorD, scale: Double = 1E-9, allow: Boolean = true): Int = - if u.dim != v.dim then flaw ("differ", s"requires u.dim = ${u.dim} = v.dim = ${v.dim}") - val tol = u.mean * scale - var cnt = 0 -// for t <- u.indices if u(t) !=~ v(t) do // machine epsilon - for t <- u.indices if abs (u(t) - v(t)) > tol do // application tolerance - cnt += 1 - println (s"differ at t = $t: ${u(t)} \t ${v(t)}") - end for - banner (s"differ (u, v): found $cnt points that differ") - if ! allow then assert (cnt == 0) - cnt - end differ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Check the first two columns (horizon 0 and 1) of the forecast matrix yf. - * Assertions will terminate the program when these disagree with [y, yp]. - * Note, =~ is approximately equals. - * @param yf the forecast matrix (time x horizons) - * @param y the actual time series values - * @param yp the values from the predict method - * @param show_yf whether to show the whole forecast matrix yf - def checkForecastMatrix (yf: MatrixD, y: VectorD, yp: VectorD, show_yf: Boolean = false): Unit = - if show_yf then println (s"yf = $yf") - banner (s"checkForecastMatrix: yf.dims = ${yf.dims}, y.dim = ${y.dim}, yp.dim = ${yp.dim}") - val m = y.dim - val yf0 = yf(?, 0)(0 until m) - val yf1 = yf(?, 1)(1 until m) // FIX: in ARX -- yp is shifted (0, m-1) - debug ("checkForecastMatrix", "yf0 vs. y"); differ (yf0, y) - debug ("checkForecastMatrix", "yf1 vs. 
yp"); differ (yf1, yp) - assert (yf0 =~ y) // zeroth forecast = actual values - assert (yf1 =~ yp) // first forecast = predicted values - end checkForecastMatrix - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Evaluate the quality of point and optionally interval forecast for horizon (h = 1 to hh). - * @param mod the forecasting model to be evaluated - * @param y the actual time series values - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param ints whether to evaluate prediction interval forecasts as well as point forecasts - */ - def evalForecasts (mod: Forecaster & Fit, y: VectorD, hh: Int, ints: Boolean = false): Unit = - val ftMat = new MatrixD (hh, Fit.N_QoF) - banner (s"Evaluate ${mod.modelName}'s QoF for horizons 1 to $hh:") - - for h <- 1 to hh do - val (yy, yfh, qof) = mod.testF (h, y) // h-steps ahead forecast and its QoF - ftMat(h-1) = qof -// println (FitM.fitMap (qof, qoF_names)) // evaluate h-steps ahead forecasts - - if ints then - val (low, up) = mod.forecastAtI (yy, yfh, h) // prediction interval forecasts - val qof_all = mod.diagnose_ (yy, yfh, low, up) // fully evaluate h-steps ahead forecasts - mod.show_interval_forecasts (yy, yfh, low, up, qof_all, h) - end for - - println ("fitMap qof = ") - println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) - end evalForecasts - -end Forecaster - diff --git a/src/main/scala/scalation/modeling/forecasting_old/KalmanFilter.scala b/src/main/scala/scalation/modeling/forecasting_old/KalmanFilter.scala deleted file mode 100644 index 17171295e..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/KalmanFilter.scala +++ /dev/null @@ -1,169 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Hao Peng - * @version 2.0 - * @date Sun Sep 13 20:37:41 EDT 2015 - * @see LICENSE (MIT style license file). 
- * - * @note Model: Kalman Filter - * - * @see web.mit.edu/kirtley/kirtley/binlustuff/literature/control/Kalman%20filter.pdf - * @see en.wikipedia.org/wiki/Kalman_filter - */ - -package scalation -package modeling -package forecasting_old - -import scalation.mathstat._ -import scalation.random.NormalVec - -// FIX: needs more thorough testing and estimation for matrices - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `KalmanFilter` class is used to fit state-space models. - * x_t = F x_t-1 + G u_t + w_t (State Equation) - * z_t = H x_t + v_t (Observation/Measurement Equation) - * @param x0 the initial state vector - * @param ff the state transition matrix (F) - * @param hh the observation matrix (H) - * @param qq the process noise covariance matrix (Q) - * @param rr the observation noise covariance matrix (R) - * @param gg the optional control-input matrix (G) - * @param u the optional control vector - */ -class KalmanFilter (x0: VectorD, ff: MatrixD, hh: MatrixD, qq: MatrixD, rr: MatrixD, - gg: MatrixD = null, u: VectorD = null): - - private val MAX_ITER = 20 // maximum number of iterations - private val doPlot = true // flag for drawing plot - private val n = ff.dim // dimension of the state vector - private val _0 = VectorD (n) // vector of 0's - private val ii = MatrixD.eye (n, n) // identity matrix - private val fft = ff.transpose // transpose of ff - private val hht = hh.transpose // transpose of hh - private var x = x0 // the state estimate - private var pp = new MatrixD (n, n) // the covariance estimate - - val traj = if doPlot then new MatrixD (MAX_ITER, n+1) else new MatrixD (0, 0) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict the state of the process at the next time point. 
- */ - def predict (): Unit = - x = ff * x // new predicted state - if u != null && gg != null then x += gg * u // if using control - pp = ff * pp * fft + qq // new predicted covariance - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the state and covariance estimates with the current and possibly - * noisy measurements - * @param z current measurement/observation of the state - */ - def update (z: VectorD): Unit = - val y = z - hh * x // measurement residual - val ss = hh * pp * hht + rr // residual covariance - val kk = pp * hht * ss.inverse // optimal Kalman gain - x = x + kk * y // updated state estimate - pp = (ii - kk * hh) * pp // updated covariance estimate - end update - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Iteratively solve for x using predict and update phases. - * @param dt the time increment (delta t) - * @param u the control vector - */ - def solve (dt: Double, u: VectorD = null): VectorD = - var t = 0.0 // initial time - - for k <- 0 until MAX_ITER do - - t += dt // advance time - if doPlot then traj(k) = x :+ t // add current time t, state x to trajectory - - // predict - predict () // estimate new state x and covariance pp - - // update - val v = NormalVec (_0, rr).gen // observation noise - FIX - should work in trait - val z = hh * x + v // new observation - - update (z) - end for - x - end solve - -end KalmanFilter - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `kalmanFilterTest` main function is used to test the `KalmanFilter` class. 
- * @see en.wikipedia.org/wiki/Kalman_filter - * > runMain scalation.modeling.forecasting.kalmanFilterTest - */ -@main def kalmanFilterTest (): Unit = - - banner ("KalmanFilterTest") - - val dt = 0.1 // time increment (delta t) - val var_a = 0.5 // variance of uncontrolled acceleration a - val var_z = 0.5 // variance from observation noise - - val ff = MatrixD ((2, 2), 1.0, dt, // transition matrix - 0.0, 1.0) - - val hh = MatrixD ((1, 2), 1.0, 0.0) - - val qq = MatrixD ((2, 2), dt~^4/4, dt~^3/2, - dt~^3/2, dt~^2) * var_a - - val rr = MatrixD ((1, 1), var_z) - - val x0 = VectorD (0.0, 0.0) - - val kf = new KalmanFilter (x0, ff, hh, qq, rr) - - println ("solve = " + kf.solve (dt)) - println ("traj = " + kf.traj) - - new Plot (kf.traj(?, 2), kf.traj(?, 0), kf.traj(?, 1)) - -end kalmanFilterTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `kalmanFilterTest2` main function is used to test the `KalmanFilter` class. - * @see https://faculty.washington.edu/ezivot/econ584/notes/statespacemodels.pdf - * > runMain scalation.modeling.forecasting.kalmanFilterTest2 - * -@main def kalmanFilterTest2 (): Unit = - - banner ("KalmanFilterTest: AR(2)") - - val dt = 0.1 // time increment (delta t) - val var_a = 0.5 // variance of uncontrolled acceleration a - val var_z = 0.5 // variance from observation noise - - val ff = MatrixD ((2, 2), phi1, phi2, // transition matrix - 1.0, 0.0) - - val hh = MatrixD ((1, 2), 1.0, 0.0) - - val qq = MatrixD ((2, 2), dt~^4/4, dt~^3/2, - dt~^3/2, dt~^2) * var_a - - val rr = MatrixD ((1, 1), var_z) - - val x0 = VectorD (0.0, 0.0) - - val kf = new KalmanFilter (x0, ff, hh, qq, rr) - - println ("solve = " + kf.solve (dt)) - println ("traj = " + kf.traj) - - new Plot (kf.traj(?, 2), kf.traj(?, 0), kf.traj(?, 1)) - -end kalmanFilterTest2 - */ - diff --git a/src/main/scala/scalation/modeling/forecasting_old/NullModel.scala b/src/main/scala/scalation/modeling/forecasting_old/NullModel.scala deleted file 
mode 100644 index c8d2c2d69..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/NullModel.scala +++ /dev/null @@ -1,221 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). - * - * @note Model: Null Model (guess the mean) - * Also known as the Mean Model - */ - -package scalation -package modeling -package forecasting_old - -import scalation.mathstat._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `NullModel` class provides basic time series analysis capabilities for - * NullModel models. NullModel models are often used for forecasting. - * Given time series data stored in vector y, its next value y_t+1 = y(t+1) - * may be predicted based on prior value of y and its noise: - * - * y_t+1 = mu_y + e_t+1 - * - * where mu_y is the mean of y and e_t+1 is the new residual/error term. - * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters (none => use null) - */ -class NullModel (y: VectorD, tt: VectorD = null, hparam: HyperParameter = null) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = 1, df = y.dim): - - private val debug = debugf ("NullModel", true) // debug function - private val flaw = flawf ("NullModel") // flaw function - m = y.dim // number of time points (@see `FitM`) - private var mu = NO_DOUBLE // the relevant sample mean of y - - modelName = s"NullModel" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit a `NullModel` model to the times-series data in vector y_. 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response/output vector - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - m = y_.dim // length of relevant time-series - makeCorrelogram (y_) // correlogram computes psi matrix - mu = y_(1 until y_.dim).mean // record the relevant sample mean (check rSq = 0) - debug ("train", s"parameters for $modelName = $parameter") // [mu] - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of a NullModel forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the testing/full response/output vector - */ - override def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yp, no_qof) = super.test (null, y_) // call super.test for predictions - resetDF (1, y_.dim - 1) // reset the degrees of freedom - (yp, diagnose (y_, yp)) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a NullModel forecasting model y_ = f(lags (y_)) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, h) // get and align actual and forecasted values - resetDF (1, yy.dim - 1) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// differ (yy, yfh) // uncomment for debugging - (yy, yfh, diagnose (yy, yfh)) // return predictions and QoF vector - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the Null Model model. - */ - override def parameter: VectorD = VectorD (mu) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * y_t+1 = mu_y - * @param t the time point from which to make prediction - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = mu // predict using the mean value - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. 
- * Intended to work with rolling validation (analog of predict method) - * @param t the time point from which to make forecasts - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making predictions - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - val yd = new VectorD (h) // hold forecasts for each horizon - for k <- 1 to h do - yf(t+k, k) = mu // forecast down the diagonal - yd (k-1) = mu // record diagonal values - end for - yd // return forecasts for each horizon - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to FORECAST MATRIX and return h-step ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - val h1 = h - 1 - - yf(h1, h) = mu // first forecast is special case - - for t <- y_.indices do // make forecasts over all time points for horizon k - yf(t+h, h) = mu // forecast down the diagonal - training mean - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end NullModel - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `NullModel` companion object provides factory methods for the `NullModel` class. 
- */ -object NullModel: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `NullModel` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = null): NullModel = - new NullModel (y, tt, hparam) - end apply - -end NullModel - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `nullModelTest` main function tests the `NullModel` class on simulated data. - * Test predictions (one step ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.nullModelTest - */ -@main def nullModelTest (): Unit = - - val y = makeTSeries () // create simulated time-series (see `Stationary`) - - banner (s"Test Predictions: NullModel on simulated time-series") - val mod = new NullModel (y) // create model for time series data Null Model - mod.trainNtest ()() // train and test on full dataset - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end nullModelTest - -import forecasting.Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `nullModelTest2` main function tests the `NullModel` class on real data: - * Forecasting lake levels. - * Test predictions (one step ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.nullModelTest2 - */ -@main def nullModelTest2 (): Unit = - - banner (s"Test Predictions: NullModel on LakeLevels Dataset") - val mod = new NullModel (y) // create model for time series data - mod.trainNtest ()() // train and test on full dataset - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end nullModelTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `nullModelTest3` main function tests the `NullModel` class on real data: - * Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.nullModelTest3 - */ -@main def nullModelTest3 (): Unit = - - val hh = 3 // maximum forecasting horizon - - banner (s"Test Forecasts: NullModel on LakeLevels Dataset") - val mod = new NullModel (y) // create model for time series data - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll (y, hh) // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, y, hh, true) - -end nullModelTest3 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/QuadSpline.scala b/src/main/scala/scalation/modeling/forecasting_old/QuadSpline.scala deleted file mode 100644 index 2f1cf07ae..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/QuadSpline.scala +++ /dev/null @@ -1,292 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Tue May 11 16:25:40 EDT 2021 - * @see LICENSE (MIT style license file). 
- * - * @note Model: Quadratic Spline - */ - -// U N D E R D E V E L O P M E N T - -// FIX - forecast matrix looks like random work with a extra shift - -package scalation -package modeling -package forecasting_old - -import scala.math.max - -import scalation.mathstat._ - -import QuadSpline.splineFit - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QuadSpline` class fits quadratic splines to time-series data that are equally - * spaced in time. A sliding window consisting of three data points is perfectly fit - * to a quadratic curve. - * - * y_t = a + bt + ct^2 - * - * Note, slope matching and smoothness issues are ignored. - * @see wordsandbuttons.online/quadratic_splines_are_useful_too.html - * Any time point from t = 3 to the end of time series may be forecasted. - * FIX -- check the degrees of freedom (df) - * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters (none => use null) - */ -class QuadSpline (y: VectorD, tt: VectorD = null, hparam: HyperParameter = null) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = 3, df = y.dim - 3): - - private val flaw = flawf ("QuadSpline") // flaw function - - modelName = "QuadSpline" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit a `QuadSpline` model to the times-series data in vector y_. - * Note: for `QuadSpline` there are no parameters to train. - * @param x_null the data/input matrix (ignored) - * @param y_ the response/output vector (currently only works for y) - */ - override def train (x_null: MatrixD, y_ : VectorD): Unit = { } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of a QuadSpline forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. 
Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. - * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the training/testing/full response/output vector - */ - override def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yp, no_qof) = super.test (null, y_) // call super.test for predictions - resetDF (3, y_.dim - 3) // reset the degrees of freedom - (yp, diagnose (y_, yp)) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a QuadSpline forecasting model y_ = f(lags (y_)) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, h) // get and align actual and forecasted values - resetDF (3, yy.dim - 3) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// Forecaster.differ (yy, yfh) // uncomment for debugging - (yy, yfh, diagnose (yy, yfh)) // return aligned actual, forecasted and QoF vectors - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector (none, so return an empty vector). 
- */ - override def parameter: VectorD = new VectorD (0) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast based on the quadratic - * curve fit to the previous three values: y_t-2, y_t-1, y_t. - * - * y_t+1 = a + bt + ct^2 - * - * @param t the time point from which to make prediction - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - if t >= 2 then - val (a, b, c) = splineFit (t, y_(t-2), y_(t-1), y_(t)) // Quadratic Fit - a + b*t + c*t~^2 - else - y_(t) - end if -// if t < 2 || t >= y_.dim then y_.last - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. - * Intended to work with rolling validation (analog of predict method) - * @param t the time point from which to make forecasts - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making predictions - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - val yd = new VectorD (h) // hold forecasts for each horizon - for k <- 1 to h do - val pred = predict (t+k-1, y_) - yf(t+k, k) = pred // forecast down the diagonal - yd(k-1) = pred // record diagonal values - end for - yd // return forecasts for each horizon - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to FORECAST MATRIX and return h-step ahead forecast. 
- * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - val h1 = h - 1 - - yf(h1, h) = yf(h1-1, h1) // first forecast is special case - - for t <- y_.indices do // make forecasts over all time points for horizon k - if t > 2 then - val (a, b, c) = splineFit (t, yf(t+h-3, max0 (h-3)), // Quadratic Fit - yf(t+h-2, max0 (h-2)), - yf(t+h-1, max0 (h-1))) - yf(t+h, h) = a + b*t + c*t~^2 - else if t == 2 then - val (a, b) = splineFit (t, yf(t+h-2, max0 (h-2)), // Linear Fit - yf(t+h-1, max0 (h-1))) - yf(t+h, h) = a + b*t - else - yf(t+h, h) = yf(0, 0) - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end QuadSpline - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QuadSpline` companion object provides factory methods for the `QuadSpline` class. - */ -object QuadSpline: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `QuadSpline` object that predicts using a quadratic curve that - * fits the last three point. - * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters (none => use null) - */ - def apply (y: VectorD, tt: VectorD, hparam: HyperParameter = null): QuadSpline = - new QuadSpline (y, tt, hparam) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on time t and three points y_t-2, y_t-1, y_t, determine values for the - * coefficients a, b and c. 
- * @param t the time point - * @param y_2 the response value 2 time units in the past - * @param y_1 the response value 1 time units in the past - * @param y_0 the current response value - */ - def splineFit (t: Double, y_2: Double, y_1: Double, y_0: Double): (Double, Double, Double) = - val c = 0.5 * (y_0 - 2*y_1 + y_2) - val b = 0.5 * (y_0 - y_2 - 4*c*t) - val a = y_1 - b*t - c*t*t - (a, b, c) - end splineFit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on time t and two points y_t-1, y_t, determine values for the - * coefficients a and b. - * @param t the time point - * @param y_1 the response value 1 time units in the past - * @param y_0 the current response value - */ - def splineFit (t: Double, y_1: Double, y_0: Double): (Double, Double) = - val b = y_1 - y_0 - val a = y_1 - b*t - (a, b) - end splineFit - -end QuadSpline - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `quadSplineTest` main function tests the `QuadSpline` class on simulated data. - * Test predictions (one step ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.quadSplineTest - */ -@main def quadSplineTest (): Unit = - - val y = makeTSeries () // create simulated time-series (see `Stationary`) - - banner (s"Test Predictions: QuadSpline on simulated time-series") - val mod = new QuadSpline (y) // create model for time series data AR(1) - mod.trainNtest ()() // train and test on full dataset - -end quadSplineTest - -import forecasting.Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `quadSplineTest2` main function is used to test the `QuadSpline` class. - * Forecasting lake levels. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.quadSplineTest2 - */ -@main def quadSplineTest2 (): Unit = - - banner ("RandomWalk Model for Lake Levels Dataset") - val rw = new RandomWalk (y) // create a random walk model - val (yp, qof) = rw.trainNtest ()() // train and test on full dataset - - banner ("QuadSpline Model for Lake Levels Dataset") - val mod = new QuadSpline (y) // create a quadratic spline model - val (yp2, qof2) = mod.trainNtest ()() // train and test on full dataset - - val mix = (yp + yp2) * 0.5 - new Plot (null, y(1 until y.dim), mix, "Mix: y vs. mix", lines = true) - -end quadSplineTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `quadSplineTest3` main function tests the `QuadSpline` class on real data: - * Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.quadSplineTest3 - */ -@main def quadSplineTest3 (): Unit = - - val hh = 3 // maximum forecasting horizon - - banner (s"Test Forecasts: QuadSpline on LakeLevels Dataset") - val mod = new QuadSpline (y) // create model for time series data - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll (y, hh) // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, y, hh, true) - -end quadSplineTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `quadSplineTest4` main function is used to test the `QuadSpline` class. - * Forecasting Fibonacci numbers. 
- * > runMain scalation.modeling.forecasting.quadSplineTest4 - */ -@main def quadSplineTest4 (): Unit = - - val y = VectorD (1, 2, 3, 5, 8, 13, 21, 34, 55, 89) - - banner ("RandomWalk Model") - val rw = new RandomWalk (y) // create a random walk model - rw.trainNtest ()() // train and test on full dataset - - banner ("QuadSpline Model") - val mod = new QuadSpline (y) // create a quadratic spline model - mod.trainNtest ()() // train and test on full dataset - -end quadSplineTest4 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/README.txt b/src/main/scala/scalation/modeling/forecasting_old/README.txt deleted file mode 100644 index b607bbb81..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/README.txt +++ /dev/null @@ -1,37 +0,0 @@ - -This package consists of older and experimental code. -End users should use the `forecasting` package - -Simple Univariate Time Series Models ------------------------------------- - -Utilities: - -Forecaster.scala -- Base Trait for Forecasters with Vector Input -RollingValidation.scala -- Rolling Validation for Forecasters -Stationarity.scala -- Unit Root Tests for Time Series Stationarity -Stationarity_KPSS.scala -- Kwiatkowski–Phillips–Schmidt–Shin (KPSS) Test for Stationarity - -Baseline: - -NullModel.scala -- Null/Mean Model: - y_t = mean -RandomWalk.scala -- Random Walk/Last-Value Model: - y_t = y_t-1 -MovingAverage.scala -- Simple Moving Average Model: - y_t = mean of last q values -TrendModel.scala -- Linear Trend Model: - y_t = a + b t - -Simple Models: - -SimpleExpSmoothing.scala -- Simple Exponential Smoothing (SES) Model: - s_t = α y_t-1 + (1-α)s_t-1; y_t = s_t -QuadSpline.scala -- Quadratic Spline Model: - y_t = y_t = a + b t + c t^2 -AR.scala -- Auto-Regressive (AR) Model: - y_t = δ + φ_1 y_t-1 + ... + φ_p y_t-p -ARMA.scala -- Auto-Regressive, Moving-Average (ARMA) Model: - y_t = δ + φ_1 y_t-1 + ... + φ_p y_t-p - + θ_1 e_t-1 + ... 
+ θ_q e_t-q - diff --git a/src/main/scala/scalation/modeling/forecasting_old/RandomWalk.scala b/src/main/scala/scalation/modeling/forecasting_old/RandomWalk.scala deleted file mode 100644 index 9576b6851..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/RandomWalk.scala +++ /dev/null @@ -1,260 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). - * - * @note Model: Random Walk (guess previous value) - * Also known as the Naive Model - */ - -package scalation -package modeling -package forecasting_old - -import scalation.mathstat._ - -import scalation.scala2d.writeImage - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RandomWalk` class provides basic time series analysis capabilities for - * RandomWalk models. RandomWalk models are often used for forecasting. - * Given time series data stored in vector y, its next value y_t+1 = y(t+1) - * may be predicted based on its past value of y: - * - * y_t+1 = y_t + e_t+1 - * - * where y_t is the previous value of y and e_t+1 is the new residual/error term. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters (none => use null) - */ -class RandomWalk (y: VectorD, tt: VectorD = null, hparam: HyperParameter = null) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = 1, df = y.dim - 1): - - private val debug = debugf ("RandomWalk", true) // debug function - private val flaw = flawf ("RandomWalk") // flaw function - - modelName = s"RandomWalk" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit a `RandomWalk` model to the times-series data in vector y_. 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - m = y_.dim // length of relevant time-series - makeCorrelogram (y_) // correlogram computes psi matrix - debug ("train", s"parameters for $modelName = $parameter") // [] - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of a RandomWalk forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. Testing may be in-sample - * (on the full set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the testing/full response/output vector - */ - override def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yp, no_qof) = super.test (null, y_) // call super.test for predictions - resetDF (1, y_.dim - 1) // reset the degrees of freedom - (yp, diagnose (y_, yp)) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a RandomWalk forecasting model y_ = f(lags (y_)) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, h) // get and align actual and forecasted values - resetDF (1, yy.dim - 1) // reset the degrees of freedom - debug ("testF", s" for h = $h: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// Forecaster.differ (yy, yfh) // uncomment for debugging - (yy, yfh, diagnose (yy, yfh)) // return actual, forecasted and QoF vectors - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * - * y_t+1 = y_t - * - * @param t the time point from which to make prediction - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = y_(t) // predict using the prior value - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. 
- * Intended to work with rolling validation (analog of predict method) - * @param t the time point from which to make forecasts - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making predictions - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - - val yd = new VectorD (h) // hold forecasts for each horizon - for k <- 1 to h do - yf(t+k, k) = y_(t) // forecast down the diagonal - yd(k-1) = y_(t) // record diagonal values - end for - yd // return forecasts for each horizon - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to FORECAST MATRIX and return h-step ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - val h1 = h - 1 - - yf(h1, h) = yf(h1-1, h1) // first forecast is special case - - for t <- y_.indices do // make forecasts over all time points for horizon k - yf(t+h, h) = yf(t+h-1, h-1) // forecast down the diagonal - previous value - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end RandomWalk - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RandomWalk` companion object provides factory methods for the `RandomWalk` class. 
- */ -object RandomWalk: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `RandomWalk` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = null): RandomWalk = - new RandomWalk (y, tt, hparam) - end apply - -end RandomWalk - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `randomWalkTest` main function tests the `RandomWalk` class on simulated data. - * Test predictions (one step ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest - */ -@main def randomWalkTest (): Unit = - - val y = makeTSeries () // create simulated time-series (see `Stationary`) - - banner (s"Test Predictions: Random Walk on simulated time-series") - val mod = new RandomWalk (y) // create model for time series data Random Walk Model - mod.trainNtest ()() // train and test on full dataset - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end randomWalkTest - -import forecasting.Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `randomWalkTest2` main function tests the `RandomWalk2` class on real data: - * Forecasting lake levels. It compares with the other baselines: `NullModel` and `TrendModel`. - * Test predictions (one step ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.randomWalkTest2 - */ -@main def randomWalkTest2 (): Unit = - - banner (s"Test Predictions: RandomWalk on LakeLevels Dataset") - val mod = new RandomWalk (y) // create a Random Walk Model - val (yp, qof) = mod.trainNtest ()() // train and test on full dataset - - banner (s"Test Predictions: NullModel on LakeLevels Dataset") - val nm = new NullModel (y) // create a Null Model - val (yp2, qof2) = nm.trainNtest ()() // train and test on full dataset - - banner (s"Test Predictions: TrendModel on LakeLevels Dataset") - val tm = new TrendModel (y) // create a Trend Model - val (yp3, qof3) = tm.trainNtest ()() // train and test on full dataset - - val yy = y(1 until y.dim) - val plot = new PlotM (null, MatrixD (yy, yp, yp2, yp3), Array ("y", "y-RW", "y-NM", "y-TM"), - "Compare Baseline Models", lines = true) - - writeImage (DATA_DIR + "plot_ts_baseline.png", plot) - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end randomWalkTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `randomWalkTest3` main function tests the `RandomWalk` class on real data: - * Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.randomWalkTest3 - */ -@main def randomWalkTest3 (): Unit = - - val hh = 3 // maximum forecasting horizon - - banner (s"Test Forecasts: RandomWalk on LakeLevels Dataset") - val mod = new RandomWalk (y) // create model for time series data - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll (y, hh) // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, y, hh, true) - -end randomWalkTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `randomWalkTest4` main function tests the `RandomWalk` class on real data: - * Forecasting COVID-19. Test forecasts (1 to h steps ahead forecasts). - * > runMain scalation.modeling.forecasting.randomWalkTest4 - */ -@main def randomWalkTest4 (): Unit = - - val exo_vars = Array.ofDim [String] (0) // no exogenous variables in this case - val (xx, yy) = forecasting.Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val rat = 0.5 // set train-test ratio - val cut = (rat * yy.dim).toInt - val y = yy(cut until yy.dim) // throw away the training part (no training for RW) - - val hh = 4 // forecasting horizon (weeks 1 to 4) - - banner (s"Test Forecasts: RandomWalk on COVID-19 Dataset") - val mod = new RandomWalk (y) // create model for time series data - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll (y, hh) // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, y, hh, true) - -end randomWalkTest4 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/RegressionTreeGB4TS2.scala b/src/main/scala/scalation/modeling/forecasting_old/RegressionTreeGB4TS2.scala deleted file mode 100644 index f4f000475..000000000 --- 
a/src/main/scala/scalation/modeling/forecasting_old/RegressionTreeGB4TS2.scala +++ /dev/null @@ -1,433 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Fri Jun 21 23:13:48 EDT 2024 - * @see LICENSE (MIT style license file). - * - * @note Model: Gradient Boosting for Time Series - */ - -package scalation -package modeling -package forecasting_old - -import scala.math.max - -import scalation.mathstat._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RegressionTreeGB4TS2` object supports Gradient Boosting for Time Series data. - * Multi-horizon forecasting supported via the Recursive method. - * Given a response vector y, a predictor matrix x is built that consists of - * lagged y vectors. Additional future response vectors are built for training. - * - * y_t = f(x) - * - * where x = [y_{t-1}, y_{t-2}, ... y_{t-lags}]. - */ -object RegressionTreeGB4TS2: - - private val debug = debugf ("RegressionTreeGB4TS2", true) // debug function - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionTreeGB` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @param h the forecasting horizon (1, 2, ... 
h) - * @param intercept whether to add a column of all ones to the matrix (intercept) - * @param hparam the hyper-parameters (use RegressionTree.hp for default) - */ - def apply (y: VectorD, lags: Int, h: Int, intercept: Boolean = true, - hparam: HyperParameter = RegressionTree.hp): RegressionTreeGB = - val (x_, yy) = buildMatrix4TS (y, lags, h) // column for each lag - val x = if intercept then VectorD.one (yy.dim) +^: x_ else x_ // add first column of all ones - val y_ = yy(?, 0) // use first column (h = 1) - debug ("apply", s"x.dims = ${x.dims}, y_.dim = ${y_.dim}") - - val mod = new RegressionTreeGB (x, y_, null, hparam) - mod.modelName = s"RegressionTreeGB4TS2$lags" - mod - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionTreeGB` object from a response matrix. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * This method provides data rescaling. - * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @param h the forecasting horizon (1, 2, ... 
h) - * @param intercept whether to add a column of all ones to the matrix (intercept) - * @param hparam the hyper-parameters (use RegressionTree.hp for default) - */ - def rescale (y: VectorD, lags: Int, h: Int, intercept: Boolean = true, - hparam: HyperParameter = RegressionTree.hp): RegressionTreeGB = - val (x_, yy) = buildMatrix4TS (y, lags, h) // column for each lag - var x = scale (extreme (x_), (1.0, 5.0))(x_) // rescale vector x matrix to [1, 5] - if intercept then x = VectorD.one (yy.dim) +^: x // add first column of all ones - val y_ = yy(?, 0) // use first column - debug ("rescale", s"x.dims = ${x.dims}, y_.dim = ${y_.dim}") - - val mod = new RegressionTreeGB (x, y_, null, hparam) - mod.modelName = s"RegressionTreeGB4TS2$lags" - mod - end rescale - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionTreeGB` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * In addition, lagged exogenous variables are added. - * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @parax ex the input matrix for exogenous variables (one per column) - * @param h the forecasting horizon (1, 2, ... 
h) - * @param intercept whether to add a column of all ones to the matrix (intercept) - * @param hparam the hyper-parameters (use RegressionTree.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def exo (y: VectorD, lags: Int, ex: MatrixD, h: Int, - intercept: Boolean = true, hparam: HyperParameter = RegressionTree.hp) - (elag1: Int = max (1, lags / 5), - elag2: Int = max (1, lags)): RegressionTreeGB = - val (x_, yy) = buildMatrix4TS (y, lags, h) // column for each lag - var x = if intercept then VectorD.one (yy.dim) +^: x_ else x_ // add first column of all ones - val endoCols = x.dim2 - println (s"endogenous: columns = $endoCols") - - x = x ++^ ARX.makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var - println (s"exogenous: columns = ${x.dim2 - endoCols}") - - val y_ = yy(?, 0) // use first column - debug ("exo", s"x.dims = ${x.dims}, y_.dim = ${y_.dim}") - - val mod = new RegressionTreeGB (x, y_, null, hparam) - mod.modelName = s"RegressionTreeGB4TS2.exo_$lags" - mod - end exo - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split the x matrix and y matrix into training and testing sets. 
- * @param x the x data/input matrix - * @param y the y response/output matrix - * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) - */ -// def split_TnT (x: MatrixD, y: MatrixD, ratio: Double = 0.20): (MatrixD, MatrixD, MatrixD, MatrixD) = - def split_TnT (x: MatrixD, y: VectorD, ratio: Double = 0.20): (MatrixD, VectorD, MatrixD, VectorD) = - val n = x.dim - val tr_size = (n * (1.0 - ratio)).toInt - println (s"RegressionTreeGB4TS2.split_TnT: tr_size = $tr_size, te_size = ${n - tr_size}") - (x(0 until tr_size), y(0 until tr_size), x(tr_size until n), y(tr_size until n)) - end split_TnT - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Align (for the testing set) the actual response vector for comparison with - * the predicted/forecasted response vector, returning a time vector and sliced - * response vectors. - * @param tr_size the size of the intial training set - * @param y the actual response for the full dataset (to be sliced) - * @param yp the predicted response for the full dataset (to be sliced) - * @param h_ the current forecasting horizon - 1 - */ - def align (tr_size: Int, y: VectorD, yp: VectorD, h_ : Int): (VectorD, VectorD, VectorD) = - debug ("align:", s"y.dim = ${y.dim}, yp.dim = ${yp.dim}, h_ = $h_") - (VectorD.range (tr_size, y.dim - h_), y(tr_size until y.dim - h_), yp(0 until yp.dim - h_)) - end align - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TRAINING SET (tr) and a TESTING SET (te) - * as follows: [ <-- tr_size --> | <-- te_size --> ] - * This version calls predict (DIRECT) for h-steps ahead out-of-sample forecasts. 
- * @see `RollingValidation` - * @param mod the forecasting model being used (e.g., `RegressionTreeGB4TS2`) - * @param rc the retraining cycle (number of forecasts until retraining occurs) - * @param te_size the size of the testing set - */ - def rollValidate (mod: Predictor & Fit, rc: Int, te_size_ : Int): VectorD = - val x = mod.getX // get data/input matrix - val y = mod.getY // get response/output matrix - val hh = 1 - val ftMat = new MatrixD (hh, Fit.N_QoF) - banner (s"rollValidate: Evaluate ${mod.modelName}'s QoF for the horizons: 1 to $hh") - - val te_size = if te_size_ < 0 then RollingValidation.teSize (y.dim) else te_size_ // size of test set - val tr_size = y.dim - te_size // size of initial training set - debug ("rollValidate", s"y.dim = ${y.dim}, train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") - -// val yp = new MatrixD (te_size, y.dim2) // y-predicted over testing set - val yp = new VectorD (te_size) // y-predicted over testing set - for i <- 0 until te_size do // iterate through testing set - val t = tr_size + i // next time point to forecast -// if i % rc == 0 then mod.train (x(0 until t), y(0 until t)) // retrain on sliding training set (growing set) - if i % rc == 0 then mod.train (x(i until t), y(i until t)) // retrain on sliding training set (fixed size set) - yp(i) = mod.predict (x(t-1)) // predict the next value - end for - - val df = max0 (mod.parameter.dim - 1) // degrees of freedom for model - mod.resetDF (df, te_size - df) // reset degrees of freedom - - for k <- 0 until hh do // move thru each horizon 1 to h - val (t, yk, ypk) = align (tr_size, y, yp, k) // clip ending zeros (0.0 or -0.0) - debug ("rollValidate", s"horizon $k: yk.dim = ${yk.dim}, ypk.dim = ${ypk.dim}") - new Plot (t, yk, ypk, s"Plot yy, yp vs. 
t for horizon ${k+1}", lines = true) - val qof = mod.diagnose (yk, ypk) - ftMat(k) = qof -// println (FitM.fitMap (qof, qoF_names)) - end for - println ("fitMap qof = ") - println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) - yp - end rollValidate - -end RegressionTreeGB4TS2 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regressionTreeGB4TS2Test` main function tests the `RegressionTreeGB4TS2` class. - * This test is used to CHECK that the `buildMatrix4TS` function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. - * > runMain scalation.modeling.forecasting.regressionTreeGB4TS2Test - */ -@main def regressionTreeGB4TS2Test (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - val h = 3 // the forecasting horizon - - for p <- 5 to 5 do // autoregressive hyper-parameter p - banner (s"Test: RegressionTreeGB4TS2 with $p lags") - val mod = RegressionTreeGB4TS2 (y, p, h) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yy = mod.getY - val yp = mod.predict (mod.getX) - new Plot (null, yy, yp, s"yy vs. yp for ${mod.modelName} (h=1) with $p lags", lines = true) - end for - -end regressionTreeGB4TS2Test - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regressionTreeGB4TS2Test2` main function tests the `RegressionTreeGB4TS2` class on real data: - * Forecasting lake levels. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.regressionTreeGB4TS2Test2 - */ -@main def regressionTreeGB4TS2Test2 (): Unit = - - import forecasting.Example_LakeLevels.y - val h = 1 // the forecasting horizon - - for p <- 1 to 7 do // autoregressive hyper-parameter p - banner (s"Test: RegressionTreeGB4TS2 with $p lags") - val mod = RegressionTreeGB4TS2 (y, p, h) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - banner ("Predictions/Forecasts") // direct forecasting technique - val yy = mod.getY - val yf = mod.predict (mod.getX) -// for k <- yf.indices2 do - new Plot (null, yy, yf, s"yy vs. yf for ${mod.modelName} (h=1) with $p lags", lines = true) - println (s"yf = $yf") - println (s"yf.dim = ${yf.dim}") - end for - -end regressionTreeGB4TS2Test2 - -import forecasting.Example_Covid.{loadData, NO_EXO, response} - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regressionTreeGB4TS2Test3` main function tests the `RegressionTreeGB4TS2` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous variable only. - * Does In-Sample Testing (In_ST). - * Determines the terms to include in the model using Feature Selection. 
- * > runMain scalation.modeling.forecasting.regressionTreeGB4TS2Test3 - */ -@main def regressionTreeGB4TS2Test3 (): Unit = - - val LAGS = 10 // number of lags - val h = 6 // forecasting horizon - - val (ex, y) = loadData (NO_EXO, response) - val yy = y(0 until 116) // clip the flat part of the data - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample RegressionTreeGB4TS2 on COVID-19 Weekly Data") - val mod = RegressionTreeGB4TS2 (yy, LAGS, h) // create model for time series data -// val mod = RegressionTreeGB4TS2.rescale (yy, LAGS, h) // create model for time series data - scaling - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = yy.drop (1) // can't forecast first point - - new Plot (null, yy_, yp, s"${mod.modelName}, yy_ vs. yp @ h = 1", lines = true) - - val y_yp = MatrixD (yy_, yp).transpose - println (s"y_yp = $y_yp") - -// mod.forecastAll (yy, h) // FIX - to be implemented - see ARX.scala -// Forecaster.evalForecasts (mod, yy, h) // FIX - to be implemented - see ARX.scala - -/* - banner (s"Feature Selection Technique: Stepwise") - val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeGB4TS2 with tech", lines = true) -// println (mod.summary ()) - - banner ("Feature Importance") - println (s"Stepwise: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") -*/ - -end regressionTreeGB4TS2Test3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regressionTreeGB4TS2Test4` main function tests the `RegressionTreeGB4TS2` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Does In-Sample Testing (In-ST). 
- * Determines the terms to include in the model using Feature Selection. - * > runMain scalation.modeling.forecasting.regressionTreeGB4TS2Test4 - */ -@main def regressionTreeGB4TS2Test4 (): Unit = - - val LAGS = 10 // number of lags - val h = 6 // forecasting horizon - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample RegressionTreeGB4TS2.exo on COVID-19 Weekly Data") - val mod = RegressionTreeGB4TS2.exo (y, LAGS, ex, h)(1, LAGS+1) // create model for time series data - with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(LAGS until y.dim) - new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeGB4TS2 with tech", lines = true) -// println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end regressionTreeGB4TS2Test4 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regressionTreeGB4TS2Test5` main function tests the `RegressionTreeGB4TS2` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Does In-Sample Testing (In-ST). - * Determines the terms to include in the model using Feature Selection. 
- * Run Train-n-Test (TnT) Split testing on best model. - * > runMain scalation.modeling.forecasting.regressionTreeGB4TS2Test5 - */ -@main def regressionTreeGB4TS2Test5 (): Unit = - - val LAGS = 10 // number of lags - val h = 6 // forecasting horizon - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample RegressionTreeGB4TS2.exo on COVID-19 Weekly Data") - val mod = RegressionTreeGB4TS2.exo (y, LAGS, ex, h)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(LAGS until y.dim) - new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeGB4TS2 with tech", lines = true) -// println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run TnT on Best model") -// val bmod = mod.getBest._4 // get the best model from feature selection - val bmod = mod.getBest.mod.asInstanceOf [RegressionTreeGB] // get the best model from feature selection - val (x_, y_, xtest, ytest) = RegressionTreeGB4TS2.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) - new Plot (null, ytest, yptest, 
s"${mod.modelName}, ytest vs. yptest", lines = true) - -end regressionTreeGB4TS2Test5 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regressionTreeGB4TS2Test6` main function tests the `RegressionTreeGB4TS2` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Does In-Sample Testing (In-ST). - * Determines the terms to include in the model using Feature Selection. - * Run Train-n-Test (TnT) Split testing on best model using Rolling Validation. - * > runMain scalation.modeling.forecasting.regressionTreeGB4TS2Test6 - */ -@main def regressionTreeGB4TS2Test6 (): Unit = - - val LAGS = 10 // number of lags (values from past) - val rc = 1 // retraining cycle - val h = 6 // forecasting horizon - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - val te_size = RollingValidation.teSize (y.dim) - println (s"te_size = $te_size") - - banner ("Test In-Sample RegressionTreeGB4TS2.exo on COVID-19 Weekly Data") - val mod = RegressionTreeGB4TS2.exo (y, LAGS, ex, h)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(LAGS until y.dim) - new Plot (null, yy_, yp, s"${mod.modelName}, yy vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionTreeGB4TS2 with tech", lines = true) -// println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run Rolling Validation on RegressionTreeGB4TS2 Best model") -// val bmod = mod.getBest._4 // get the best model from feature selection - val bmod = mod.getBest.mod.asInstanceOf [RegressionTreeGB] // get the best model from feature selection - RegressionTreeGB4TS2.rollValidate (bmod, rc, te_size) - -end regressionTreeGB4TS2Test6 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/SimpleExpSmoothing.scala b/src/main/scala/scalation/modeling/forecasting_old/SimpleExpSmoothing.scala deleted file mode 100644 index f09b0237b..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/SimpleExpSmoothing.scala +++ /dev/null @@ -1,414 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Hao Peng - * @version 2.0 - * @date Thu Jun 13 13:13:26 EDT 2019 - * @see LICENSE (MIT style license file). 
- * - * @note Model: Simple Exponential Smoothing (SES) - * - * @see https://otexts.com/fpp2/ses.html - */ - -package scalation -package modeling -package forecasting_old - -import scala.math.min - -import scalation.mathstat._ -//import scalation.optimization.quasi_newton.{BFGS => Optimizer} // change import to change optimizer -//import scalation.optimization.quasi_newton.{LBFGS => Optimizer} -import scalation.optimization.quasi_newton.{LBFGS_B => Optimizer} -import scalation.optimization.quasi_newton.LBFGS_B.makeBounds -import scalation.random._ - -//import Fit._ -//import RollingValidation.trSize - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `SimpleExpSmoothing` class provide very basic time series analysis using - * Simple Exponential Smoothing models. The forecasted value is the weighted average - * the latest value y_t and the latest smoothed value s_t. The smoothing parameter - * α in [0, 1] causes the contributions of older values to decay exponentially. - * @see Smoothing Equation in section 7.1. - * - * s_t+1 = α y_t + (1 - α) s_t smoothing equation - * yf_t+1 = s_t+1 forecast equation - * - * where vector s is the smoothed version of vector y. 
- * @param y the response vector (original time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class SimpleExpSmoothing (y: VectorD, tt: VectorD = null, hparam: HyperParameter = SimpleExpSmoothing.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = 1, df = y.dim - 1): - - private val debug = debugf ("SimpleExpSmoothing", true) // debug function - private val flaw = flawf ("SimpleExpSmoothing") // flaw function - private val TOL = 1E-4 // tolerance - private val lo_up = makeBounds (1, 0.0, 1.05) // lower & upper bounds on α for optimizer (1.0 + slack) - - private var α = hparam ("α").toDouble // default value for the smoothing parameter - private var s = VectorD.nullv // vector of smoothed/leveled values (state) - private val sf = new VectorD (y.dim) // to hold smooth values for a forecast horizon - private var opt = true // whehther to optimize the smoothing parameter - - modelName = "SimpleExpSmoothing" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reset the smoothing parameter α. - * @param a the smoothing parameter - */ - def reset (a: Double): Unit = α = a - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Toggle the opt flag that indicates whether optimization should be used to set α. - */ - def toggleOpt (): Unit = opt = ! opt - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Smooth the time-series data y, returning the leveled/smoothed data s. - * May be viewed as unoptimized training. - * @see Smoothing Equation in section 7.1. 
- * s_t+1 = α y_t + (1 - α) s_t // smoothing equation - * @param a the smoothing parameter (decay rate for older values) - * @param y_ the response/output vector (training/full) - */ - def smooth (a: Double = α, y_ : VectorD = y): VectorD = - s = new VectorD (y_.dim) - s(0) = y(0) - for t <- 0 until y_.dim-1 do s(t+1) = a * y_(t) + (1 - a) * s(t) - s - end smooth - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the `SimpleExpSmoothing` model on the time-series data, by finding the value - * for the smoothing parameter α that minimizes the sum of squared errors sse. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the response/output vector (training/full) - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* The objective function to be minimized (sum of squared errors) using the `Optimizer` - * @param x the input vector VectorD (α) to be optimized - */ - def f_obj (x: VectorD): Double = (y_ - smooth (x(0), y_)).normSq // only one parameter - - if opt then - val optimizer = new Optimizer (f_obj, l_u = lo_up) // Bounded Quasi-Newton optimizer -// val optimizer = new Optimizer (f_obj) // Quasi-Newton optimizer - val opt = optimizer.solve (VectorD (α), toler = TOL) // optimize value for α - α = (opt._2)(0) // pull α from vector result - end if - s = smooth (α) // vector of smoothed/predicted values, with optimized α - debug ("train", s"diagnose = ${diagnose (y_, s)}") - debug ("train", s"optimal smoothing parameter α = $α") - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an SES forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. 
- * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the training/testing/full response/output vector - */ - override def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yp, no_qof) = super.test (null, y_) // call super.test for predictions - resetDF (1, y_.dim - 1) // reset the degrees of freedom - (yp, diagnose (y_, yp)) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an SES forecasting model y_ = f(lags (y_)) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, h) // get and align actual and forecasted values - resetDF (1, yy.dim - 1) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// Forecaster.differ (yy, yfh) // uncomment for debugging - (yy, yfh, diagnose (yy, yfh)) // return aligned actual, forecasted and QoF vectors - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector. - */ - override def parameter: VectorD = VectorD (α) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecasts. 
- * y_t+1 = s_t+1 - * @see predictAll in `Forecaster` - * @param t the time point/index to be predicted - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = s(min (t, s.dim-1)) -// def predict (t: Int, y_ : VectorD): Double = s(min (t+1, s.dim-1)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the vector of predicted values on the training data. - * Must call smooth or train first. - * @param y_ the actual values to use in making predictions - */ -// override def predictAll (y_ : VectorD): VectorD = s - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. - * Intended to work with rolling validation (analog of predict method) - * @param t the time point from which to make forecasts - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making predictions - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - val yd = new VectorD (h) // hold forecasts for each horizon - println (s"yf.dims = ${yf.dims}") - for k <- 1 to h do - val pred = s(min (t+k, s.dim-1)) - yf(t+k, k) = pred // forecast down the diagonal - yd(k-1) = pred // record diagonal values - end for - yd // return forecasts for each horizon - end forecast - - // FIX - pick one forecast or forecast2 - - def forecast2 (t: Int, yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - val yd = new VectorD (h+1) - - yd(0) = if t == 0 then y(0) else s(t-1) - sf(0) = s(t) - for k <- 1 to h do - yd(k) 
= sf(k-1) - sf(k) = α * yd(k-1) + (1 - α) * sf(k-1) - end for - yd(1 until yd.dim) - end forecast2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to FORECAST MATRIX and return h-step ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - val h1 = h - 1 - - yf(h1, h) = yf(h1-1, h1) // first forecast is special case - - for t <- y_.indices do // make forecasts over all time points for horizon k - yf(t+h, h) = sf(h1) - sf(h) = α * yf(t+h1, h1) + (1 - α) * sf(h1) - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all m time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, yf(?, 0) is set to y (the actual time-series values). 
- * FIX - integrate with forecastAt - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making predictions -// def forecastAll (h: Int, y_ : VectorD): MatrixD = - override def forecastAll (y_ : VectorD, h: Int): MatrixD = - val m = y_.dim - yf = new MatrixD (m, h+1) // forecasts for all time points t & horizons to h - val s_ = new MatrixD (m, h+1) // state values, per time x horizon - yf(?, 0) = y // first column is actual values, horizon 0 - s_(?, 0) = s - for k <- 1 to h do - yf(0, k) = y(0) // copy first actual value - s_(0, k) = s(0) - for t <- 1 until m do // forecast the rest - yf(t, k) = s_(t, k-1) - s_(t, k) = α * yf(t-1, k-1) + (1 - α) * s_(t-1, k-1) - end for -// debug ("forecastAll", s"yf(?, $k) = ${yf(?, k)}") - end for - yf // return matrix of forecasted values - end forecastAll -*/ - -end SimpleExpSmoothing - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `SimpleExpSmoothing` companion object provides factory methods for the - * `SimpleExpSmoothing` class. - */ -object SimpleExpSmoothing: - - /** Base hyper-parameter specification for `SimpleExpSmoothing` - */ - val hp = new HyperParameter; - hp += ("α", 0.9, 0.9) // default value for the smoothing parameter - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `SimpleExpSmoothing` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = hp): SimpleExpSmoothing = - new SimpleExpSmoothing (y, tt, hparam) - end apply - -end SimpleExpSmoothing - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `simpleExpSmoothingTest` main function tests the `AR` class on simulated data. 
- * Test predictions (one step ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.simpleExpSmoothingTest - */ -@main def simpleExpSmoothingTest (): Unit = - - val y = makeTSeries () // create simulated time-series (see `Stationary`) - - banner (s"Test Predictions: SimpleExpSmoothing on simulated time-series") - val mod = new SimpleExpSmoothing (y) // create model for time series data AR(1) - mod.trainNtest ()() // train and test on full dataset - -end simpleExpSmoothingTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `simpleExpSmoothingTest2` main function is used to test the `SimpleExpSmoothing` class. - * Forecasting lake levels. Compare AR(1) and SimpleExpSmoothing models for the time series data. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.simpleExpSmoothingTest2 - */ -@main def simpleExpSmoothingTest2 (): Unit = - - import forecasting.Example_LakeLevels.y - - val hh = 2 - - banner ("Build AR(1) model") - val ar1 = new AR (y) // time series model AR(1) - ar1.trainNtest ()() // train and test on full dataset - - banner ("Build SimpleExpSmoothing model") - val mod = new SimpleExpSmoothing (y) // time series model SimpleExpSmoothing - mod.trainNtest ()() // train and test on full dataset - - banner ("ForecastAll ...") - mod.forecastAll (y, hh) // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, y, hh, true) - -/* - for h <- 1 to 4 do // h-steps ahead forecast - banner (s"Rolling Validation h = $h") - val stats = SimpleRollingValidation.crossValidate2 (mod, kt_ = 1, h = h) - Fit.showQofStatTable (stats) - end for -*/ - -end simpleExpSmoothingTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `simpleExpSmoothingTest3` main function is used to test the `SimpleExpSmoothing` class. 
- * Test customized smoothing (call smooth) versus optimized smoothing (call train). - * > runMain scalation.modeling.forecasting.simpleExpSmoothingTest3 - */ -@main def simpleExpSmoothingTest3 (): Unit = - - val m = 50 - val r = Random () - val y = VectorD (for i <- 0 until m yield i + 10.0 * r.gen) - - val mod = new SimpleExpSmoothing (y) // smooth time series data: y vs. t - - banner ("Customized Simple Exponential Smoothing") - mod.smooth (0.5) // use customized parameters, don't train - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - println (s"mase = ${Fit.mase (y, yp)}") - - banner ("Optimized Simple Exponential Smoothing") - mod.train (null, y) // train to use optimal α - val (yp2, qof2) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof2)) // report on Quality of Fit (QoF) - println (s"mase = ${Fit.mase (y, yp2)}") - -end simpleExpSmoothingTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `simpleExpSmoothingTest4` main function is used to test the `SimpleExpSmoothing` class. - * Test rolling validation. - * > runMain scalation.modeling.forecasting.simpleExpSmoothingTest4 - */ -@main def simpleExpSmoothingTest4 (): Unit = - - val m = 50 - val r = Random () - val y = VectorD (for i <- 0 until m yield i + 10.0 * r.gen) - val h = 3 - println (s"y = $y") - - banner ("Optimized Simple Exponential Smoothing") - val mod = new SimpleExpSmoothing (y) // smooth time series data: y vs. 
t - mod.trainNtest ()() // train-test use optimal α - -// FIX - val yf = mod.forecastAll (y, h) - for k <- 1 to h do // h-steps ahead forecast - banner (s"forecastAll h = $h") - new Plot (null, y, yf(k), s"SES: Plot y and yf(${k})", lines = true) - -/* - banner (s"Rolling Validation h = $h") - val stats = SimpleRollingValidation.crossValidate2 (mod, kt_ = 1, h = h) - Fit.showQofStatTable (stats) -*/ - end for - -end simpleExpSmoothingTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `simpleExpSmoothingTest5` main function is used to test the `SimpleExpSmoothing` class. - * Forecasting lake levels for several values of the smoothing parameter α. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.simpleExpSmoothingTest5 - */ -@main def simpleExpSmoothingTest5 (): Unit = - - import forecasting.Example_LakeLevels.y - - val mod = new SimpleExpSmoothing (y) // time series model SimpleExpSmoothing - mod.toggleOpt () // switch auto optimization off - - for i <- 0 to 5 do - val a = i.toDouble / 5.0 - banner (s"Build SimpleExpSmoothing model with α = $a") - mod.reset (a) - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - end for - -end simpleExpSmoothingTest5 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/SimpleMovingAverage.scala b/src/main/scala/scalation/modeling/forecasting_old/SimpleMovingAverage.scala deleted file mode 100644 index c7f3994a4..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/SimpleMovingAverage.scala +++ /dev/null @@ -1,306 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). 
- * - * @note Model: Simple Moving Average (not the same as MA in ARMA) - */ - -package scalation -package modeling -package forecasting_old - -import scala.math.max - -import scalation.mathstat._ -import scalation.random.Normal - -//import RollingValidation.trSize - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `SimpleMovingAverage` class provides basic time series analysis capabilities. - * For a `SimpleMovingAverage` model with the time series data stored in vector y, the - * next value y_t+1 = y(t+1) may be predicted based on the mean of the q prior values of y: - * - * y_t+1 = mean (y_t, ..., y_t-q') + e_t+1 - * - * where e_t+1 is the new residual/error term and q' = q-1. - * @param y the response vector (time series data) - * @param tt the time points, if needed - * @param hparam the hyper-parameters - */ -class SimpleMovingAverage (y: VectorD, tt: VectorD = null, hparam: HyperParameter = SimpleMovingAverage.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = 1, df = y.dim - 1): - - private val flaw = flawf ("SimpleMovingAverage") // flaw function - private val q = hparam("q").toInt // take mean of last q values - - modelName = s"SimpleMovingAverage($q)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `SimpleMovingAverage` model to the times series data in vector y_. - * Note: for `SimpleMovingAverage` there are no parameters to train. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the actual training/full response/output vector - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = {} - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of a Simple Moving Average forecasting model and - * return its predictions and QoF vector. 
Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the actual testing/full response/output vector - */ - override def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yp, no_qof) = super.test (null, y_) // call super.test for predictions - resetDF (1, y_.dim - 1) // reset the degrees of freedom - (yp, diagnose (y_, yp)) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a Simple Moving Average forecasting model and - * return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, h) // get and align actual and forecasted values - resetDF (1, yy.dim - 1) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// differ (yy, yfh) // uncomment for debugging - (yy, yfh, diagnose (yy, yfh)) // return aligned actual, forecasted and QoF vectors - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector (there are none, so return an empty vector). - */ - override def parameter: VectorD = new VectorD (0) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * - * y_t+1 = φ_0 y_t + φ_1 y_t-1 + ... 
+ φ_p-1 y_t-(p-1) - * - * When t-j is negative, use y_0 - * @param t the time point from which to make prediction - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - val sumq = new SumQueue (q) - for i <- max (0, t-q+1) to t do sumq += y_(i) // y_t-q+1 + ... + y_t - sumq.mean // prediction for y_t+1 - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. - * Intended to work with rolling validation (analog of predict method) - * FIX - not updated - * @param t the time point from which to make forecasts - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making predictions - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - val yd = new VectorD (h) // hold forecasts for each horizon - val sumq = new SumQueue (q) - for i <- max (0, t-q+1) to t do sumq += y(i) // y_t-q+1 + ... + y_t - for k <- 1 to h do - val pred = sumq.mean - yf(t+k, k) = pred // forecast down the diagonal - yd(k-1) = pred // record diagonal values - sumq += yf(t+k, k-1) // replace oldest value with this value - end for - yd // return forecasts for each horizon - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to FORECAST MATRIX and return h-step ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. 
- * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - val h1 = h - 1 - - yf(h1, h) = yf(h1-1, h1) // first forecast is special case - - val sumq = new SumQueue (q) // maintain sum of q most recent values - sumq += y(0) // put in the first actual value - for t <- y_.indices do // make forecasts over all time points for horizon k - yf(t+h, h) = sumq.mean // mean of last q values - sumq += yf(t+h, h1) // replace oldest value with this value - // FIX: using the diagonal could work better - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end SimpleMovingAverage - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `SimpleMovingAverage` companion object provides factory methods for the `SimpleMovingAverage` class. - */ -object SimpleMovingAverage: - - /** Base hyper-parameter specification for `SimpleMovingAverage` - */ - val hp = new HyperParameter - hp += ("q", 2, 2) // number of prior values for mean - hp += ("u", 1.0, 1.0) // flat (0) vs. linear (1) weights - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `SimpleMovingAverage` object. - * @param y the response vector (time series data) - * @param tt the time points, if needed - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = hp): SimpleMovingAverage = - new SimpleMovingAverage (y, tt, hparam) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Decompose a univariate time series into a moving average and a remainder. 
- * @see https://arxiv.org/pdf/2106.13008.pdf (Autoformer) - * @param y the response vector (time series data) - * @param tt the time points, if needed - * @param hparam the hyper-parameters - */ - def decompose (y: VectorD, tt: VectorD = null, hparam: HyperParameter = hp): (VectorD, VectorD) = - val sma = new SimpleMovingAverage (y, tt, hparam) - val s = sma.predictAll (y) - s(0) = s(1) // pad by copy copying first avg back - (s, y - s) - end decompose - -end SimpleMovingAverage - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `simpleMovingAverageTest` main function tests the `SimpleMovingAverage` class on simulated data. - * > runMain scalation.modeling.forecasting.simpleMovingAverageTest - */ -@main def simpleMovingAverageTest (): Unit = - - val y = makeTSeries () // generate a time-series (see `Stationary`) - - banner (s"Test Predictions: AR(1) on simulated time-series") - val ar = new AR (y) // create model for time series data AR(1) - ar.trainNtest ()() // train and test on full dataset - - banner (s"Test Predictions: SimpleMovingAverage on simulated time-series") - val mod = new SimpleMovingAverage (y) // time series model - mod.trainNtest ()() // train and test on full dataset - - banner ("Select model based on ACF and PACF") - ar.plotFunc (ar.acF, "ACF") // Auto-Correlation Function (ACF) - ar.plotFunc (ar.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end simpleMovingAverageTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `simpleMovingAverageTest2` main function is used to test the `SimpleMovingAverage` class. 
- * > runMain scalation.modeling.forecasting.simpleMovingAverageTest2 - */ -@main def simpleMovingAverageTest2 (): Unit = - - val y = makeTSeries ((t: Double) => t, 30, Normal ()) // generate a time-series (see `Stationary`) - - banner ("Build AR(1) Model") - val ar = new AR (y) // time series model - ar.trainNtest ()() // train and test on full dataset - - banner ("Build SimpleMovingAverage Model") - val mod = new SimpleMovingAverage (y) // time series model - mod.trainNtest ()() // train and test on full dataset - -/* - banner ("Make Forecasts") - val steps = 10 // number of steps for the forecasts - val rw_f = rw.forecast (steps) - println (s"$steps-step ahead forecasts using SimpleMovingAverage model = $rw_f") - val tf = VectorD.range (n, n + steps) - new Plot (tf, rw_f, null, s"Plot SimpleMovingAverage forecasts vs. t", true) -*/ - -end simpleMovingAverageTest2 - -import forecasting.Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `simpleMovingAverageTest3` main function is used to test the `SimpleMovingAverage` class. - * Forecasting lake levels. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.simpleMovingAverageTest3 - */ -@main def simpleMovingAverageTest3 (): Unit = - - val hh = 3 // maximum forecasting horizon - - banner (s"Test Forecasts: SimpleMovingAverage on LakeLevels Dataset") - val mod = new SimpleMovingAverage (y) // create model for time series data - mod.trainNtest ()( ) // train and test on full dataset - - mod.forecastAll (y, hh) // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, y, hh, true) - -end simpleMovingAverageTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `simpleMovingAverageTest4` main function is used to test the `SimpleMovingAverage` class. - * Decompose the lake levels dataset. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.simpleMovingAverageTest4 - */ -@main def simpleMovingAverageTest4 (): Unit = - - import forecasting.Example_LakeLevels.y - - SimpleMovingAverage.hp("q") = 5 // number of points to average - banner ("Use SimpleMovingAverage to Decompose the Lake Level Dataset") - val (s, z) = SimpleMovingAverage.decompose (y) // time series model - new Plot (null, y, null, "original time series", lines = true) - new Plot (null, s, null, "moving average", lines = true) - new Plot (null, z, null, "remainder", lines = true) - -end simpleMovingAverageTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `simpleMovingAverageTest5` main function is used to test the `SimpleMovingAverage` class. - * > runMain scalation.modeling.forecasting.simpleMovingAverageTest5 - */ -@main def simpleMovingAverageTest5 (): Unit = - - val data = MatrixD.load ("travelTime.csv") // automatically prepends DATA_DIR - - val (t, y) = (data(?, 0), data(?, 1)) - - println (s"t.dim = ${t.dim}, y.dim = ${y.dim}") - - banner ("Build AR(1) Model") - val ar = new AR (y) // time series model - ar.trainNtest ()() // train and test on full dataset - - banner (s"Build SimpleMovingAverage model") - val mod = new SimpleMovingAverage (y) // time series model - mod.trainNtest ()() // train and test on full dataset - -end simpleMovingAverageTest5 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/Stationarity.scala b/src/main/scala/scalation/modeling/forecasting_old/Stationarity.scala deleted file mode 100644 index 944073749..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/Stationarity.scala +++ /dev/null @@ -1,369 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller - * @version 2.0 - * @date Thu May 26 13:00:49 EDT 2022 - * @see LICENSE (MIT style license file). 
- * - * @note Model Support: Statistical Test for Time Series Stationarity - * - * Unit Root Tests for Time Series Stationarity - * (1 is a root of the process characteristic equation) - * @see github.com/olmallet81/URT - */ - -package scalation -package modeling -package forecasting_old - -import scala.collection.immutable.HashMap -import scala.Double.NaN -import scala.math.max -import scala.util.control.Breaks.{break, breakable} - -import scalation.mathstat._ -import scalation.random.{Normal, Variate} - -type CriticalValues = HashMap [Int, VectorD] - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `UnitRoot` trait provides a common framework for various unit root testers - * for Time Series Stationarity. - * This code is translated from the C++ code found in - * @see github.com/olmallet81/URT. - * @param testName the name of test, e.g., KPSS - * @param nobs the number of observations (length of time-series) - * @param validTrends vector of test valid trends types, e.g., constant, linear trend - * @param lagsType default lags value long or short time-series - * @param lags the number of lags to use - * @param trend type of trend to test for - */ -trait UnitRoot (protected val testName: String, protected val nobs: Int, - protected val validTrends: VectorS, protected var lagsType: String, - protected var lags: Int, protected var trend: String): - - protected var coeff: HashMap [Double, CriticalValues] = null // HashMap containing critical values coefficients - protected var pval = 1.0 // test p-value - protected var newLags = false // control if a new number of lags has been chosen - protected var newTrend = false // control if a new trend has been chosen - - private var maxLags = 0 // maximum number of lags for lag length optimization - private var npar = 0 // number of parameters excluding lag difference terms - private var prevLagsType: String = null // previous type of lags - private var prevTrend: String = null // 
previous regression trend - private var trendType: String = null // regression trend for outputting test results - private var prevLags = 0 // previous number of lags - private val optim = false // control if lag length is optimized - private var newTest = false // control if new test is run (true) or all parameters remain same (false) - - private val probas = Array (0.001, 0.005, 0.01, 0.025, 0.05, - 0.10 , 0.20 , 0.50, 0.80 , 0.90, - 0.95 , 0.975, 0.99, 0.995, 0.999) // array of probabilities for p-value computation - private val criticalVals = Array.ofDim [Double] (probas.length) // array containing tne test critical values - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reset the values for lags for trend. - * @param lags the number of lags to use - * @param trend type of trend to test for - */ - def reset (lags_ : Int, trend_ : String): Unit = { lags = lags_; trend = trend_ } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get test pvalue. - */ - def getPval (): Double = pval - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get test valid trends. - */ - def getTrends (): VectorS = validTrends - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute test statistic. - */ - def statistic (): Double - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute test statistic p-value. - */ - def pvalue (stat: Double): Double = - if stat.isNaN then pval = NaN - else if newTest then // if a new test has been run - computeCV () // computing critical values - computePval (stat) // computing p-value - newTest = false - end if - pval - end pvalue - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Output test results (can be overridden by derived classes). 
- */ - def show (stat: Double): Unit = - banner (s"$testName Test Results") // outputting test name - println (s" Statistic = $stat") // outputting test statistic - print (" P-value = ") // outputting p-value - - if pval <= probas(0) then print ("< 0.001") - else if pval >= probas(14) then print ("> 0.999") - else print (pval) - println () - - println (s" Lags = $lags") - println (s" Trend = $trendType") - println (" ------------------------------------\n") - - println (" Test Hypothesis") // outputting test hypothesis - println (" ------------------------------------") - - println (" H0: The process is weakly stationary") // KPSS hypotheses - println (" H1: The process contains a unit root") - println () - - println (" Critical Values") // outputting critical values - println (" ---------------") - - val idx = Array (12, 10, 9) // declaring array of critical value indexes - - if stat.isNaN then - println (s" 1% $NaN") - println (s" 5% $NaN") - println (s" 10% $NaN") - else - println (s" 1% ${criticalVals(idx(0))}") - println (s" 5% ${criticalVals(idx(1))}") - println (s" 10% ${criticalVals(idx(2))}") - end if - - println (" ---------------") - println (" Test Conclusion") // outputting test conclusion - - if pval <= 0.01 then println ("We can reject H0 at the 1% significance level") - else if pval <= 0.05 then println ("We can reject H0 at the 5% significance level") - else if pval <= 0.10 then println ("We can reject H0 at the 10% significance level") - else if ! pval.isNaN then println ("We cannot reject H0") - else println ("We cannot conclude, NaN produced") - end show - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set number of lags, checking input validity. This method needs to know - * optim so it needs to be run after set_method (). - */ - protected def setLags (): Unit = - if ! 
optim then - if lags < 0 then // number of lags cannot be strictly negative - lags = 0 - println ("\n WARNING: number of lags cannot be negative, it has been set to 0 by default.\n") - end if - if ! lagsType.isEmpty && lags != prevLags then lagsType = "" // if user has switched from a default lags value - // to a value of his choice (for all tests) - else if maxLags < 0 then // maxLags cannot be strictly negative - maxLags = 0 - println ("\n WARNING: maximum number of lags cannot be negative, it has been set to a default value (L12-rule).\n") - end if - - // updating lags only for PP and KPSS tests, for ADF and DFGLS tests lags will be updated at the next optimization or - // set back to prevLags if maxLags, trend, method and level are the same as before - - if optim && maxLags == 0 || ! lagsType.isEmpty then // computing default lags value for KPSS test - maxLags = - if lagsType == "short" then (4 * (0.01 * nobs)~^0.25).toInt // short => L4-rule (Schwert 1989) - else (12 * (0.01 * nobs)~^0.25).toInt // long => L12-rule (Schwert 1989) - lags = maxLags - end if - - if ! optim && lags != prevLags then // if number of lags is different than previous value - newTest = true - newLags = true - prevLags = lags - end if - end setLags - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set lags type long or short for PP and KPSS default lags value or ADF - * and DFGLS default maxlags value. - */ - protected def setLagsType (): Unit = - if lagsType.isEmpty || lagsType == prevLagsType then return // skipping method if lagsType is empty or did not change - if lagsType != "long" && lagsType != "short" then - println("\n WARNING: unknown default type of lags, long has been selected by default.\n") - lagsType = "long" // default lags type is long - end if - prevLagsType = lagsType - end setLagsType - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set regression trend. 
- */ - protected def setTrend (): Unit = - if trend == prevTrend then return // skip method if trend did not change - - if ! validTrends.contains (trend) then // invalid => set default trend to constant - trend = "c" - trendType = "constant"; npar = 2 - println ("\n WARNING: unknown regression trend selected, 'constant term' has been selected by default.\n") - println (s" Possible trends for this test are $validTrends") - else if trend == "c" then - trendType = "constant"; npar = 2 - else if trend == "nc" then - trendType = "no constant"; npar = 1 - else if trend == "ct" then - trendType = "constant trend"; npar = 3 - else if trend == "ctt" then - trendType = "quadratic trend"; npar = 4 - end if - - if trend != prevTrend then - newTest = true - newTrend = true - prevTrend = trend - end if - end setTrend - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute critical value from probabilities. - */ - private def computeCV (): Unit = - val n = nobs - lags - 1 // computing adjusted number of observations - - for i <- probas.indices do - criticalVals(i) = 0 // computing critical value - val n0 = coeff.getOrElse (probas(i), null).getOrElse (0, null).size - for j <- 0 until n0 do - criticalVals(i) += coeff.getOrElse (probas(i), null).getOrElse (0, null)(j) / (n~^j) - end for - - val n1 = coeff.getOrElse (probas(i), null).getOrElse (1, null).size - for j <- 0 until n1 do - criticalVals(i) += coeff.getOrElse (probas(i), null).getOrElse (1, null)(j) * ((lags.toDouble/n)~^(j+1)) - end for - end for - end computeCV - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute p-value by linear interpolation from critical values. 
- */ - private def computePval (stat: Double): Unit = - if stat <= criticalVals(0) then pval = probas(0) // if stat <= critical value for first probability (in abs value) - else - breakable { - for i <- 1 until probas.length if stat <= criticalVals(i) do - pval = probas(i-1) + (stat - criticalVals(i-1)) * (probas(i) - probas(i-1)) / (criticalVals(i) - criticalVals(i-1)) - break () - end for - } // breakable - end if - if stat > criticalVals.last then pval = probas.last // if stat > critical value for last probability (in abs value) - end computePval - -end UnitRoot - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `makeTSeries` top level function generates time-series data. - * @param signal the function of time used to make the deterministic part - * @param m the length of the time series - * @param noise the random variate generator used for the noise part - */ -def makeTSeries (signal: FunctionS2S = (t: Double) => 100 + 40 * (t-1) - (t-2) * (t-2), - m: Int = 50, noise: Variate = Normal (0.0, 10000.0)): VectorD = - VectorD (for t <- 0 until m yield signal (t) + noise.gen) -end makeTSeries - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `makeTSeries` top level function recursively generates time-series data - * by simulating and AR process. 
- * y_t+1 = δ + Σ(φ_j y_t-j) + e_t+1 - * Note: all defaults generates white noise with variance 1 - * @param c the initial value for the time series - * @param φ the auto-regressive coefficients - * @param m the length of the time series - * @param noise the random variate generator used for the noise part - */ -def makeTSeriesR (c: Double = 0.0, φ: VectorD = new VectorD (0), m: Int = 50, - noise: Variate = Normal ()): VectorD = - val y = new VectorD (m) - y(0) = c - for t <- 0 until m-1 do - var sum = 0.0 - for j <- φ.indices do - sum += φ(j) * y(max (0, t-j)) - end for - y(t+1) = sum + noise.gen - end for - y -end makeTSeriesR - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `stationaryTest` main function tests the `Stationary` class on a simulated - * time-series. - * > runMain scalation.modeling.forecasting.stationaryTest - */ -@main def stationaryTest (): Unit = - - val y = makeTSeries () - - banner ("Test Stationary on simulated time-series") - val stats = Stats4TS (y, MAX_LAGS) - println (stats) - val zero = new VectorD (stats.acr.dim) - new Plot (null, stats.acr, zero, "ACF vs. k", true) - - new Plot (null, y, null, "simulated time series", lines = true) - -end stationaryTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `stationaryTest2` main function tests the `Stationary` class on a simulated - * stationary time-series. An AR(1) is a stationary process when |φ_1| < 1, - * a unit root process when |φ_1| = 1, and explosive otherwise. 
- * > runMain scalation.modeling.forecasting.stationaryTest2 - */ -@main def stationaryTest2 (): Unit = - - val rates = Array (0.99, 1.0, 1.01) - - for i <- rates.indices do - val φ = VectorD (rates (i)) - val y = makeTSeriesR (0, φ, 2000) - - banner (s"Test Stationary on simulated stationary time-series with rate = ${rates(i)}") - val stats = Stats4TS (y, MAX_LAGS) - println (stats) - val zero = new VectorD (stats.acr.dim) - new Plot (null, stats.acr, zero, s"ACF vs. k for rate = ${rates(i)}", true) - new Plot (null, y, null, s"simulated time series with rate = ${rates(i)}", lines = true) - end for - -end stationaryTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `stationaryTest3` main function tests the `Stationary` class on a simulated - * stationary time-series. An AR(2) is a stationary process when |φ_2| < 1 and |φ_1| < 1 - φ_2, - * a unit root process when |φ_2| = 1 or |φ_1| = 1 - φ_2, and explosive otherwise. - * > runMain scalation.modeling.forecasting.stationaryTest3 - */ -@main def stationaryTest3 (): Unit = - - val rates = Array (Array ((0.49, 0.49), (0.50, 0.50), (0.51, 0.51)), - Array ((0.01, 0.98), (0.01, 0.99), (0.01, 1.00)), - Array ((1.98, -0.99), (1.99, -0.99), (2.00, -0.99))) - - for i <- rates.indices; j <- rates(0).indices do - val φ = VectorD (rates (i)(j)._1, rates(i)(j)._2) - val y = makeTSeriesR (0, φ, 2000) - - banner (s"Test Stationary on simulated stationary time-series with rate = ${rates(i)(j)}") - val stats = Stats4TS (y, MAX_LAGS) - println (stats) -// val zero = new VectorD (stats.acr.dim) -// new Plot (null, stats.acr, zero, s"ACF vs. 
k for rate = ${rates(i)(j)}", true) - new Plot (null, y, null, s"simulated time series with rate = ${rates(i)(j)}", lines = true) - end for - -end stationaryTest3 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/TrendModel.scala b/src/main/scala/scalation/modeling/forecasting_old/TrendModel.scala deleted file mode 100644 index a2edbc242..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/TrendModel.scala +++ /dev/null @@ -1,224 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sat May 28 23:47:06 EDT 2022 - * @see LICENSE (MIT style license file). - * - * @note Model: (Linear) Trend Model - */ - -package scalation -package modeling -package forecasting_old - -import scalation.mathstat._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TrendModel` class provides basic time series analysis capabilities for - * TrendModel models. TrendModel models are often used for forecasting. - * Given time series data stored in vector y, its next value y_t+1 = y(t+1) - * may be predicted based on a linear function of time t: - * - * y_t+1 = b0 + b1 (t+1) + e_t+1 - * - * where b0 is the intercept, b1 is the slope and e_t+1 is the new residual/error term. 
- * @param y the response vector (time-series data) - * @param tt the time vector (time index may suffice) - * @param hparam the hyper-parameters (none => use null) - */ -class TrendModel (y: VectorD, tt: VectorD = null, hparam: HyperParameter = null) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = 1, df = y.dim - 1): - - private val debug = debugf ("TrendModel", true) // debug function - private val flaw = flawf ("TrendModel") // flaw function - m = y.dim // number of time points (@see `FitM`) - private var b = VectorD.nullv // parameter values from SimpleRegression - - modelName = s"TrendModel" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit a `TrendModel` model to the times-series data in vector y_. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - makeCorrelogram (y_) // correlogram computes psi matrix - - val yy = y_(1 until y_.dim) - val t_ = VectorD.range (1, y_.dim) - b = SimpleRegression.coeff (t_, yy) // compute b = [b_0, b_1] - debug ("train", s"parameters for $modelName = $parameter") // [b_0, b_1] - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of a TrendModel forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. 
- * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the training/testing/full response/output vector - */ - override def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yp, no_qof) = super.test (null, y_) // call super.test for predictions - resetDF (1, y_.dim - 1) // reset the degrees of freedom - (yp, diagnose (y_, yp)) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a TrendModel forecasting model y_ = f(lags (y_)) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, h) // get and align actual and forecasted values - resetDF (1, yy.dim - 1) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// Forecaster.differ (yy, yfh) // uncomment for debugging - (yy, yfh, diagnose (yy, yfh)) // return aligned actual, forecasted and QoF vectors - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the Linear Trend model. - */ - override def parameter: VectorD = b - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * y_t+1 = b_0 + b_1 (t+1) - * Use `SimpleRegression` to determine the coefficients b_0 and b_1 and make predictions. 
- * @param t the time point from which to make prediction - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = b(0) + b(1) * (t+1) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. - * Intended to work with rolling validation (analog of predict method) - * @param t the time point from which to make forecasts - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making predictions - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - val yd = new VectorD (h) // hold forecasts for each horizon - for k <- 1 to h do - val pred = b(0) + b(1) * (t+k) - yf(t+k, k) = pred // forecast down the diagonal - yd(k-1) = pred // record diagonal values - end for - yd // return forecasts for each horizon - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to FORECAST MATRIX and return h-step ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. 
- * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - val h1 = h - 1 - - yf(h1, h) = b(0) + b(1) * h1 // first forecast is special case - - for t <- y_.indices do // make forecasts over all time points for horizon k - yf(t+h, h) = b(0) + b(1) * (t+h) // forecast down the diagonal - SimpleRegression - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end TrendModel - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TrendModel` companion object provides factory methods for the `TrendModel` class. - */ -object TrendModel: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `TrendModel` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = null): TrendModel = - new TrendModel (y, tt, hparam) - end apply - -end TrendModel - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `trendModelTest` main function tests the `TrendModel` class on simulated data. - * Test predictions (one step ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.trendModelTest - */ -@main def trendModelTest (): Unit = - - val y = makeTSeries () // create simulated time-series (see `Stationary`) - - banner (s"Test Predictions: TrendModel on simulated time-series") - val mod = new TrendModel (y) // create model for time series data Trend Model - mod.trainNtest ()() // train and test on full dataset - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end trendModelTest - -import forecasting.Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `trendModelTest2` main function tests the `TrendModel` class on real data: - * Forecasting lake levels. - * Test predictions (one step ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.trendModelTest2 - */ -@main def trendModelTest2 (): Unit = - - banner (s"Test Predictions: TrendModel on LakeLevels Dataset") - val mod = new TrendModel (y) // create model for time series data - mod.trainNtest ()() // train and test on full dataset - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end trendModelTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `trendModelTest3` main function tests the `TrendModel` class on real data: - * Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.trendModelTest3 - */ -@main def trendModelTest3 (): Unit = - - val hh = 3 // maximum forecasting horizon - - banner (s"Test Forecasts: TrendModel on LakeLevels Dataset") - val mod = new TrendModel (y) // create model for time series data - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll (y, hh) // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, y, hh, true) - -end trendModelTest3 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/VAR.scala b/src/main/scala/scalation/modeling/forecasting_old/VAR.scala deleted file mode 100644 index 77a23e68e..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/VAR.scala +++ /dev/null @@ -1,350 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Aug 27 14:03:25 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @note Model: Vector AutoRegressive (VAR) - */ - -package scalation -package modeling -package forecasting_old - -import scala.math.max - -import scalation.mathstat._ -import scalation.modeling.neuralnet.{PredictorMV, RegressionMV} - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `VAR` object supports regression for Multivariate Time Series data. - * Given a response matrix y, a predictor matrix x is built that consists of - * lagged y vectors. Additional future response vectors are built for training. - * y_t = b dot x - * where x = [y_{t-1}, y_{t-2}, ... y_{t-lag}]. - */ -object VAR: - - private val debug = debugf ("VAR", true) // debug function - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionMV` object from a response matrix. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. 
- * @param y the original un-expanded output/response matrix - * @param lags the maximum lag included (inclusive) - * @param h the forecasting horizon (1, 2, ... h) - * @param intercept whether to add a column of all ones to the matrix (intercept) - * @param hparam the hyper-parameters (use Regression.hp for default) - */ - def apply (y: MatrixD, lags: Int, h: Int, intercept: Boolean = true, - hparam: HyperParameter = Regression.hp): RegressionMV = - - var x = ARX.makeExoCols (lags, y, 1, lags+1) // add columns for each lagged vars - val yy = y(1 until y.dim) // trim y to match x - if intercept then x = VectorD.one (yy.dim) +^: x // add first column of all ones - - println (s"x.dims = ${x.dims}, yy.dim = ${yy.dim}") - println (s"x = $x \n yy = $yy") - val mod = new RegressionMV (x, yy, null, hparam) - mod.modelName = s"VAR_$lags" - mod - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split the x matrix and y matrix into training and testing sets. - * @param x the x data/input matrix - * @param y the y response/output matrix - * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) - */ - def split_TnT (x: MatrixD, y: MatrixD, ratio: Double = 0.30): (MatrixD, MatrixD, MatrixD, MatrixD) = - val n = x.dim - val tr_size = (n * (1.0 - ratio)).toInt - println (s"VAR.split_TnT: tr_size = $tr_size, te_size = ${n - tr_size}") - (x(0 until tr_size), y(0 until tr_size), x(tr_size until n), y(tr_size until n)) - end split_TnT - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TRAINING SET (tr) and a TESTING SET (te) - * as follows: [ <-- tr_size --> | <-- te_size --> ] - * This version calls predict for one-step ahead out-of-sample forecasts. 
- * @see `RollingValidation` - * @param mod the forecasting model being used (e.g., `VAR`) - * @param rc the retraining cycle (number of forecasts until retraining occurs) - */ - def rollValidate (mod: PredictorMV & Fit, rc: Int): Unit = - val x = mod.getX // get data/input matrix - val y = mod.getYY // get response/output matrix - val te_size = RollingValidation.teSize (y.dim) // size of testing set - val tr_size = y.dim - te_size // size of initial training set - debug ("rollValidate", s"train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") - - val yp = new MatrixD (te_size, y.dim2) // y-predicted over testing set - for i <- 0 until te_size do // iterate through testing set - val t = tr_size + i // next time point to forecast -// if i % rc == 0 then mod.train (x(0 until t), y(0 until t)) // retrain on sliding training set (growing set) - if i % rc == 0 then mod.train (x(i until t), y(i until t)) // retrain on sliding training set (fixed size set) - yp(i) = mod.predict (x(t-1)) // predict the next value - end for - - val df = max (0, mod.parameter(0).dim - 1) // degrees of freedom for model - mod.resetDF (df, te_size - df) // reset degrees of freedom - for k <- y.indices2 do - val (t, yk) = RollingValidation.align (tr_size, y(?, k)) // align vectors - val ypk = yp(?, k) - banner (s"QoF for horizon ${k+1} with yk.dim = ${yk.dim}, ypk.dim = ${ypk.dim}") - new Plot (t, yk, ypk, s"Plot yy, yp vs. t for horizon ${k+1}", lines = true) - println (FitM.fitMap (mod.diagnose (yk, ypk), qoF_names)) - end for - end rollValidate - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Plot actual vs. predicted values for all variables (columns of the matrices). 
- * @param y the original un-expanded output/response matrix - * @param yp the predicted values (one-step ahead forecasts) matrix - * @param name the name of the model run to produce yp - */ - def plotAll (y: MatrixD, yp: MatrixD, name: String): Unit = - for j <- y.indices2 do - new Plot (null, y(?, j), yp(?, j), s"$name, y vs. yp @ var j = $j", lines = true) - end plotAll - -end VAR - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `varTest` main function tests the `VAR` class. - * This test is used to CHECK that the makeExoCols method (@see `apply`) is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. - * > runMain scalation.modeling.forecasting.varTest - */ -@main def varTest (): Unit = - - val m = 30 - val z = VectorD.range (1, m) // used to CHECK the makeExoCols method - val y = MatrixD (z, -z + m) - val h = 3 // the forecasting horizon - - for p <- 5 to 5 do // autoregressive hyper-parameter p - banner (s"Test: VAR with $p lags") - val mod = VAR (y, p, h) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yy = mod.getYY - val yp = mod.predict (mod.getX) - VAR.plotAll (yy, yp, mod.modelName) -// for k <- yp.indices2 do -// new Plot (null, yy(?, k), yp(?, k), s"yy_$k vs. yp_$k for ${mod.modelName} (h=${k+1}) with $p lags", lines = true) - end for - -end varTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `varTest2` main function tests the `VAR` class on real data: - * Forecasting Gas Furnace Data. Performs In-Sample Testing. 
- * > runMain scalation.modeling.forecasting.varTest2 - */ -@main def varTest2 (): Unit = - - import forecasting.Example_GasFurnace._ - - val LAGS = 5 // number of lags - val h = 4 // forecasting horizon - - val y = forecasting.Example_GasFurnace.loadData_yy (header) - println (s"y.dims = ${y.dims}") - - banner ("Test In-Sample VAR on GasFurnace Data") - val mod = VAR (y, LAGS, h) // create model for time series data - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - println (mod.summary) - val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 - VAR.plotAll (yy_, yp, mod.modelName) - -end varTest2 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `varTest3` main function tests the `VAR` class on real data: - * Forecasting COVID-19 Weekly Data. Performs In-Sample Testing. - * Goal: Find the variable that works best with "new_deaths" - * > runMain scalation.modeling.forecasting.varTest3 - */ -@main def varTest3 (): Unit = - - val LAGS = 5 // number of lags - val h = 6 // forecasting horizon - - val vars = Array ("new_deaths", "icu_patients") - val yy = forecasting.Example_Covid.loadData_yy (vars) - val iskip = yy(?, 0).indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - val y = yy(iskip until yy.dim) // trim away the first iskip rows - println (s"y.dims = ${y.dims}") - - banner ("Test In-Sample VAR on COVID-19 Weekly Data") - val mod = VAR (y, LAGS, h) // create model for time series data - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - println (mod.summary ()) - val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 - VAR.plotAll (yy_, yp, mod.modelName) - - banner (s"Feature Selection Technique: Stepwise") - val (cols, rSq) = mod.stepwiseSelAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, 
rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for VAR with tech", lines = true) - - banner ("Feature Importance") - println (s"Stepwise: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end varTest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `varTest4` main function tests the `VAR` class on real data: - * Forecasting COVID-19 Weekly Data. Performs In-Sample Testing. - * Goal: Find the four variables that works best with "new_deaths" - * > runMain scalation.modeling.forecasting.varTest4 - */ -@main def varTest4 (): Unit = - - val LAGS = 5 // number of lags - val h = 6 // forecasting horizon - - val vars = Array ("new_deaths", "icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val yy = forecasting.Example_Covid.loadData_yy (vars) - val iskip = yy(?, 0).indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - val y = yy(iskip until yy.dim) // trim away the first iskip rows - println (s"y.dims = ${y.dims}") - - banner ("Test In-Sample VAR on COVID-19 Weekly Data") - val mod = VAR (y, LAGS, h) // create model for time series data - with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - println (mod.summary ()) - val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 - VAR.plotAll (yy_, yp, mod.modelName) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for VAR with tech", lines = true) 
- - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end varTest4 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `varTest5` main function tests the `VAR` class on real data: - * Forecasting COVID-19 Weekly Data. Does TnT Testing on endogenous and exogenous variables. - * Determine the terms to include in the model using Stepwise on In-Sample. - * > runMain scalation.modeling.forecasting.varTest5 - */ -@main def varTest5 (): Unit = - - val LAGS = 5 // number of lags - val h = 6 // forecasting horizon - - val vars = Array ("new_deaths", "icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val yy = forecasting.Example_Covid.loadData_yy (vars) - val iskip = yy(?, 0).indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - val y = yy(iskip until yy.dim) // trim away the first iskip rows - println (s"y.dims = ${y.dims}") - - banner ("Test In-Sample VAR on COVID-19 Weekly Data") - val mod = VAR (y, LAGS, h) // create model for time series data - with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - println (mod.summary ()) - val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 - VAR.plotAll (yy_, yp, mod.modelName) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for VAR with tech", lines = true) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") 
-// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run TnT on Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - val (x_, y_, xtest, ytest) = VAR.split_TnT (bmod.getX, bmod.getYY) - val (yptest, qoftest) = bmod.asInstanceOf [RegressionMV].trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) - new Plot (null, ytest(?, 0), yptest(?, 0), s"${mod.modelName}, ytest vs. yptest", lines = true) - -end varTest5 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `varTest6` main function tests the `VAR` class on real data: - * Forecasting COVID-19 Weekly Data. Does Rolling Validation on variables. - * Determine the terms to include in the model using Stepwise on In-Sample. - * > runMain scalation.modeling.forecasting.varTest6 - */ -@main def varTest6 (): Unit = - - val LAGS = 5 // number of lags - val h = 6 // forecasting horizon - - val vars = Array ("new_deaths", "icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val yy = forecasting.Example_Covid.loadData_yy (vars) - val iskip = yy(?, 0).indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - val y = yy(iskip until yy.dim) // trim away the first iskip rows - println (s"y.dims = ${y.dims}") - - banner ("Test In-Sample VAR on COVID-19 Weekly Data") - val mod = VAR (y, LAGS, h) // create model for time series data - with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - println (mod.summary ()) - val yy_ = y(1 until y.dim) // can't forecast first values at t = 0 - VAR.plotAll (yy_, yp, mod.modelName) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val 
(cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for VAR with tech", lines = true) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run Rolling Validation on VAR Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - VAR.rollValidate (bmod.asInstanceOf [RegressionMV], 1) - -end varTest6 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/WeightedMovingAverage.scala b/src/main/scala/scalation/modeling/forecasting_old/WeightedMovingAverage.scala deleted file mode 100644 index 97c4bdff2..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/WeightedMovingAverage.scala +++ /dev/null @@ -1,365 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sat Jun 13 01:2700 EST 2017 - * @see LICENSE (MIT style license file). - * - * @note Model: Weighted Moving Average (not the same as MA in ARMA) - */ - -package scalation -package modeling -package forecasting_old - -import scalation.mathstat._ -import scalation.random.Normal - -//import RollingValidation.trSize - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `WeightedMovingAverage` class provides basic time series analysis capabilities. - * For a `WeightedMovingAverage` model with the time series data stored in vector y, the - * next value y_t+1 = y(t+1) may be predicted based on the weighted-average of the past - * q values of y: - * - * y_t+1 = weight-average (y_t, ..., y_t-q') + e_t+1 - * - * where e_t+1 is the new residual/error term and q' = q-1. 
The hyper-parameter u selects - * between flat (u = 0), linear weights (u = 1) or comninations of both. - * @param y the response vector (time series data) - * @param tt the time points, if needed - * @param hparam the hyper-parameters - */ -class WeightedMovingAverage (y: VectorD, tt: VectorD = null, hparam: HyperParameter = SimpleMovingAverage.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = 1, df = y.dim - 1): - - private val debug = debugf ("WeightedMovingAverage", false) // debug function - private val flaw = flawf ("WeightedMovingAverage") // flaw function - private val q = hparam("q").toInt // take weighted average of last q values - private val u = hparam("u").toDouble // u = 0 => flat, 1 => linear weights - - modelName = s"WeightedMovingAverage($q)" - - private val ww = VectorD.range (1, q+1) - private val w1 = ww / ww.sum // linear weights - private val w2 = VectorD.one (q) / q // flat weights - val w = w1 * u + w2 * (1 - u) // combination of weights - - debug ("init", s"size q = $q, weights w = $w") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `WeightedMovingAverage` model to the times series data in vector y_. - * Note: for `WeightedMovingAverage` there are no parameters to train. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the actual training/full response/output vector - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = {} - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of a Weighted Moving Average forecasting model and - * return its predictions and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the actual testing/full response/output vector - */ - override def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yp, no_qof) = super.test (null, y_) // call super.test for predictions - resetDF (1, y_.dim - 1) // reset the degrees of freedom - (yp, diagnose (y_, yp)) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a Weighted Moving Average forecasting model and - * return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, h) // get and align actual and forecasted values - resetDF (1, yy.dim - 1) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// differ (yy, yfh) // uncomment for debugging - (yy, yfh, diagnose (yy, yfh)) // return aligned actual, forecasted and QoF vectors - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector (there are none, so return an empty vector). - */ - override def parameter: VectorD = new VectorD (0) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * - * y_t+1 = [y_t-q+1, ... 
y_t] dot w - * - * When t < q-1 is negative, use mean of partial sequence - * @param t the time point from which to make prediction - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - if t < q-1 then - var sum = y_(0) - for j <- 1 to t do sum += y_(t) - sum / (t+1) - else - val yy = y_(t-q+1 until t+1) - yy dot w - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. - * Intended to work with rolling validation (analog of predict method) - * FIX - not updated - * @param t the time point from which to make forecasts - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making predictions - * @param hh the max forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, y_ : VectorD, hh: Int): VectorD = - if hh < 1 then flaw ("forecast", s"horizon hh = $hh must be at least 1") - val yd = new VectorD (hh) // hold forecasts for each horizon - - for h <- 1 to hh do - val h1 = h - 1 - val pred = - if t+h < q then - val yy = yf(0 until t+h, h1) - yy.sum / yy.dim // use flat weights - else - val yy = yf(t+h-q until t+h, h1) // FIX: using the diagonal could work better - yy dot w - yf(t+h, h) = pred // forecast down the diagonal - yd(h-1) = pred // record diagonal values - end for - yd // return forecasts for each horizon - end forecast - -/* - val yd = new VectorD (h) // hold forecasts for each horizon - val sumq = new SumQueue (q) - for i <- max (0, t-q+1) to t do sumq += y(i) // y_t-q+1 + ... 
+ y_t - for k <- 1 to h do - val pred = sumq.mean - yf(t+k, k) = pred // forecast down the diagonal - yd(k-1) = pred // record diagonal values - sumq += yf(t+k, k-1) // replace oldest value with this value - end for -*/ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to FORECAST MATRIX and return h-step ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. - * @param yf the forecast matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - val h1 = h - 1 - - yf(h1, h) = yf(h1-1, h1) // first forecast is special case - - for t <- y_.indices do // make forecasts over all time points for horizon k - if t+h < q then - val yy = yf(0 until t+h, h1) - yf(t+h, h) = yy.sum / yy.dim // use flat weights - else - val yy = yf(t+h-q until t+h, h1) // FIX: using the diagonal could work better - yf(t+h, h) = yy dot w - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end WeightedMovingAverage - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `WeightedMovingAverage` companion object provides factory methods for the `WeightedMovingAverage` class. - */ -object WeightedMovingAverage: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `WeightedMovingAverage` object. 
- * @param y the response vector (time series data) - * @param tt the time points, if needed - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = SimpleMovingAverage.hp): WeightedMovingAverage = - new WeightedMovingAverage (y, tt, hparam) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backcast to predict the value prior to the start of the time series. - * @param y the response vector (time series data), a prefix suffices - * @param q the length of the moving average - */ - def backcast (y_ : VectorD): Double = - val q = SimpleMovingAverage.hp ("q").toInt - val yy = y_(0 until q+1).reverse - val mod = new WeightedMovingAverage (yy) - mod.predict (q, yy) - end backcast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Decompose a univariate time series into a moving average and a remainder. - * @see https://arxiv.org/pdf/2106.13008.pdf (Autoformer) - * @param y the response vector (time series data) - * @param tt the time points, if needed - * @param hparam the hyper-parameters - */ - def decompose (y: VectorD, tt: VectorD = null, hparam: HyperParameter = SimpleMovingAverage.hp): (VectorD, VectorD) = - val sma = new WeightedMovingAverage (y, tt, hparam) - val s = sma.predictAll (y) - s(0) = s(1) // pad by copying first avg back - (s, y - s) - end decompose - -end WeightedMovingAverage - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `weightedMovingAverageTest` main function tests the `WeightedMovingAverage` class - * on simulated data. 
- * > runMain scalation.modeling.forecasting.weightedMovingAverageTest - */ -@main def weightedMovingAverageTest (): Unit = - - val y = makeTSeries () // generate a time-series (see `Stationary`) - - banner (s"Test Predictions: WeightedMovingAverage on simulated time-series") - val mod = new WeightedMovingAverage (y) // time series model - mod.trainNtest ()() // train and test on full dataset - - banner ("Build SimpleMovingAverage Model") - val mod2 = new SimpleMovingAverage (y) // time series model - mod2.trainNtest ()() // train and test on full dataset - -end weightedMovingAverageTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `weightedMovingAverageTest2` main function is used to test the `WeightedMovingAverage` class. - * > runMain scalation.modeling.forecasting.weightedMovingAverageTest2 - */ -@main def weightedMovingAverageTest2 (): Unit = - - val y = makeTSeries ((t: Double) => t, 30, Normal ()) // generate a time-series (see `Stationary`) - - banner ("Build WeightedMovingAverage Model") - val mod = new WeightedMovingAverage (y) // time series model - mod.trainNtest ()() // train and test on full dataset - - banner ("Build SimpleMovingAverage Model") - val mod2 = new SimpleMovingAverage (y) // time series model - mod2.trainNtest ()() // train and test on full dataset - -/* - banner ("Make Forecasts") - val steps = 10 // number of steps for the forecasts - val rw_f = rw.forecast (steps) - println (s"$steps-step ahead forecasts using WeightedMovingAverage model = $rw_f") - val tf = VectorD.range (n, n + steps) - new Plot (tf, rw_f, null, s"Plot WeightedMovingAverage forecasts vs. t", true) -*/ - -end weightedMovingAverageTest2 - -import forecasting.Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `weightedMovingAverageTest3` main function is used to test the `WeightedMovingAverage` class. - * Forecasting lake levels. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.weightedMovingAverageTest3 - */ -@main def weightedMovingAverageTest3 (): Unit = - - val hh = 3 // maximum forecasting horizon - - val hp = SimpleMovingAverage.hp - hp("q") = 2 - - banner (s"Test Forecasts: WeightedMovingAverage on LakeLevels Dataset") - val mod = new WeightedMovingAverage (y) // create model for time series data - val (yp0, qof0) = mod.trainNtest ()() // train and test on full dataset - val yp = mod.predictAll (y) - Forecaster.differ (yp, yp0) - - mod.forecastAll (y, hh) // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, y, hh, true) - - banner (s"Test Forecasts: SimpleMovingAverage on LakeLevels Dataset") - val mod2 = new SimpleMovingAverage (y) // create model for time series data - val (yp2, qof2) = mod2.trainNtest ()() // train and test on full dataset - - banner (s"Test Forecasts: Random Walk on LakeLevels Dataset") - val mod3 = new RandomWalk (y) // create model for time series data - val (yp3, qof3) = mod3.trainNtest ()() // train and test on full dataset - - val yp_ = yp - val yp2_ = yp2 ++ VectorD (y.last) - val yp3_ = yp3 ++ VectorD (y.last) - println (MatrixD (y, yp_, yp2_, yp3_).transpose) - - val tf = new TestFit (y.dim) - println (tf.testDiagnose (y, yp_)) - println (tf.testDiagnose (y, yp2_)) - println (tf.testDiagnose (y, yp3_)) - - println (WeightedMovingAverage.backcast (y)) - -end weightedMovingAverageTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `weightedMovingAverageTest4` main function is used to test the `WeightedMovingAverage` class. - * Decompose the lake levels dataset. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.weightedMovingAverageTest4 - */ -@main def weightedMovingAverageTest4 (): Unit = - - import forecasting.Example_LakeLevels.y - - SimpleMovingAverage.hp("q") = 5 // number of points to average - banner ("Use WeightedMovingAverage to Decompose the Lake Level Dataset") - val (s, z) = WeightedMovingAverage.decompose (y) // time series model - new Plot (null, y, null, "original time series", lines = true) - new Plot (null, s, null, "moving average", lines = true) - new Plot (null, z, null, "remainder", lines = true) - -end weightedMovingAverageTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `weightedMovingAverageTest5` main function is used to test the `WeightedMovingAverage` class. - * > runMain scalation.modeling.forecasting.weightedMovingAverageTest5 - */ -@main def weightedMovingAverageTest5 (): Unit = - - val data = MatrixD.load ("travelTime.csv") // automatically prepends DATA_DIR - - val (t, y) = (data(?, 0), data(?, 1)) - - println (s"t.dim = ${t.dim}, y.dim = ${y.dim}") - - banner ("Build AR(1) Model") - val ar = new AR (y) // time series model - ar.trainNtest ()() // train and test on full dataset - - banner (s"Build WeightedMovingAverage model") - val mod = new WeightedMovingAverage (y) // time series model - mod.trainNtest ()() // train and test on full dataset - -end weightedMovingAverageTest5 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/index.html b/src/main/scala/scalation/modeling/forecasting_old/index.html deleted file mode 100644 index 36cee371a..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/index.html +++ /dev/null @@ -1,41 +0,0 @@ - - -

    Source files in forecasting Package

    -

    - - \ No newline at end of file diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/AR.scala.bak b/src/main/scala/scalation/modeling/forecasting_old/old/AR.scala.bak deleted file mode 100644 index 00ed61247..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/AR.scala.bak +++ /dev/null @@ -1,252 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller, Michael Cotterell - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). - * - * @title Model: Auto-Regressive - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.collection.mutable.Set -import scala.math.{max, min} - -import scalation.mathstat._ -import scalation.random.{Normal, Uniform} - -import Fit._ - -// FIX - don't use actual y values for first p predictions - compare with ARIMA - -import AR._ -import Forecaster._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` class provides basic time series analysis capabilities for Auto-Regressive - * (AR) models. In an AR(p) model, p refers to the order of the Auto-Regressive - * components of the model. AR models are often used for forecasting. - * Given time series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * y_t = δ + Σ(φ_k y_t-k) - * where δ is a constant, φ is the autoregressive coefficient vector, - * and e is the noise vector. 
- *---------------------------------------------------------------------------------- - * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class AR (y: VectorD, tt: VectorD = null, hparam: HyperParameter = AR.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = hparam("p").toInt, df = y.dim - hparam("p").toInt): - - private val debug = debugf ("AR", true) // debug function - private val flaw = flawf ("AR") // flaw function - private var m = y.dim // number of time points - private var p = hparam("p").toInt // p-th order Auto-Regressive model - private var φ = VectorD.nullv // AR(p) parameters/coefficients - private var δ = NO_DOUBLE // drift/constant term - private var trained = false // has trained been called? - - if p > MAX_LAGS then flaw ("AR", s"p = $p must not be greater than MAX_LAGS = $MAX_LAGS") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including its current hyper-parameter, e.g., AR(2). - */ - override def modelName: String = s"AR($p)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `AR` model to the times-series data in vector y_. - * Estimate the coefficient vector φ for a p-th order Auto-Regressive AR(p) model. - * z_t = φ_0 * z_t-1 + ... + φ_p-1 * z_t-p + e_t - * Uses the Durbin-Levinson Algorithm to determine the coefficients. - * The φ vector is p-th row of 'psi' matrix (ignoring the first (0th) column). 
- * @param x_null the data/input matrix (ignored) - * @param y_ the training/full response vector (defaults to y) - */ - def train (x_null: MatrixD = null, y_ : VectorD = y): Unit = - m = y_.dim - resetDF (p, m - p) - makeCorrelogram (y_) - φ = psiM(p)(1 to p+1) - δ = statsF.mu * (1 - φ.sum) - trained = true - debug ("train", s"parameters for AR($p) model: φ = $φ, δ = $δ") - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retrain/fit an `AR` model to the times-series data using another order p_ - * Estimate the coefficient vector φ for a p-th order Auto-Regressive AR(p) model. - * The φ vector is p-th row of psi matrix (ignoring the first (0th) column). - * @param p_ another order - */ - def retrain (p_ : Int): Unit = - if ! trained then flaw ("retrain", "train must be called before retrain") - p = p_ - resetDF (p, m - p) - φ = psiM(p)(1 to p+1) - δ = statsF.mu * (1 - φ.sum) - debug ("retrain", s"parameters for AR($p) model: φ = $φ, δ = $δ") - end retrain - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test an AR forecasting model y_ = f(lags (y_)) + e and return its QoF vector. - * Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the training/testing response/output vector (defaults to full y) - */ - def test (x_null: MatrixD = null, y_ : VectorD = y): VectorD = - val yp = predictAll (y_) // make predictions - val (y1, y2) = align (y_, yp) - diagnose (y1, y2) // evaluate the quality of these predictions - fit // return the QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector. 
- */ - def parameter: VectorD = φ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for time point t using 1-step ahead forecasts. - * y_t = φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (assume first value repeats). - * @param tn the time point (index) to be predicted - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - if t < 1 || t > y_.dim then flaw ("predict", s"time index t = $t is out of range") - var sum = δ - for j <- 0 until p do sum += φ(j) * y_(max (0, t-1-j)) - sum - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict values for all time points using 1-step ahead forecasts. - * Return a vector that is the predictions (zero-centered) of a p-th order - * Auto-Regressive AR(p) model. - * @see predictAll in `ForecasterVec` for uncentered results - def predictAllz (): VectorD = - val zp = new VectorD (m) // forecasts for all time points t - for t <- 0 until p do zp(t) = z(t) // copy first p actual values into zp - for t <- p until m do - var sum = 0.0 - for j <- 0 until p do sum += φ(j) * zp(max (0, t-1-j)) - zp(t) = sum - end for - zp // return vector of predicted values - end predictAllz - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all m time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, yf(?, 0) is set to y (the actual time-series values). 
- * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAll (h: Int): MatrixD = - val yf = new MatrixD (m, h+1) // forecasts for all time points t & horizons to h - yf(?, 0) = y // first column is actual values, horizon 0 - for k <- 1 to h do - val c = min (k, p) // cut point from actual to forecasted values - for t <- 0 until c do yf(t, k) = y(t) // copy first c actual values - for t <- c until m do // forecast the rest - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t-1-j), max (0, k-1-j)) - yf(t, k) = sum - end for - debug ("forecastAll", s"yf(?, $k) = ${yf(?, k)}") - end for - yf // return matrix of forecasted values - end forecastAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce h-steps ahead forecast on the testing data during cross validation. - * @param y the current response vector - * @param t the time point to be forecast - * @param h the forecasting horizon, number of steps ahead to produce forecast - override def forecastX (y: VectorD = y, t: Int = y.dim, h: Int = 1): Double = - if t > m then flaw ("forecast", "no forecasts with starting t > m are provided") - - val zf = new VectorD (p+h) - // Must calculate the z values by doing y - mu on the fly because these values are beyond the bounds of the z vector - for l <- 0 until p do zf(l) = y(max (0, t-p+l)) - stats.mu // copy first p values into zf. - - for k <- 1 to h do // advance the forecasting horizon - var sum = 0.0 - for j <- 0 until p do sum += φ(j) * zf(p-2+k-j) - zf(p-1+k) = sum - end for - zf.last + stats.mu // return the last forecast - end forecastX - */ - -end AR - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` companion object provides factory methods for the `AR` class. 
- */ -object AR: - - /** Base hyper-parameter specification for `AR` - */ - val hp = new HyperParameter - hp += ("p", 1, 1) // for the AR part - - private val flaw = flawf ("AR") // flaw function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `AR` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = AR.hp): AR = - new AR (y, tt, hparam) - end apply - -end AR - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARTest` object is used to test the `AR` class on real data: Forecasting lake - * levels. Test the test and predictAll methods over the whole times-series. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.ARTest - */ -object ARTest extends App: - - import Example_LakeLevels.y - val t = VectorD.range (1, y.dim) - - var ar: AR = null - for p <- 1 to 11 do // autoregressive hyper-parameter - hp("p") = p // set p hyper-parameter - banner (s"Test: AR($p)") - ar = new AR (y) // create model for time series data - ar.train () // train the model on full dataset - println (ar.report (ar.test ())) // test the model and report results - val yp = ar.predictAll (y) // predict 1-step ahead for all y - val (y1, y2) = align (y, yp) // compare y(1:m) with yp(0:m-1) - new Plot (t, y1, y2, s"AR($p): y-actual vs. 
y-predicted", lines = true) - end for - - banner ("Select model based on ACF and PACF") - ar.plotFunc (ar.acF, "ACF") - ar.plotFunc (ar.pacF, "PACF") - -end ARTest - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/AR.scala.bak2 b/src/main/scala/scalation/modeling/forecasting_old/old/AR.scala.bak2 deleted file mode 100644 index a6bb1d97e..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/AR.scala.bak2 +++ /dev/null @@ -1,218 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller, Michael Cotterell - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). - * - * @title Model: Auto-Regressive (AR) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ - -import AR.hp - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` class provides basic time series analysis capabilities for Auto-Regressive - * (AR) models. In an AR(p) model, p refers to the order of the Auto-Regressive - * components of the model. AR models are often used for forecasting. - * Given time-series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * y_t = δ + Σ(φ_k y_t-k) + e_t - * where δ is a constant, φ is the auto-regressive coefficient vector, - * and e_t is the noise term. 
- *---------------------------------------------------------------------------------- - * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -abstract class AR (y: VectorD, tt: VectorD = null, hparam: HyperParameter = AR.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = hparam("p").toInt, df = y.dim - hparam("p").toInt): - - private val debug = debugf ("AR", true) // debug function - private val flaw = flawf ("AR") // flaw function - - m = y.dim // number of time points (@see `FitM`) - private var p = hparam("p").toInt // p-th order Auto-Regressive model - private var φ = VectorD.nullv // AR(p) parameters/coefficients - private var δ = NO_DOUBLE // drift/intercept/constant term - private var yf = MatrixD.nullm // the forecast matrix - time points x horizons - - if p > MAX_LAGS then flaw ("init", s"p = $p must not be greater than MAX_LAGS = $MAX_LAGS") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including its current hyper-parameter, e.g., AR(2). - */ - override def modelName: String = s"AR($p)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `AR` model to the times-series data in vector y_. - * Estimate the coefficient vector φ for a p-th order Auto-Regressive AR(p) model. - * Uses Durbin-Levinson Algorithm (in `Correlogram`) to determine the coefficients. - * The φ vector is p-th row of psi matrix (ignoring the first (0th) column). 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - m = y_.dim // length of relevant time-series - resetDF (p, m - p) // reset the degrees of freedom - makeCorrelogram (y_) // correlogram computes psi matrix - φ = psiM(p)(1 to p+1) // coefficients = p-th row, columns 1, 2, ... p - δ = statsF.mu * (1 - φ.sum) // compute drift/intercept - debug ("train", s"parameters for AR($p) model: φ = $φ, δ = $δ") - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an AR forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the full/testing response/output vector (e.g., full y) - */ - def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yy, yp) = testSetup (y_) // get and align actual and predicted values - e = yy - yp // set the residuals/errors - resetDF (p, yy.dim - p) // reset the degrees of freedom - (yp, diagnose (yy, yp)) // return predictions and QoF Vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an AR forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. Testing may be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before testf. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the full/testing response/output vector (e.g., full y) - * @param redo whether to use existing forecasts or redo them (defaults to false) - */ - def testf (h: Int, y_ : VectorD, redo: Boolean = false): VectorD = - if yf == null || yf.dim2 < h+1 || redo then yf = forecastAll (h, y_) // redo all forecasts - val yy = y_(h to y_.dim) - val yf_h = yf(?, h)(h to y_.dim) // pull column h from the forecast matrix and align - resetDF (p, yy.dim - p) // reset the degrees of freedom - diagnose (yy, yf_h) // evaluate and return the QoF of these forecasts - end testf - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the AR(p) model. - */ - override def parameter: VectorD = φ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for time point/index t using 1-step ahead forecasts. - * y_t = φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time). - * @see predictAll in `Forecaster` - * @param t the time point/index to be predicted - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - if t < 1 || t > y_.dim then flaw ("predict", s"time index t = $t is out of range") - var sum = δ - for j <- 0 until p do sum += φ(j) * y_(max (0, t-1-j)) - sum // prediction for y_t - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * Forecast recurse down diagonals in the yf forecast matrix. 
- * The top right and bottom left triangles in yf matrix are not forecastable. - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - */ - def forecastAll (h: Int, y_ : VectorD): MatrixD = - yf = new MatrixD (y_.dim+h, h+1) // forecasts for all time points t & horizons to h - for t <- 0 until m do yf(t, 0) = y_(t) // first column is actual values, horizon 0 - for k <- 1 to h do - for t <- y_.indices do // make forecasts over all time points for horizon k - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t+k-1-j), max (0, k-1-j)) - yf(t+k, k) = sum // forecast down the diagonal - end for - debug ("forecastAll", s"yf(?, $k) = ${yf(?, k)}") - end for - yf // return matrix of forecasted values - end forecastAll - -end AR - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` companion object provides factory methods for the `AR` class. - */ -object AR: - - /** Base hyper-parameter specification for `AR` class - */ - val hp = new HyperParameter - hp += ("p", 1, 1) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `AR` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = AR.hp): AR = - new AR (y, tt, hparam) - end apply - -end AR - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest` main function tests the `AR` class on real data: Forecasting lake levels. - * Test the test, predictAll, testf and forecastAll methods over the whole times-series. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest - */ -@main def aRTest (): Unit = - - import Example_LakeLevels.y - - val m = y.dim - val t = VectorD.range (1, m) - val h = 3 // the forecasting horizon - - var mod: AR = null - for p <- 1 to 10 do // autoregressive hyper-parameter p - hp("p") = p // set p hyper-parameter - banner (s"Test: AR($p)") - mod = new AR (y) // create model for time series data - mod.train (null, y) // train the model on full dataset - val yp = mod.testPred (y, t) - - val yf = mod.forecastAll (h, y) // forecast h-steps ahead for all y - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") -// assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values -// assert (yf(?, 1)(1 to m+1) == yp) // column 1 must agree with one step-ahead predictions - for k <- 1 to h do - println (s"evalaute QoF for horizon $k:") - println (FitM.fitMap (mod.testf (k, y), QoF.values.map (_.toString))) // evaluate k-units ahead forecasts - end for - end for - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRTest - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/AR.scala.bak3 b/src/main/scala/scalation/modeling/forecasting_old/old/AR.scala.bak3 deleted file mode 100644 index 47b407d4d..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/AR.scala.bak3 +++ /dev/null @@ -1,289 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller, Michael Cotterell - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). 
- * - * @title Model: Auto-Regressive (AR) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ - -import Forecaster.differ -import AR.hp - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` class provides basic time series analysis capabilities for Auto-Regressive - * (AR) models. In an AR(p) model, p refers to the order of the Auto-Regressive - * components of the model. AR models are often used for forecasting. - * Given time-series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * y_t = δ + Σ(φ_k y_t-k) + e_t - * where δ is a constant, φ is the auto-regressive coefficient vector, - * and e_t is the noise term. 
- * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class AR (y: VectorD, tt: VectorD = null, hparam: HyperParameter = AR.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = hparam("p").toInt, df = y.dim - hparam("p").toInt): - - private val debug = debugf ("AR", true) // debug function - private val flaw = flawf ("AR") // flaw function - - m = y.dim // number of time points (@see `FitM`) - private var p = hparam("p").toInt // p-th order Auto-Regressive model - private var φ = VectorD.nullv // AR(p) parameters/coefficients - private var δ = NO_DOUBLE // drift/intercept/constant term -// private var yf = MatrixD.nullm // the forecast matrix - time points x horizons - - if p > MAX_LAGS then flaw ("init", s"p = $p must not be greater than MAX_LAGS = $MAX_LAGS") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including its current hyper-parameter, e.g., AR(2). - */ - override def modelName: String = s"AR($p)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `AR` model to the times-series data in vector y_. - * Estimate the coefficient vector φ for a p-th order Auto-Regressive AR(p) model. - * Uses Durbin-Levinson Algorithm (in `Correlogram`) to determine the coefficients. - * The φ vector is p-th row of psi matrix (ignoring the first (0th) column). - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - m = y_.dim // length of relevant time-series - resetDF (p, m - p) // reset the degrees of freedom - makeCorrelogram (y_) // correlogram computes psi matrix - φ = psiM(p)(1 to p+1) // coefficients = p-th row, columns 1, 2, ... 
p - δ = statsF.mu * (1 - φ.sum) // compute drift/intercept - debug ("train", s"parameters for AR($p) model: φ = $φ, δ = $δ") - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an AR forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. - * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the training/testing/full response/output vector - */ - def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yy, yp) = testSetup (y_) // get and align actual and predicted values - resetDF (1, yy.dim - 1) // reset the degrees of freedom - e = yy - yp // determine error/residual vector - println (s"test: yy.dim = ${yy.dim}, yp.dim = ${yp.dim}") -// differ (yy, yp) // uncomment for debugging - (yp, diagnose (yy, yp)) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an AR forecasting model y_ = f(lags (y_)) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD) = - val (yy, yfh) = testSetupH (y_, h) // get and align actual and forecasted values - resetDF (1, yy.dim - 1) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// differ (yy, yfh) // uncomment for debugging - (yfh, diagnose (yy, yfh)) // return predictions and QoF vector - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the AR(p) model. - */ - override def parameter: VectorD = φ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for time point/index t using 1-step ahead forecasts. - * y_t = φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time). - * @see predictAll in `Forecaster` - * @param t the time point/index to be predicted - * @param y_ the actual values to use in making predictions - * - def predict (t: Int, y_ : VectorD): Double = - if t < 1 || t > y_.dim then flaw ("predict", s"time index t = $t is out of range") - var sum = δ - for j <- 0 until p do sum += φ(j) * y_(max (0, t-1-j)) - sum // prediction for y_t - end predict - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * Forecast recurse down diagonals in the yf forecast matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. 
- * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - * - def forecastAll (h: Int, y_ : VectorD): MatrixD = - yf = new MatrixD (y_.dim+h, h+1) // forecasts for all time points t & horizons to h - for t <- 0 until m do yf(t, 0) = y_(t) // first column is actual values, horizon 0 - for k <- 1 to h do - for t <- y_.indices do // make forecasts over all time points for horizon k - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t+k-1-j), max (0, k-1-j)) - yf(t+k, k) = sum // forecast down the diagonal - end for - debug ("forecastAll", s"yf(?, $k) = ${yf(?, k)}") - end for - yf // return matrix of forecasted values - end forecastAll - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to forecasting matrix and return h-step ahead forecast. - * @param yf the forecasting matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - for t <- y_.indices do // make forecasts over all time points for horizon k - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t+h-1-j), max (0, h-1-j)) - yf(t+h, h) = sum // forecast down the diagonal - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end AR - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` companion object provides factory methods for the `AR` class. - */ -object AR: - - /** Base hyper-parameter specification for `AR` class - */ - val hp = new HyperParameter - hp += ("p", 1, 1) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `AR` object. 
- * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = AR.hp): AR = - new AR (y, tt, hparam) - end apply - -end AR - -import Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest` main function tests the `AR` class on real data: Forecasting lake levels. - * Test predictions (one step ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest - */ -@main def aRTest (): Unit = - - banner (s"Test Predictions: AR(1) on LakeLevels Dataset") - val mod = new AR (y) // create model for time series data AR(1) - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest2` main function tests the `AR` class on real data: Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest2 - */ -@main def aRTest2 (): Unit = - - val m = y.dim // number of data points - val hh = 3 // maximum forecasting horizon - - banner (s"Test Forecasts: AR(1) on LakeLevels Dataset") - val mod = new AR (y) // create model for time series data AR(1) - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report of Quality of Fit (QoF) - - val yf = mod.forecastAll (y, hh) // forecast h-steps ahead (h = 1 to hh) for all y - println (s"yf = $yf") - println (s"y.dim = ${y.dim}, yp.dim = ${yp.dim}, yf.dims = ${yf.dims}") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - differ (yf(?, 1)(1 until m), yp) - assert (yf(?, 1)(1 until m) == yp) // column 1 must agree with one step-ahead predictions - - for h <- 1 to hh do - val (yfh, qof) = mod.testF (h, y) // h-steps ahead forecast and its QoF - println (s"Evaluate QoF for horizon $h:") - println (FitM.fitMap (qof, QoF.values.map (_.toString))) // evaluate h-steps ahead forecasts - end for - -end aRTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest3` main function tests the `AR` class on real data: Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts). Try multiple values for p. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest3 - */ -@main def aRTest3 (): Unit = - - val m = y.dim // number of data points - val hh = 3 // maximum forecasting horizon - - var mod: AR = null - for p <- 1 to 10 do // autoregressive hyper-parameter p - hp("p") = p // set p hyper-parameter - banner (s"Test Forecasts: AR($p) on LakeLevels Dataset") - mod = new AR (y) // create model for time series data - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report of Quality of Fit (QoF) - - val yf = mod.forecastAll (y, hh) // forecast h-steps ahead for all y - println (s"yf = $yf") - println (s"y.dim = ${y.dim}, yp.dim = ${yp.dim}, yf.dims = ${yf.dims}") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - differ (yf(?, 1)(1 until m), yp) - assert (yf(?, 1)(1 until m) == yp) // column 1 must agree with one step-ahead predictions - - for h <- 1 to hh do - val (yfh, qof) = mod.testF (h, y) // h-steps ahead forecast and its QoF - println (s"Evaluate QoF for horizon $h:") - println (FitM.fitMap (qof, QoF.values.map (_.toString))) // evaluate h-steps ahead forecasts - end for - end for - -end aRTest3 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/AR.scala.bak4 b/src/main/scala/scalation/modeling/forecasting_old/old/AR.scala.bak4 deleted file mode 100644 index 92cb73cdb..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/AR.scala.bak4 +++ /dev/null @@ -1,456 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller, Michael Cotterell - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). 
- * - * @title Model: Auto-Regressive (AR) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ - -import Forecaster.differ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` class provides basic time series analysis capabilities for Auto-Regressive - * (AR) models. In an AR(p) model, p refers to the order of the Auto-Regressive - * components of the model. AR models are often used for forecasting. - * Given time series data stored in vector y, its next value y_t+1 = y(t+1) - * may be predicted based on prior values of y and its noise: - * y_t+1 = δ + Σ(φ_j y_t-j) + e_t+1 - * where δ is a constant, φ is the auto-regressive coefficient vector, - * and e_t+1 is the noise term. 
- * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class AR (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARMA.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = hparam("p").toInt, df = y.dim - hparam("p").toInt): - - private val debug = debugf ("AR", true) // debug function - private val flaw = flawf ("AR") // flaw function - private val p = hparam("p").toInt // p-th order Auto-Regressive model - private var φ = VectorD.nullv // AR(p) parameters/coefficients - private var δ = NO_DOUBLE // drift/intercept/constant term - private val pnq = p // sum of # parameters - private var calPhi = true // caluculate phi vector - not externally supplied - - if p > MAX_LAGS then flaw ("init", s"p = $p must not be greater than MAX_LAGS = $MAX_LAGS") - - modelName = s"AR($p)" - - def setPhi (phi: VectorD): Unit = - φ = phi - calPhi = false - end setPhi - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `AR` model to the times-series data in vector y_. - * Estimate the coefficient vector φ for a p-th order Auto-Regressive AR(p) model. - * Uses Durbin-Levinson Algorithm (in `Correlogram`) to determine the coefficients. - * The φ vector is p-th row of psi matrix (ignoring the first (0th) column). - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - m = y_.dim // length of relevant time series - resetDF (pnq, m - pnq) // reset the degrees of freedom - makeCorrelogram (y_) // correlogram computes psi matrix - if calPhi then φ = psiM(p)(1 until p+1) // coefficients = p-th row, columns 1, 2, ... 
p - δ = statsF.mu * (1 - φ.sum) // compute drift/intercept - debug ("train", s"parameters for AR($p) model: φ = $φ, δ = $δ") - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an AR forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the testing/full response/output vector - */ - def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yy, yp) = testSetup (y_) // get and align actual and predicted values - resetDF (pnq, yy.dim - pnq) // reset the degrees of freedom - println (s"test: yy.dim = ${yy.dim}, yp.dim = ${yp.dim}") -// differ (yy, yp) // uncomment for debugging - (yp, diagnose (yy, yp)) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an AR forecasting model y_ = f(lags (y_)) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, h) // get and align actual and forecasted values - resetDF (pnq, yy.dim - pnq) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// differ (yy, yfh) // uncomment for debugging - (yfh, diagnose (yy, yfh)) // return predictions and QoF vector - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the AR(p) model. - */ - override def parameter: VectorD = φ :+ δ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * y_t+1 = φ_0 y_t + φ_1 y_t-1 + ... + φ_p-1 y_t-(p-1) - * When t-j is negative, use y_0 - * @param t the time point from which to make prediction - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - var sum = δ // intercept - for j <- 0 until p do sum += φ(j) * y_(max (0, t-j)) // add φ_j y_t-j - sum - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. 
- * Intended to work with rolling validation (analog of predict method) - * @param t the time point from which to make forecasts - * @param yf the forecasting matrix (time x horizons) - * @param y_ the actual values to use in making predictions - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - val yd = new VectorD (h) // hold forecasts for each horizon - for k <- 1 to h do - val t1 = t + k - 1 // time point prior to horizon - val sum0 = δ + rdot (φ, yf, t1, k-1) - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t1-j), max (0, k-1-j)) - assert (sum0 == sum) - yf(t+k, k) = sum // forecast down the diagonal - yd (k-1) = sum // record diagonal values - end for - yd // return forecasts for each horizon - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute a reverse dot product of the parameter vector b and a diagonal - * of the the yf matrix starting at element (r, c) and moving up and back. - * Use max (0, ..) to avoid using negative indices into the yf matrix. - * @param b the parameter/cofficient vector (e.g., φ for AR) - * @param yf the forecasting matrix (time x horizons) - * @param r the starting row in the forecasting matrix - * @param c the starting column in the forecasting matrix - */ - def rdot (b: VectorD, yf: MatrixD, r: Int, c: Int): Double = - var sum = 0.0 - for j <- b.indices do sum += b(j) * yf(max (0, r-j), max (0, c-j)) - sum - end rdot - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to forecasting matrix and return h-step ahead forecast. - * For 1-step ahead (h = 1), - * y_t = δ + φ_0 y_t-1 + φ_1 y_t-2 + ... 
+ φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time). - * @param yf the forecasting matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 1 then flaw ("forecastAt", s"horizon h = $h must be at least 1") - for t <- y_.indices do // make forecasts over all time points for horizon h - val t1 = t + h - 1 // time point prior to horizon - val sum0 = δ + rdot (φ, yf, t1, h-1) - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t1-j), max (0, h-1-j)) - println (s"sum0 = $sum0, sum = $sum") - assert (sum0 =~ sum) - yf(t+h, h) = sum // forecast down the diagonal - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end AR - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` companion object provides factory methods for the `AR` class. - * Use `ARMA` for hyper-parameters. - */ -object AR: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `AR` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARMA.hp): AR = - new AR (y, tt, hparam) - end apply - -end AR - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest` main function tests the `AR` class on simulated data. - * Test predictions (one step ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest - */ -@main def aRTest (): Unit = - - val y = makeTSeries () // create simulated time series (see `Stationary`) - - banner (s"Test Predictions: AR(1) on simulated time series") - val mod = new AR (y) // create model for time series data AR(1) - mod.trainNtest ()() // train and test on full dataset - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRTest - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest2` main function tests the `AR` class on simulated data. - * Test predictions (one step ahead forecasts). - * > runMain scalation.modeling.forecasting.aRTest2 - */ -@main def aRTest2 (): Unit = - - import scala.math.sqrt - - import ActivationFun.f_sigmoid.{fM, dM} - import neuralnet.{NeuralNet_3L, Optimizer} - - val y = VectorD (1, 2, 4, 7, 9, 8, 6, 5, 3) // create a time series by hand - - val m = y.dim - val mu_y = y.mean // mean for full series - - def rho (k: Int): Double = - var s = 0.0 - var q = 0.0 - for t <- 0 until y.dim-k do - s += (y(t) - mu_y) * (y(t+k) - mu_y) - for t <- 0 until y.dim do - q += (y(t) - mu_y)~^2 - s / q - end rho - - val yB1 = y(1 until m) // apply back-shift operator - val yy = y(0 until m-1) // y clipped to match the size of yB1 - val zz = yy - yy.mean - val zB1 = yB1 - yB1.mean - val r1 = (zz dot zB1) / sqrt ((zz dot zz) * (zB1 dot zB1)) // lag-1 auto-correlation - println (s"mu_y = $mu_y") - println (s"zz = $zz") - println (s"zB1 = $zB1") - println (s"r1 = $r1") - println (s"rho1 = ${rho(1)}") - println (s"rho2 = ${rho(2)}") - - banner (s"Test Predictions: AR(1) on hand created time series") - var mod = new AR (y) // create model for time series data AR(1) - mod.setPhi (VectorD (0.6)) // allows coeficients to be user specified - 
mod.trainNtest ()() // train and test on full dataset - - banner (s"Test Predictions: AR(2) on hand created time series") - ARMA.hp("p") = 2 - mod = new AR (y) // create model for time series data AR(2) - mod.trainNtest ()() // train and test on full dataset - - val x = MatrixD ((9, 3), 1, 1, 8, - 1, 2, 7, - 1, 3, 6, - 1, 4, 5, - 1, 5, 5, - 1, 6, 4, - 1, 7, 4, - 1, 8, 3, - 1, 9, 2) - - banner (s"Test Predictions: Regression on hand created time series") - val reg = new Regression (x, y) - val (yp, qof) =reg.trainNtest ()() // train and test on full dataset - println (reg.summary ()) - new Plot (null, y, yp, "Regression", lines = true) - - banner (s"Test Predictions: NeuralNet_3L on hand created time series") - val x_ = x(?, 1 until 3) - val y_ = MatrixD.fromVector (y) - val a = MatrixD.fill (2, 2, 0.1) // weight matrix A - val b = MatrixD.fill (2, 1, 0.1) // weight matrix B - val ab = VectorD.fill (2)(0.1) // bias vector alpha - val bb = VectorD.fill (1)(0.1) // bias vector beta - val u = x_ * a + ab // hidden layer pre-activation - val z = fM (u) // hidden layer (use sigmoid) - val v = z * b + bb // output layer pre-activation - val yp_ = v // output layer (use id) - val e = yp_ - y_ // negative error - val d1 = e *~ dM (v) // delta 1: output -> hidden - val d0 = (d1 * b.transpose) *~ dM (z) // delta 0: hidden -> input - - println (s"u = $u, z = $z, v = $v, yp_ = $yp_, e = $e, d1 = $d1, d0 = $d0") - - Optimizer.hp ("eta") = 1.0 - val nn3 = new NeuralNet_3L (x(?, 1 until 3), MatrixD.fromVector (y), nz = 2) - val (yq, q0f) = nn3.trainNtest ()() // train and test on full dataset -// val (yq, q0f) = nn3.trainNtest2 ()() // train and test on full dataset - auto eta - nn3.opti.plotLoss ("NeuralNet_3L") - new Plot (null, y, yq(?, 0), "NeuralNet_3L", lines = true) - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRTest2 - 
-import Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest3` main function tests the `AR` class on real data: Forecasting lake levels. - * Test predictions (one step ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest3 - */ -@main def aRTest3 (): Unit = - - banner (s"Test Predictions: AR(1) on LakeLevels Dataset") - val mod = new AR (y) // create model for time series data AR(1) - mod.trainNtest ()() // train and test on full dataset - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest4` main function tests the `AR` class on real data: Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest4 - */ -@main def aRTest4 (): Unit = - - val m = y.dim // number of data points - val hh = 2 // maximum forecasting horizon - - ARMA.hp("p") = 3 - banner (s"Test Forecasts: AR(1) on LakeLevels Dataset") - val mod = new AR (y) // create model for time series data AR(1) - val (yp, qof) = mod.trainNtest ()() // train and test on full dataset - - val yf = mod.forecastAll (y, hh) // forecast h-steps ahead (h = 1 to hh) for all y - println (s"y.dim = ${y.dim}, yp.dim = ${yp.dim}, yf.dims = ${yf.dims}") - println (s"yf = $yf") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - differ (yf(?, 1)(1 until m), yp) - assert (yf(?, 1)(1 until m) == yp) // column 1 must agree with one step-ahead predictions - - for h <- 1 to hh do - val (yfh, qof) = mod.testF (h, y) // h-steps ahead forecast and its QoF - val yy = y(h until m) // actual response aligned with yfh - println (s"Evaluate QoF for horizon $h:") - println (FitM.fitMap (qof, QoF.values.map (_.toString))) // evaluate h-steps ahead forecasts - println (s"Fit.mae (y, yfh, h) = ${Fit.mae (y, yfh, h)}") // evaluate h-steps ahead forecasts with MAE - println (s"Fit.mae_n (y, 1) = ${Fit.mae_n (y, 1)}") // evaluate h-steps ahead forecasts with MAE_n - println (s"Fit.mase (y, yfh, h) = ${Fit.mase (y, yfh, h)}") // evaluate h-steps ahead forecasts with MASE - end for - -end aRTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest5` main function tests the `AR` class on real data: Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts). Try multiple values for p. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest5 - */ -@main def aRTest5 (): Unit = - - val m = y.dim // number of data points - val hh = 2 // maximum forecasting horizon - - var mod: AR = null - for p <- 1 to 7 do // autoregressive hyper-parameter p - ARMA.hp("p") = p // set p hyper-parameter - banner (s"Test: AR($p) on LakeLevels Dataset") - mod = new AR (y) // create model for time series data - val (yp, qof) = mod.trainNtest ()() // train and test on full dataset - - val yf = mod.forecastAll (y, hh) // forecast h-steps ahead for all y -// println (s"yf = $yf") - println (s"y.dim = ${y.dim}, yp.dim = ${yp.dim}, yf.dims = ${yf.dims}") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - differ (yf(?, 1)(1 until m), yp) - assert (yf(?, 1)(1 until m) == yp) // column 1 must agree with one step-ahead predictions - - for h <- 1 to hh do - val (yfh, qof) = mod.testF (h, y) // h-steps ahead forecast and its QoF - println (s"Evaluate QoF for horizon $h:") - println (FitM.fitMap (qof, QoF.values.map (_.toString))) // evaluate h-steps ahead forecasts - end for - end for - -end aRTest5 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest6` main function tests the `AR` class on real data: Forecasting Weekly Covid-19. - * Test forecasts (1 to h steps ahead forecasts). Try multiple values for p. 
- * > runMain scalation.modeling.forecasting.aRTest6 - */ -@main def aRTest6 (): Unit = - - val y = Example_Covid.loadData_y ("new_deaths") - val m = y.dim // number of data points - val hh = 2 // maximum forecasting horizon - - println (s"y.dim = ${y.dim}") - - var mod: AR = null - for p <- 1 to 12 do // autoregressive hyper-parameter p - ARMA.hp("p") = p // set p hyper-parameter - banner (s"Test: AR($p) on Covid-19 Weekly Dataset") - mod = new AR (y) // create model for time series data - val (yp, qof) = mod.trainNtest ()() // train and test on full dataset - - val yf = mod.forecastAll (y, hh) // forecast h-steps ahead for all y -// println (s"yf = $yf") - println (s"y.dim = ${y.dim}, yp.dim = ${yp.dim}, yf.dims = ${yf.dims}") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - differ (yf(?, 1)(1 until m), yp) - assert (yf(?, 1)(1 until m) == yp) // column 1 must agree with one step-ahead predictions - - for h <- 1 to hh do - val (yfh, qof) = mod.testF (h, y) // h-steps ahead forecast and its QoF - println (s"Evaluate QoF for horizon $h:") - println (FitM.fitMap (qof, QoF.values.map (_.toString))) // evaluate h-steps ahead forecasts - end for - end for - -end aRTest6 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/AR1MA.scala.bak b/src/main/scala/scalation/modeling/forecasting_old/old/AR1MA.scala.bak deleted file mode 100644 index 1e89a07bb..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/AR1MA.scala.bak +++ /dev/null @@ -1,352 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Hao Peng - * @version 2.0 - * @date Thu May 26 18:06:08 EDT 2022 - * @see LICENSE (MIT style license file). 
- * - * @note Model: Auto-Regressive, Integrated (0 or 1), Moving Average (AR1MA) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scalation.mathstat._ - -import ARIMA_diff._ -import Forecaster.differ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Return the first difference of the time-series y, giving the velocity v_t = y_t+1 - y_t. - * @param y the original time-series to be differenced - */ -//def del (y: VectorD): VectorD = VectorD (for t <- 0 until y.dim - 1 yield y(t+1) - y(t)) - -//inline def Δ (y: VectorD): VectorD = del (y) - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Return the undifferenced time-series from the velocity series. - * @param v the differenced time-series (velocity) - * @param y0 the first value in the original time-series - * -def undel (v: VectorD, y0: Double): VectorD = - val y = new VectorD (v.dim + 1) - y(0) = y0 - for t <- 1 until y.dim do y(t) = v(t-1) + y(t-1) - y -end undel - */ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR1MA` class provides basic time-series analysis capabilities for Auto- - * Regressive 'AR' Integrated 'I' Moving-Average 'MA' models. In an - * AR1MA(p, q) model, p and q refer to the order of the Auto-Regressive and - * Moving-Average components of the model; d=1 refers to the order of differencing. - * Works by taking the first difference and delegating to the `ARMA` class. - * Also works for d=0 (no differencing). 
- * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param diffr whether to take a first difference (defaults to true) - */ -class AR1MA (y: VectorD, tt: VectorD = null, hparam: HyperParameter = SARIMAX.hp, - diffr: Boolean = true) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = hparam("p").toDouble, df = y.dim - pq (hparam)): - - private val debug = debugf ("AR1MA", true) // debug function - private val p = hparam("p").toInt // p-th order Auto-Regressive model - private val q = hparam("q").toInt // q-th order Moving-Average model - private val v = if diffr then Δ(y) else y // first difference of the full time-series - private val arma = new ARMA (v, tt, hparam) // delegate to the `ARMA` class - - arma.modelName = s"AR1MA($p, $q)" // rename delegate ARMA to match - modelName = arma.modelName // use same name for AR1MA - - new Plot (null, y, null, s"Plot $modelName: y vs. t", lines = true) - if diffr then new Plot (null, v, null, s"Plot $modelName: v = Δ(y) vs. t", lines = true) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Pick one of the following vectors: v full first difference, u differenced, or u itself. - * @param u the input time-series vector - */ - def pick (u: VectorD): VectorD = - if u == y then v // passed in original full time-series - else if diffr then Δ(u) // sub-series differenced - else u // sub-series as is - end pick - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `AR1MA` model to the times-series data in vector y_. - * Estimate the coefficient vectors φ and θ for (p, q)-th order AR1MA(p, q) model. 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - override def train (x_null: MatrixD, y_ : VectorD): Unit = - arma.train (x_null, pick (y_)) - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an AR1MA forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/testing/full response/output vector - */ - def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - arma.test (x_null, pick (y_)) - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train and test the forecasting model y_ = f(y-past) + e and report its QoF - * and plot its predictions. Return the predictions and QoF. - * @param y_ the training/full response/output vector (defaults to full y) - * @param yy the testing/full response/output vector (defaults to full y) - */ - override def trainNtest (y_ : VectorD = y)(yy: VectorD = y): (VectorD, VectorD) = - arma.trainNtest (pick (y_))(pick (yy)) - end trainNtest - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an AR1MA forecasting model y_ = f(lags (y_)) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = - arma.testF (h, pick (y_)) // return aligned actual, forecasted and qof vectors - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the AR1MA(p, q) model. - */ - override def parameter: VectorD = arma.parameter - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * y_t+1 = f (y_t, ...) - * @param t the time point from which to make prediction - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - arma.predict (t, pick (y_)) - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict all values corresponding to the given vector y_. - * @param y_ the actual values to use in making predictions - */ - override def predictAll (y_ : VectorD): VectorD = - arma.predictAll (pick (y_)) - end predictAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. 
- * Intended to work with rolling validation (analog of predict method) - * @param t the time point from which to make forecasts - * @param yf the forecasting matrix (time x horizons) - * @param y_ the actual values to use in making predictions - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, y_ : VectorD, h: Int): VectorD = - arma.forecast (t, yf, pick (y_), h) - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to forecasting matrix and return h-step ahead forecast. - * @param yf the forecasting matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - arma.forecastAt (yf, pick (y_), h) - end forecastAt - - //////////////////////////////////////////////////////////////////////////////// - // Make predictions/forecasts on the original scale time-series (not differenced). - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * y_t+1 = f (y_t, ...) + e_t+1 - * @param t the time point from which to make prediction - * @param y_ the actual values to use in making predictions - */ - def predict2 (t: Int, y_ : VectorD): Double = - arma.predict (t, pick (y_)) + (if diffr then y_(t) else 0.0) - end predict2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict all values corresponding to the given vector y_. 
- * @param y_ the actual values to use in making predictions - */ - def predictAll2 (y_ : VectorD, show: Boolean = true): VectorD = - val yp = new VectorD (y_.dim) - yp(0) = y_(0) - for t <- 0 until y_.dim-1 do yp(t+1) = predict2 (t, y_) - if show then -// println (FitM.fitMap (diagnose (y_, yp), qoF_names)) - println (s"nparams = $nparams") - resetDF (nparams - 1, y_.dim - nparams) - println (report (diagnose (y_, yp))) // report on Quality of Fit (QoF) - println (s"mase = ${Fit.mase (y, yp)}") // Means Absolute Scaled Error - new Plot (null, y_, yp, "Plot y, yp vs. t", lines = true) - end if - yp - end predictAll2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to forecasting matrix and return h-step ahead forecast. - * @param yf the forecasting matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt2 (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - val yfh = arma.forecastAt (yf, pick (y_), h) - if diffr then yfh + y_ else yfh - end forecastAt2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * Forecast recursively down diagonals in the yf forecasting matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. 
- * @param y_ the actual values to use in making forecasts - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAll2 (y_ : VectorD, h: Int): MatrixD = - debug ("forecastAll2", s"y_.dim = ${y_.dim}, e.dim = ${e.dim}") - yf = new MatrixD (y_.dim+h, h+2) // forecasts for all time points t & horizons to h - for t <- y_.indices do yf(t, 0) = y_(t) // first column is the timestep (e.g., logical day) - for k <- 1 to h do forecastAt2 (yf, y_, k) // forecast k-steps into the future - for t <- yf.indices do yf(t, h+1) = t // last column is time (logical day) - yf // return matrix of forecasted values - end forecastAll2 - -end AR1MA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aR1MATest` main function tests the `AR1MA` class on simulated data. - * Test predictions (one step ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aR1MATest - */ -@main def aR1MATest (): Unit = - - val y = makeTSeries () // create simulated time-series (see `Stationary`) - - banner (s"Test Predictions: AR1MA(1, 0) on simulated time-series") - val mod = new AR1MA (y) // create model for time-series data AR(1) - mod.trainNtest ()() // train and test on full dataset - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aR1MATest - -import Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aR1MATest2` main function tests the `AR1MA` class on real data: Forecasting lake levels. 
- * Test predictions (one step ahead forecasts) with no differencing - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aR1MATest2 - */ -@main def aR1MATest2 (): Unit = - - import SARIMAX.hp - - // d = 0 (no differencing) => should give same results as ARMA (@see `aRMATest4`) - - for p <- 1 to 3; q <- 0 to 2 do - hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters - val mod = new AR1MA (y, diffr = false) // create model for time-series data AR1MA(1, 0) - banner (s"Test Predictions: ${mod.modelName} (d=0) on LakeLevels Dataset") - val (vp, qof) = mod.trainNtest ()() // test and test the model on full dataset - val yp = mod.predictAll2 (y) // results on original scale - - new Plot (null, y, yp, s"Plot: ${mod.modelName} predictAll2: y, yp vs t", lines = true) - end for - -end aR1MATest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aR1MATest3` main function tests the `AR1MA` class on real data: Forecasting lake levels. - * Test predictions (one step ahead forecasts) taking one difference. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aR1MATest3 - */ -@main def aR1MATest3 (): Unit = - - import SARIMAX.hp - - val v = Δ (y) // take the first difference of time-series y - differ (y, undiff (v, y)) // verify recovery of original times-series -// differ (y, undel (v, y(0))) // verify recovery of original times-series - - for p <- 1 to 7; q <- 0 to 2 do - hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters - val mod = new AR1MA (y) // create model for time-series data AR1MA(1, 0) - banner (s"Test Predictions: ${mod.modelName} (d=1) on LakeLevels Dataset") - val (vp, qof) = mod.trainNtest ()() // test and test the model on full dataset - val yp = mod.predictAll2 (y) // results on original scale - - new Plot (null, y, yp, s"Plot: ${mod.modelName} predictAll2: y, yp vs t", lines = true) - end for - -end aR1MATest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aR1MATest4` main function tests the `AR1MA` class on real data: - * Forecasting COVID-19. 
- * > runMain scalation.modeling.forecasting.aR1MATest4 - */ -@main def aR1MATest4 (): Unit = - - import SARIMAX.hp - - val data = MatrixD.load ("covid_19.csv", 1, 1) // skip first row (header) and first column - val yy = data(?, 4) // column 4 is daily deaths -// val yy = data(?, 5) // column 5 is daily deaths smoothed - val is = yy.indexWhere (_ >= 2.0) // find day of first death with at least 2 deaths - println (s"is = $is is first day with at least 2 deaths") - val y = yy(is until yy.dim) // slice out days before is - -// val h = 2 // forecasting horizon - for p <- 1 to 5; q <- 1 to 3 do // AR1MA hyper-parameter settings - hp("p") = p; hp("q") = q - val mod = new AR1MA (y) // create an AR1MA model - val (vp, qof) = mod.trainNtest ()() // train and the model on full dataset - val yp = mod.predictAll2 (y) // results on original scale - println (s"yp = $yp") - -/* - val yfa = mod.forecastAll (y, h) - val yf = yfa(?, h) // forecasted values - h steps ahead - new Plot (null, y, yf, s"Plot of y & yf, forecasted (h = $h) ${mod.modelName} vs. t", true) -*/ - - end for - -end aR1MATest4 diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/ARIMA.scala.bak b/src/main/scala/scalation/modeling/forecasting_old/old/ARIMA.scala.bak deleted file mode 100644 index a26e0dd8d..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/ARIMA.scala.bak +++ /dev/null @@ -1,268 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sat Jul 31 13:20:29 EDT 2021 - * @see LICENSE (MIT style license file). 
- * - * @title Model: Auto-Regressive, Integrated, Moving-Average (ARIMA) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ -import scalation.optimization._ - -import ARIMA.hp - -val flaw = flawf ("forecasting") // flaw function - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Take the 1-st difference of vector/time-series y. - * Note, it stores the first value in the original times-series in the first - * position of the differenced vector. - * @param y the vector/time-series to be differenced - */ -def diff (y: VectorD): (Double, VectorD) = - val yd = new VectorD (y.dim-1) - for i <- yd.indices do yd(i) = y(i+1) - y(i) - (y(0), yd) -end diff - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Take the 1-st inverse-difference of vector/time-series x. - * Restores the original time-series if x(0) holds first value in original time-series. - * @param x the vector/time-series to be inverse-differenced - */ -def diffinv (y0: Double, yd: VectorD): VectorD = - val y = new VectorD (yd.dim+1) - y(0) = y0 - for i <- 1 until y.dim do y(i) = yd(i-1) + y(i-1) - y -end diffinv - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Take the 'd'-th difference on vector/time-series 'y'. For efficiency, - * this method is destructive of 'y' (make a copy to preserve). 
- * @param y the vector/time-series to be differenced - * @param d the order or number of differences to be taken -def diff (y: VectorD, d: Int): VectorD = - if d < 1 then flaw ("diff", s"requires the number of differences $d > 0") - for k <- 1 to d do diff (y) - y -end diff - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Take the 'd'-th inverse-difference on vector/time-series 'y'. For efficiency, - * this method is destructive of 'y' (make a copy to preserve). - * Restores the original time-series if 'y(0)' holds first value in original time-series. - * @param y the vector/time-series to be inverse-differenced - * @param d the order or number of inverse-differences to be taken -def diffinv (y: VectorD, d: Int): VectorD = - if d < 1 then flaw ("diffinv", s"requires the number of inverse-differences $d > 0") - for k <- 1 to d do diffinv (y) - y -end diffinv - */ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMA` class provides basic time series analysis capabilities for Auto-Regressive, - * Integrated, Moving-Average (ARIMA) models. In an ARIMA(p, q) model, p refers to the - * order of the Auto-Regressive components, d refers to the number of differences, - * and q refers to the Moving-Average compoenest of the model. - * ARIMA models are often used for forecasting. - * Given time-series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * diff_d (y_t) = δ + Σ(φ_k y_t-k) + Σ(θ_k e_t-k) + e_t - * where δ is a constant, φ is the auto-regressive coefficient vector, - * θ is the moving-average vector, and e_t is the noise term. 
- *---------------------------------------------------------------------------------- - * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class ARIMA (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARIMA.hp) - extends ARMA (diff (y)._2, tt, hparam): -// extends ARMA (diff (y, hparam("d").toInt), tt, hparam): - - private val debug = debugf ("ARIMA", true) // debug function - private val flaw = flawf ("ARIMA") // flaw function - private val d = hparam("d").toInt // the number of differences - private val (y0, yd) = diff (y) - - assert (getY == yd) - - if p > MAX_LAGS then flaw ("ARIMA", s"p = $p must not be greater than MAX_LAGS = $MAX_LAGS") - - debug ("constructor", s"d = $d, y0 = $y0, diff (yd) = $yd") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including its current hyper-parameter, e.g., ARIMA(3, 1, 2). - */ - override def modelName: String = s"ARIMA($p, $d, $q)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict all values for a time-series using 1-step ahead forecasts. - * y_t = φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * @see predictAll in `Forecaster` - * @param y_ the actual values to use in making predictions - */ - override def predictAll (y_ : VectorD): VectorD = - super.predictAll (y_) - end predictAll - - def predictAll2 (y_ : VectorD): VectorD = - val yd = super.predictAll (y_) - val yy = new VectorD (yd.dim + 1) - yy(0) = y0 - for i <- 1 until yy.dim do yy(i) = yy(i-1) + yd(i-1) - println (s"predictAll2: yy = $yy") - yy - end predictAll2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an ARMA forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. 
Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the training/testing response/output vector (e.g., full y) - */ - def test2 (x_null: MatrixD, y_ : VectorD): VectorD = - val yp = predictAll2 (yd) // make predictions - val yy = y_(1 to y_.dim) - val yyp = yp(0 to y_.dim-1) // align actual and predicted vectors - - resetDF (p, yy.dim - p) // reset the degrees of freedom - diagnose (yy, yp) // evaluate and return the QoF of these predictions - end test2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * Forecast recurse down diagonals in the yf forecast matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - */ - override def forecastAll (h: Int, y_ : VectorD): MatrixD = - val yy = super.forecastAll (h, y_) -// MatrixD (for j <- yf.indices2 yield diffinv (yy(?, j))).transpose - yy - end forecastAll - -end ARIMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMA` companion object provides factory methods for the `ARIMA` class. - */ -object ARIMA: - - /** Base hyper-parameter specification for `ARIMA` class - */ - val hp = new HyperParameter - hp += ("p", 1, 1) - hp += ("d", 1, 1) - hp += ("q", 1, 1) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARIMA` object. 
- * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARIMA.hp): ARIMA = - new ARIMA (y, tt, hparam) - end apply - -end ARIMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMATest` object is used to test the `ARIMA` class on real data: Forecasting lake levels. - * Test the test, predictAll, testf and forecastAll methods over the whole times-series. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.ARIMATest - */ -object ARIMATest extends App: - - import Example_LakeLevels.y - val m = y.dim - val t = VectorD.range (1, m) - val h = 3 // the forecasting horizon - val d = 1; hp("d") = d // differencing hyper-parameter d - val q = 1; hp("q") = q // moving-average hyper-parameter q - - var ar: ARIMA = null - for p <- 1 to 1 do // autoregressive hyper-parameter p - hp("p") = p // set p hyper-parameter - banner (s"Test: ARIMA($p, $d, $q}") - ar = new ARIMA (y) // create model for time series data - val yd = ar.getY - ar.train (null, yd) // train the model on full dataset - - banner (s"Test: ARIMA($p, $d, $q} Differenced") - println (ar.report (ar.test (null, yd))) // test the model and report results - val ydp = ar.predictAll (yd) // predict 1-step ahead for all y - val yyd = yd(1 to yd.dim) - new Plot (t, yyd, ydp, s"ARIMA($p, $d, $q): yd-actual vs. yd-predicted", lines = true) - - banner (s"Test: ARIMA($p, $d, $q} Undifferenced") - println (ar.report (ar.test2 (null, yd))) // test the model and report results - val yp = ar.predictAll2 (yd) // predict 1-step ahead for all y - val yy = y(1 to y.dim) - new Plot (t, yy, yp, s"ARIMA($p, $d, $q): y-actual vs. 
y-predicted", lines = true) -/* - val yf = ar.forecastAll (h, y) // forecast h-steps ahead for all y - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - assert (yf(?, 1)(1 to m+1) == yp) // column 1 must agree with one step-ahead predictions - for k <- 1 to h do - println (s"evalaute QoF for horizon $k:") - println (Fit.fitMap (ar.testf (k, y))) // evaluate k-units ahead forecasts - end for -*/ - end for - - banner ("Select model based on ACF and PACF") - ar.plotFunc (ar.acF, "ACF") // Auto-Correlation Function (ACF) - ar.plotFunc (ar.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end ARIMATest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMATest2` object is used to test the `ARIMA` class on real data: Forecasting lake levels. - * Test the test, predictAll, testf and forecastAll methods over the whole times-series. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.ARIMATest2 - */ -object ARIMATest2 extends App: - - import Example_LakeLevels.y - - val (y0, yd) = diff (y) - val z = diffinv (y0, yd) - - println (s"original y = $y") - println (s"differenced x = $yd") - println (s"restored z = $z") - assert (z == y) - -end ARIMATest2 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/ARIMA.scala.bak2 b/src/main/scala/scalation/modeling/forecasting_old/old/ARIMA.scala.bak2 deleted file mode 100644 index 3a0150d87..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/ARIMA.scala.bak2 +++ /dev/null @@ -1,341 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller, Michael Cotterell - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). 
- * - * @title Model: Auto-Regressive, Integrated, Moving-Average (ARIMA) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ -import scalation.optimization._ - -import ARIMA.hp - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Take the 1-st difference of vector/time-series y, returning the first original - * value and the differenced time-series. - * @param y the vector/time-series to be differenced - */ -def diff (y: VectorD): (Double, VectorD) = - val yd = new VectorD (y.dim-1) - for i <- yd.indices do yd(i) = y(i+1) - y(i) - (y(0), yd) -end diff - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Take the 1-st inverse-difference of vector/time-series yd. - * @param y0 the first value from the undifferenced time-series - * @param yd the vector/time-series to be inverse-differenced - */ -def diffinv (y0: Double, yd: VectorD): VectorD = - val y = new VectorD (yd.dim+1) - y(0) = y0 - for i <- 1 until y.dim do y(i) = yd(i-1) + y(i-1) - y -end diffinv - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMA` class provides basic time series analysis capabilities for Auto-Regressive, - * Moving-Average (ARIMA) models. In an ARIMA(p, q) model, p refers to the order of the - * Auto-Regressive components and q refers to the Moving-Average compoenest of the model. - * ARIMA models are often used for forecasting. 
- * Given time-series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * y_t = δ + Σ(φ_k y_t-k) + Σ(θ_k e_t-k) + e_t - * where δ is a constant, φ is the auto-regressive coefficient vector, - * θ is the moving-average vector, and e_t is the noise term. - *---------------------------------------------------------------------------------- - * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class ARIMA (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARIMA.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = hparam("p").toInt, df = y.dim - hparam("p").toInt): - - private val debug = debugf ("ARIMA", true) // debug function - private val flaw = flawf ("ARIMA") // flaw function - private var m = y.dim // number of time points - private var p = hparam("p").toInt // p-th order Auto-Regressive, - private var d = hparam("d").toInt // d-th order Differencing and - private var q = hparam("q").toInt // q-th order Moving-Average model - private var φ = VectorD.nullv // AR(p) parameters/coefficients part - private var θ = VectorD.nullv // MA(q) parameters/coefficients part - private var yf = MatrixD.nullm // the forecast matrix - time points x horizons - - val (y0, yd) = diff (y) // the first orginal value and differenced time-series - - if p > MAX_LAGS then flaw ("ARIMA", s"p = $p must not be greater than MAX_LAGS = $MAX_LAGS") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including its current hyper-parameter, e.g., ARIMA(2, 1, 1). - */ - override def modelName: String = s"ARIMA($p, $d, $q)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARIMA` model to the times-series data in vector y_. 
- * Estimate the coefficient vector φ for a p-th order Auto-Regressive ARIMA(p) model. - * Uses Durbin-Levinson Algorithm (in `Correlogram`) to determine the coefficients. - * The φ vector is p-th row of psi matrix (ignoring the first (0th) column). - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - m = y_.dim // length of relevant time-series - e = new VectorD (m) - resetDF (p, m - p) // reset the degrees of freedom - makeCorrelogram (y_) // correlogram computes psi matrix, gives ACF and PACF - - φ = new VectorD (p) // zeros for AR part - θ = new VectorD (q) // zeros for MA part - val b = φ ++ θ // combine all parameters -> vector to optimize - - def csse (b: VectorD): Double = // objective function - conditional sum of squared errors - φ = b(0 to p); θ = b(p to p+q) - val (yy, yp) = testSetup (yd) // get and align actual and predicted values - val s = (yy - yp).normSq // sum of squared errors -// println (s"csse: s = $s, b = $b") - s - end csse - - def nll (b: VectorD): Double = // objective function - negative log-likelihood (MLE) - 0.0 // FIX - implement - end nll - - val optimizer = new BFGS (csse) // apply Quasi-Newton BFGS optimizer -// val optimizer = new ConjugateGradient (csse) // apply Conjugate Gradient optimizer - fails -// val optimizer = new CoordinateDescent (csse) // apply Coordinate Descent optimizer -// val optimizer = new NelderMeadSimplex (csse, 3) // apply Nelder-Mead Simplex optimizer -// val optimizer = new GridSearch (csse, 3); optimizer.setAxes () // apply GridSearch BFGS optimizer - close - val (fb, bb) = optimizer.solve (b) // optimal solution for the objective function and parameters - - φ = bb(0 to p); θ = bb(p to p+q) // recover parameters for z - debug ("train", s"parameters for ARIMA($p, $d, $q) model: φ = $φ, θ = $θ") - end train - - 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an ARIMA forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the training/testing response/output vector (e.g., full y) - */ - def test (x_null: MatrixD, y_ : VectorD): VectorD = - val (yy, yp) = testSetup (y_) // get and align actual and predicted values - resetDF (p, yy.dim - p) // reset the degrees of freedom - diagnose (yy, yp) // evaluate and return the QoF of these predictions - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an ARIMA forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the training/testing response/output vector (e.g., full y) - */ - def test2 (x_null: MatrixD, y_ : VectorD): VectorD = - val yp = predictAll2 (yd) // make predictions - val yy = y_(1 to y_.dim) - val yyp = yp(0 to y_.dim-1) // align actual and predicted vectors - - resetDF (p, yy.dim - p) // reset the degrees of freedom - diagnose (yy, yyp) // evaluate and return the QoF of these predictions - end test2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an ARIMA forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing response/output vector (e.g., full y) - * @param redo whether to use existing forecasts or redo them (defaults to false) - */ - def testf (h: Int, y_ : VectorD, redo: Boolean = false): VectorD = - if yf == null || yf.dim2 < h+1 || redo then yf = forecastAll (h, y_) // redo all forecasts - val yy = y_(h to y_.dim) - val yf_h = yf(?, h)(h to y_.dim) // pull column h from the forecast matrix and align - resetDF (p, yy.dim - p) // reset the degrees of freedom - diagnose (yy, yf_h) // evaluate and return the QoF of these forecasts - end testf - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the ARIMA(p, d, q) model. - */ - override def parameter: VectorD = φ ++ θ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for time point/index t using 1-step ahead forecasts. - * y_t = φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time), - * but do not assume errors repeat. 
- * @see predictAll in `Forecaster` - * @param t the time point/index to be predicted - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - if t < 1 || t > y_.dim then flaw ("predict", s"time index t = $t is out of range") - var sum = 0.0 - for j <- 0 until p do sum += φ(j) * y_(max (0, t-1-j)) - for j <- 0 until q if t-j > 0 do sum += θ(j) * e(t-1-j) - if t < y_.dim then e(t) = y_(t) - sum // update the t-th error e_t - sum // prediction for y_t, yp_t - end predict - - def predictAll2 (y_ : VectorD): VectorD = - val yp = new VectorD (yd.dim+1) - yp(0) = y0 - e(0) = 0.0 - for t <- 1 to yd.dim do - var sum = 0.0 - for j <- 0 until p do sum += φ(j) * yd(max (0, t-1-j)) - for j <- 0 until q if t-j > 0 do sum += θ(j) * e(t-1-j) - yp(t) = y(t-1) + sum // prediction for y_t, yp_t - if t < yd.dim then e(t) = yd(t) - sum // update the t-th error e_t -// if t < yd.dim then e(t) = y(t) - yp(t) // update the t-th error e_t - end for - yp(1 to yd.dim) - end predictAll2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * Forecast recurse down diagonals in the yf forecast matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. 
- * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - */ - def forecastAll (h: Int, y_ : VectorD): MatrixD = - yf = new MatrixD (y_.dim+h, h+1) // forecasts for all time points t & horizons to h - for t <- 0 until m do yf(t, 0) = y_(t) // first column is actual values, horizon 0 - for k <- 1 to h do - for t <- y_.indices do // make forecasts over all time points for horizon k - var sum = 0.0 - for j <- 0 until p do sum += φ(j) * yf(max (0, t+k-1-j), max (0, k-1-j)) - yf(t+k, k) = sum // forecast down the diagonal - end for - debug ("forecastAll", s"yf(?, $k) = ${yf(?, k)}") - end for - yf // return matrix of forecasted values - end forecastAll - -end ARIMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMA` companion object provides factory methods for the `ARIMA` class. - */ -object ARIMA: - - /** Base hyper-parameter specification for `ARIMA` class - */ - val hp = new HyperParameter - hp += ("p", 1, 1) - hp += ("d", 1, 1) - hp += ("q", 1, 1) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARIMA` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARIMA.hp): ARIMA = - new ARIMA (y, tt, hparam) - end apply - -end ARIMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMATest` object is used to test the `ARIMA` class on real data: Forecasting lake levels. - * Test the test, predictAll, testf and forecastAll methods over the whole times-series. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.ARIMATest - */ -object ARIMATest extends App: - - import Example_LakeLevels.y - val m = y.dim - val t = VectorD.range (1, m) - val h = 3 // the forecasting horizon - val d = 1; hp("d") = d // number of differences - val q = 1; hp("q") = q // moving-average hyper-parameter q - - var ar: ARIMA = null - for p <- 1 to 1 do // autoregressive hyper-parameter p - hp("p") = p // set p hyper-parameter - banner (s"Test: ARIMA($p, $q}") - ar = new ARIMA (y) // create model for time series data - ar.train (null, y) // train the model on full dataset - - banner ("Test: Differenced Time-Series") - val yd = ar.yd - println (ar.report (ar.test (null, yd))) // test the model and report results - var yp = ar.predictAll (yd) // predict 1-step ahead for all y - var yy = yd(1 to yd.dim) - new Plot (t, yy, yp, s"ARIMA($p, $d, $q): yd-actual vs. yd-predicted", lines = true) - - banner ("Test: Undifferenced Time-Series") -// println (ar.report (ar.test2 (null, yd))) // test the model and report results - yp = ar.predictAll2 (y) // predict 1-step ahead for all y - yy = y(1 to yd.dim) - println (s"fit = ${ar.diagnose (yy, yp)}") - new Plot (t, yy, yp, s"ARIMA($p, $d, $q): y-actual vs. 
y-predicted", lines = true) -/* - val yf = ar.forecastAll (h, y) // forecast h-steps ahead for all y - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - assert (yf(?, 1)(1 to m+1) == yp) // column 1 must agree with one step-ahead predictions - for k <- 1 to h do - println (s"evalaute QoF for horizon $k:") - println (Fit.fitMap (ar.testf (k, y))) // evaluate k-units ahead forecasts - end for -*/ - end for - - banner ("Select model based on ACF and PACF") - ar.plotFunc (ar.acF, "ACF") // Auto-Correlation Function (ACF) - ar.plotFunc (ar.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end ARIMATest - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMATest2` object is used to test functions used by the `ARIMA` class. - * Test the diff and diffinv functions. - * > runMain scalation.modeling.forecasting.ARIMATest2 - */ -object ARIMATest2 extends App: - - import Example_LakeLevels.y - - val (y0, yd) = diff (y) - val z = diffinv (y0, yd) - - println (s"original y = $y") - println (s"differenced x = $yd") - println (s"restored z = $z") - assert (z == y) - -end ARIMATest2 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/ARIMA.scala.sav b/src/main/scala/scalation/modeling/forecasting_old/old/ARIMA.scala.sav deleted file mode 100644 index c142e2857..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/ARIMA.scala.sav +++ /dev/null @@ -1,561 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). 
- * - * @note Auto-Regressive, Integrated, Moving Average (ARIMA) Model - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.math.{max, sqrt} - -import scalation.mathstat._ -import scalation.optimization._ -import scalation.random.{Normal, Uniform} - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Companion object for class `ARIMA`. Includes features related to differencing - * and automated order selection. - * @see www.jstatsoft.org/article/view/v027i03/v27i03.pdf - */ -object ARIMA: - - /** Base hyper-parameter specification for `ARIMA` class - */ - val hp = new HyperParameter - hp += ("p", 1, 1) - hp += ("d", 1, 1) - hp += ("q", 1, 1) - - private val flaw = flawf ("ARIMA") // flaw function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the 'd'th difference of the time-series for 'd' in {0, 1, 2, 3}. - * A new vector is returned even when there is no difference taken ('d = 0'), - * to ensure the original is preserved. 
- * @param y the original time-series to be differenced - * @param d the order of simple differencing - */ - def difference (y: VectorD, d: Int): VectorD = - d match - case 0 => - y.copy - case 1 => - VectorD (for i <- 0 until y.dim-1 yield y(i+1) - y(i)) - case 2 => - VectorD (for i <- 0 until y.dim-2 yield y(i+2) - 2*y(i+1) + y(i)) - case 3 => - VectorD (for i <- 0 until y.dim-3 yield y(i+3) - 3*y(i+2) + 3*y(i+1) - y(i)) - case _ => - flaw ("difference", "ARIMA does not support differencing higher than order 3"); null - end match - end difference - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Transform the fitted values on the training data of a differenced time series back - * to the original scale. Undo trend differencing only. - * @see stats.stackexchange.com/questions/32634/difference-time-series-before-arima-or-within-arima - * @param yp the vector of predicted/fitted values - * @param y the original time-series vector - * @param d the order of simple differencing - */ - def transformBack (yp: VectorD, y: VectorD, d: Int): VectorD = - d match - case 0 => - yp - case 1 => - val tb = new VectorD (y.dim) - tb(0) = y(0) - for i <- 0 until y.dim-1 do tb(i+1) = yp(i) + y(i) - tb - case 2 => - val tb = new VectorD (y.dim) - tb(0) = y(0); tb(1) = y(1) - for i <- 0 until y.dim-2 do tb(i+2) = yp(i) + 2*y(i+1) - y(i) - tb - case 3 => - val tb = new VectorD (y.dim) - tb(0) = y(0); tb(1) = y(1); tb(2) = y(2) - for i <- 0 until y.dim-3 do tb(i+3) = yp(i) + 3*y(i+2) - 3*y(i+1) + y(i) - tb - case _ => - flaw ("transformBack", "ARIMA does not support differencing higher than order 3"); null - end match - end transformBack - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Transform the forecasted values of a differenced time series back to the original - * for all horizons scale. 
- * @see stats.stackexchange.com/questions/32634/difference-time-series-before-arima-or-within-arima - * @param ypa the matrix of all multi-horizon forecasted values - * @param y the original time-series vector - * @param d the order of simple differencing - */ - def transformBack_allH (ypa: MatrixD, y: VectorD, d: Int): MatrixD = - val tb = new MatrixD (ypa.dim, ypa.dim2) - tb(?, 0) = y - for k <- 1 until ypa.dim2 do tb(?, k) = transformBack (ypa(?, k), y, d) - tb - end transformBack_allH - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Transform the forecast values of a differenced time series back to the - * original scale. - * @see stats.stackexchange.com/questions/32634/difference-time-series-before-arima-or-within-arima - * @param yf the vector of forecasted values - * @param y the original time series - * @param d the order of simple differencing - * @param t the time point being forecasted (@see the 'forecast' method) - */ - def transformBackF (yf: VectorD, y: VectorD, d: Int, t: Int): VectorD = - d match - case 0 => - yf - case 1 => - val tb = y(t - 1 to t) ++ yf - for i <- 1 until tb.dim do tb(i) += tb(i-1) - tb(1 to tb.dim) - case 2 => - val tb = y(t-2 to t) ++ yf - for i <- 2 until tb.dim do tb(i) += (2*tb(i-1) - tb(i-2)) - tb(2 to tb.dim) - case 3 => - val tb = y(t-3 to t) ++ yf - for i <- 3 until tb.dim do tb(i) += (3*tb(i-1) - 3*tb(i-2) + tb(i-3)) - tb(3 to tb.dim) - case _ => - flaw ("transformBackF", "ARIMA does not support differencing higher than order 3"); null - end match - end transformBackF - -end ARIMA - -import ARIMA._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMA` class provides basic time series analysis capabilities for Auto- - * Regressive 'AR' Integrated 'I' Moving-Average 'MA' models. 
In an - * ARIMA(p, d, q) model, p and q refer to the order of the Auto-Regressive - * and Moving-Average components of the model; d refers to the order of - * differencing. Given time series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * y_t = δ + Σ(φ_i y_t-i) + Σ(θ_i e_t-i) + e_t - * where δ is a constant, φ is the auto-regressive coefficient vector, - * θ is the moving-average coefficient vector, and e is the noise vector. - *------------------------------------------------------------------------------ - * If d > 0, then the time series must be differenced first before applying - * the above model. - *------------------------------------------------------------------------------ - * @param y the original input vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class ARIMA (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARIMA.hp) - extends ARMA (y, tt, hparam): - - private val debug = debugf ("ARIMA", true) // debug function - private val flaw = flawf ("ARIMA") // flaw function - - protected val d = hparam("d").toInt // the number of differences to take - protected var differenced = d > 0 // flag indicating whether differencing will be applied - protected var params = p + q + (if differenced then 0 else 1) // number of parameters estimated - - protected var mu = -0.0 // sample mean (-0.0 means unassigned) - protected var μ = -0.0 // population mean estimated using MLE - protected var sig2 = -0.0 // sample variance - protected var σ2 = -0.0 // population variance estimated using MLE - - private var z = VectorD.nullv // vector of centered predicted/fitted values - private var zp = VectorD.nullv // vector of centered predicted/fitted values - - init (y) // initialize vectors and parameters - - modelName = s"ARIMA($p, $d, $q)" - - 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Initialize variables based on the working time-series v. - * Set/change the working time series. May be used to set the time series - * to a different time window in order to produce newer forecast. - * @param v the working vector/time-series - */ - protected def init (v: VectorD): Unit = - mu = v.mean // sample mean - z = difference (v, d) // take the d-th difference of the time series - zp = new VectorD (z.dim) // predicted values prior to undifferencing/uncentering -// e = new VectorD (z.dim) // vector of errors/residuals - sig2 = z.variance // sample variance - end init - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the maximum lag used by this model (its capacity to look into the past). - */ - override def cap: Int = max (p, q) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show estimates for parameters. - */ - def showParameterEstimates (): Unit = - println (s"differenced = $differenced") - println (s"φ = $φ") // AR parameters - println (s"θ = $θ") // MA parameters - println (s"δ = $δ") // drift - println (s"mu = $mu") // sample mean - println (s"μ = $μ") // MLE mean - println (s"sig2 = $sig2") // sample variance - println (s"σ2 = $σ2") // MLE variance - end showParameterEstimates - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARIMA` model to the times-series data in vector y_. - * Estimate the coefficient vectors φ and θ for (p, q)-th order ARIMA(p, d, q) model. - * It uses BFGS, a Quasi-Newton optimizer, to minimize the negative log-likelihood. 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector - */ - override def train (x_null: MatrixD, y_ : VectorD): Unit = - val optimizer = new BFGS (nll) // nonlinear optimizer - val b = new VectorD (params + 1) // parameter values - - if ! differenced then b(b.size-2) = mu // sample mean, initial est. for μ parameter - b(b.size-1) = sqrt (sig2) // sample standard deviation, initial est. for σ parameter - optimizer.solve (b) // find b that maximizes likelihood - - δ = μ * (1 - φ.sum) // update drift value -// δ = stats.mu * (1 - φ.sum) - - showParameterEstimates () - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The negative log-likelihood function to be minimized. - * @see math.unice.fr/~frapetti/CorsoP/Chapitre_4_IMEA_1.pdf, page 36 - * @see spia.uga.edu/faculty_pages/monogan/teaching/ts/Barima.pdf - * @see stats.stackexchange.com/questions/77663/arima-estimation-by-hand - * @param b the input parameter vector - */ - protected def nll (b: VectorD): Double = - if b.size != params + 1 then flaw ("nll", "input parameter vector size incorrect") - for i <- 0 until p do φ(i) = b(i) - for i <- p until p+q do θ(i-p) = b(i) - if ! differenced then μ = b(b.size-2) - σ2 = b.last~^2 - - updateFittedValues () - end nll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the vector of fitted values 'zp', the vector of errors 'e', and - * return the negative log-likelihood '-ll'. - * @see `Fit` for definition of 'll'. - */ - protected def updateFittedValues (): Double = - if ! differenced then for i <- z.indices do z(i) = y(i) - μ // for undifferenced time series, center using est. 
μ - - zp(0) = z(0) // no past values or errors => copy actual - for t <- 1 until zp.dim do - e(t-1) = z(t-1) - zp(t-1) // error in previous forecast - var sum = 0.0 - for j <- 0 until p if t-j > 0 do sum += φ(j) * z(t-1-j) - for j <- 0 until q if t-j > 0 do sum += θ(j) * e(t-1-j) - zp(t) = sum - end for - - -ll (e.normSq / m, σ2, m) // return negative log likelihood - end updateFittedValues - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the error (difference between actual and predicted) and useful - * diagnostics for the dataset. - * @param y vector of observed values - * @param yp vector of predicted values - */ - override def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - // FIX - add testSetup - val yp = predictAll (y_) - resetDF (params, y.dim - params) - (yp, diagnose (y, yp)) - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the vector of predicted/fitted values on the training/full dataset. - * Based on 'zp' calculated in the 'updateFittedValues' method. - * @param y_ the given time-series - */ - override def predictAll (y_ : VectorD): VectorD = - if differenced then transformBack (zp, y, d) else zp + μ - end predictAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce h-steps-ahead forecast for ARIMA models. 
- * @see ams.sunysb.edu/~zhu/ams586/Forecasting.pdf - * @param t the time point from which to make forecasts (in the original scale) - * @param h the number of steps to forecast, must be at least one - */ - def forecast (t: Int = y.dim, h: Int = 1): VectorD = - if t > y.dim then flaw ("forecast", s"t ($t) cannot be greater than y.dim (${y.dim})") - val tz = t - d // scale t to match vector z and e - if tz < cap then flaw ("forecast", s"tz ($tz) must be at least cap ($cap)") - - val zf = new VectorD (cap + h) // forecasted centered values - val e_ = new VectorD (cap + h) // available observed errors - - for i <- 0 until cap if tz-cap+i >= 0 do // seed with first cap = max(p, q) values - zf(i) = z(tz-cap+i) // copy first cap values - e_(i) = e(tz-cap+i) // unveil first cap errors (observed in training) - end for - for i <- cap until zf.dim do // start at t = cap (enough for first value to forecast) - var sum = 0.0 - for j <- 0 until p do sum += φ(j) * zf(i-1-j) - for j <- 0 until q do sum += θ(j) * e_(i-1-j) - zf(i) = sum - end for - val f = zf(cap to zf.dim) // dump first cap values - if differenced then transformBackF (f, y, d, t) - else f + μ // return the vector of forecasts - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all time points using 1 through h-steps ahead forecasts. - * The h-th row of matrix is the horizon h forecast (where h = 0 is actual data). 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - override def forecastAll (y_ : VectorD, h: Int): MatrixD = - val yf = new MatrixD (y.dim, h+1) // forecasts for all horizons h & time points t - yf(?, 0) = y // first row is actual values - val cut = cap + d // cut over from actual to forecasted values - - for t <- y.indices do - if t < cut then - for k <- 1 to h do yf(t, k) = y(t) // copy first cut observed values from y - else - val ft = forecast (t, h) // forecasts at time point t, horizons 1 to h - for k <- 1 to h if t+k-1 < y.dim do - yf(t+k-1, k) = ft(k-1) // place forecasts diagonally - end for - end if - end for - - // fill in blank values in first few rows where no forecasts can be produced by copying values from previous columns - for k <- 2 to h; t <- cut until cut+k-1 do yf(t, k) = yf(t, k-1) // copy forecasted values - - yf // return matrix of forecasted values - end forecastAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all m time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, yf(?, 0) is set to y (the actual time-series values). - * Do not forecast errors, rather use observed errors from training and make sure not - * to use errors that would correspond to knowing future errors (all future errors should - * be assumed to be 0). - * @see https://otexts.com/fpp3/arima-forecasting.html, section 9.8 - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - */ -/*** - override def forecastAll2 (h: Int): MatrixD = forecastAll (h) - FIX - values must be computed diagonially - does not work for d = 1, etc. 
(missing value at 'cut') - override def forecastAll2 (h: Int): MatrixD = - { - val zf = new MatrixD (y.dim, h+1) // forecast matrix: rows - time, cols - horizon - for t <- z.indices do zf(t, 0) = z(t) // first column is actual values, horizon 0 - val cut = cap + d // cut over from actual to forecasted values - - for k <- 1 to h do // loop through k-steps ahead forecasts - val e_ = new VectorD (z.dim) // redetermine errors from a clean slate - - for t <- 0 until cut do // seed the first cap = max(p, q) values - zf(t, k) = z(t) // copy first cap actual values - e_(t) = e(t) // copy first cap errors (observed in training) - end for - - for t <- cut until y.dim do // forecast from cap to the end of time-series - if t-k >= 0 then e_(t-k) = e(t-k) // unveil previous error at time t-k - var sum = 0.0 - for j <- 0 until p if t-j > 0 then sum += φ(j) * zf(t-1-j, max (0, k-1-j)) - for j <- 0 until q if t-j > 0 then sum += θ(j) * e_(t-1-j) - zf(t, k) = sum // centered forecast for time t - end for - end for - println (s"forecastAll2: zf (${zf.dim}) = $zf") - if differenced then transformBack_allH (zf, y, d) - else zf + μ // return uncentered forecasts - end forecastAll2 -***/ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Obtain residuals/errors in the original scale. - */ - def residuals: VectorD = if differenced then y - predictAll (y) else e - -end ARIMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest` main function tests the `ARIMA` class on real data: - * Forecasting lake levels. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRIMATest - */ -@main def aRIMATest (): Unit = - - import Example_LakeLevels.y - - val d = 0 // apply d-th order differencing - no differencing -// val d = 1 // apply d-th order differencing - first differences - - for h <- 1 to 2 do // forecasting horizon - for p <- 1 to 6; q <- 0 to 3 do // ARMA hyper-parameter settings - banner (s"Test: ARIMA ($p, $d, $q) with h = $h") - hp("p") = p; hp("d") = d; hp("q") = q - val mod = new ARIMA (y) // create an ARIMA model - val (yp, qof) = mod.trainNtest ()() // train and the model on full dataset - - val yfa = mod.forecastAll (y, h) - val yf = yfa(?, h) // forecasted values - h steps ahead - new Plot (null, y, yf, s"Plot of y & yf, forecasted (h = $h) ${mod.modelName} vs. t", true) - - if h == 1 then Forecaster.differ (yp, yf, allow = true) -/* - val skip = max (p, q) // skip the cap start-up - banner (s"aRIMATest: QoF (@h = $h) for yf = mod.forecastAll") - println (s"rSq (yf) for h = $h is ${rSqF (y, yf)}") - println (s"rSq (yf) for h = $h is ${rSqF (y, yf, max (p, q))}, skip = $skip") - println (s"sMAPE (yf) for h = $h is ${smapeF (y, yf)}") - println (s"sMAPE (yf) for h = $h is ${smapeF (y, yf, max (p, q))}, skip = $skip") - - banner (s"aRIMATest: QoF (@h = $h) for yf2 = mod.forecastAll2") - println (s"rSq (yf2) for h = $h is ${rSqF (y, yf2)}") - println (s"rSq (yf2) for h = $h is ${rSqF (y, yf2, max (p, q))}, skip = $skip") - println (s"sMAPE (yf2) for h = $h is ${smapeF (y, yf2)}") - println (s"sMAPE (yf2) for h = $h is ${smapeF (y, yf2, max (p, q))}, skip = $skip") -*/ - end for - end for - -end aRIMATest - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest2` main function tests the `ARIMA` class. - * Test simulated data. 
- * > runMain scalation.modeling.forecasting.aRIMATest2 - */ -@main def aRIMATest2 (): Unit = - - banner ("ARIMA Test2") - val m = 20 - val noise = Normal (0, 2) -// val noise = Uniform (-5, 5) - val y = VectorD (for i <- 0 until m yield i + noise.gen) - - println (s"y = $y") - - val (p, d, q) = (1, 1, 1) - hp("p") = p; hp("d") = d; hp("q") = q - banner (s"Build ARIMA($p, $d, $q) model") - val mod = new ARIMA (y) // time series data: y vs. t - mod.trainNtest ()() // train and the model on full dataset - -end aRIMATest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest3` main function tests the `ARIMA` class. - * Traffic dataset. - * > runMain scalation.modeling.forecasting.aRIMATest3 - */ -@main def aRIMATest3 (): Unit = - - val data = MatrixD.load ("travelTime.csv") - val y = data(?, 1) - - val steps = 1 // number of steps for the forecasts - val d = 1 // levels of differencing - val (p, q) = (1, 1) - hp("p") = p; hp("d") = d; hp("q") = q - - banner (s"Build ARIMA($p, $d, $q) model") - val mod = new ARIMA (y) // time series data: y vs. t - mod.trainNtest ()() // train and the model on full dataset - - val ar_f = mod.forecast (h = steps) - println (s"$steps-step ahead forecasts using ${mod.modelName} model = $ar_f") - -end aRIMATest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest4` main function tests the `ARIMA` class. - * Simulated data with a quadratic pattern. 
- * > runMain scalation.modeling.forecasting.aRIMATest4 - */ -@main def aRIMATest4 (): Unit = - - val y = makeTSeries () // make a simulated time-series (see `Stationary`) - - val steps = 2 // number of steps for the forecasts - val (d, q) = (1, 1) // levels of differencing - hp("d") = d; hp("q") = q - - for p <- 1 to 3 do - hp("p") = p - banner (s"Build ARIMA($p, $d, $q) model") - val mod = new ARIMA (y) // time series model ARIMA - mod.trainNtest ()() // train and the model on full dataset - - banner ("Make Forecasts") - val yf = mod.forecast (steps) - println (s"$steps-step ahead forecasts using ${mod.modelName} model = $yf") - end for - -end aRIMATest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest5` main function tests the `ARIMA` class on real data: - * Forecasting COVID-19. - * > runMain scalation.modeling.forecasting.aRIMATest5 - */ -@main def aRIMATest5 (): Unit = - - val data = MatrixD.load ("covid_19.csv", 1, 1) // skip first row (header) and first column - val yy = data(?, 4) // column 5 is daily deaths -// val yy = data(?, 5) // column 5 is daily deaths smoothed - val is = yy.indexWhere (_ >= 2.0) // find day of first death with at least 2 deaths - println (s"is = $is is first day with at least 2 deaths") - val y = yy(is until yy.dim) // slice out days before is - val h = 2 // forecasting horizon - - val ar1 = new AR (y) - ar1.trainNtest ()() // train and the model on full dataset - banner (s"AR(1) $h-steps rolling validation results") - RollingValidation.rollValidate (ar1, 2, 14) -/* - val yfa = rw.forecastAll (y, h) - val yf = yfa(?, h) // forecasted values - h steps ahead - new Plot (null, y, yf, s"Plot of y & yf, forecasted (h = $h) ${rw.modelName} vs. 
t", true) - - hp("d") = 0 // level of differencing, try 0 and 1 - for p <- 1 to 15; q <- 1 to 3 do // ARIMA hyper-parameter settings - hp("p") = p; hp("q") = q - val mod = new ARIMA (y) // create an ARIMA model - val (yp, qof) = mod.trainNtest ()() // train and the model on full dataset - - val yfa = mod.forecastAll (y, h) - val yf = yfa(?, h) // forecasted values - h steps ahead - new Plot (null, y, yf, s"Plot of y & yf, forecasted (h = $h) ${mod.modelName} vs. t", true) - end for -*/ - -end aRIMATest5 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/ARIMA.scala.sav2 b/src/main/scala/scalation/modeling/forecasting_old/old/ARIMA.scala.sav2 deleted file mode 100644 index 5d2771f26..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/ARIMA.scala.sav2 +++ /dev/null @@ -1,568 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). - * - * @note Model: Auto-Regressive, Integrated, Moving Average (ARIMA) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -// U N D E R D E V E L O P M E N T - -package scalation -package modeling -package forecasting - -import scala.math.sqrt - -import scalation.mathstat._ -import scalation.optimization.quasi_newton.{BFGS => Optimizer} // change import to change optimizer -//import scalation.optimization.quasi_newton.{LBFGS => Optimizer} -import scalation.random.Normal - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Companion object for class `ARIMA`. 
Includes features related to differencing - * and automated order selection. - * @see www.jstatsoft.org/article/view/v027i03/v27i03.pdf - */ -object ARIMA: - - private val flaw = flawf ("ARIMA") // flaw function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the 'd'th difference of the time-series for 'd' in {0, 1, 2, 3}. - * A new vector is returned even when there is no difference taken ('d = 0'), - * to ensure the original is preserved. - * @param y the original time-series to be differenced - * @param d the order of simple differencing - */ - def difference (y: VectorD, d: Int): VectorD = - d match - case 0 => - y.copy - case 1 => - VectorD (for i <- 0 until y.dim-1 yield y(i+1) - y(i)) - case 2 => - VectorD (for i <- 0 until y.dim-2 yield y(i+2) - 2*y(i+1) + y(i)) - case 3 => - VectorD (for i <- 0 until y.dim-3 yield y(i+3) - 3*y(i+2) + 3*y(i+1) - y(i)) - case _ => - flaw ("difference", "ARIMA does not support differencing higher than order 3"); null - end match - end difference - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Transform the fitted values on the training data of a differenced time series back - * to the original scale. Undo trend differencing only. 
- * @see stats.stackexchange.com/questions/32634/difference-time-series-before-arima-or-within-arima - * @param yp the vector of predicted/fitted values - * @param y the original time-series vector - * @param d the order of simple differencing - */ - def transformBack (yp: VectorD, y: VectorD, d: Int): VectorD = - d match - case 0 => - yp - case 1 => - val tb = new VectorD (y.dim) - tb(0) = y(0) - for i <- 0 until y.dim-1 do tb(i+1) = yp(i) + y(i) - tb - case 2 => - val tb = new VectorD (y.dim) - tb(0) = y(0); tb(1) = y(1) - for i <- 0 until y.dim-2 do tb(i+2) = yp(i) + 2*y(i+1) - y(i) - tb - case 3 => - val tb = new VectorD (y.dim) - tb(0) = y(0); tb(1) = y(1); tb(2) = y(2) - for i <- 0 until y.dim-3 do tb(i+3) = yp(i) + 3*y(i+2) - 3*y(i+1) + y(i) - tb - case _ => - flaw ("transformBack", "ARIMA does not support differencing higher than order 3"); null - end match - end transformBack - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Transform the forecasted values of a differenced time series back to the original - * for all horizons scale. - * @see stats.stackexchange.com/questions/32634/difference-time-series-before-arima-or-within-arima - * @param ypa the matrix of all multi-horizon forecasted values - * @param y the original time-series vector - * @param d the order of simple differencing - */ - def transformBack_allH (ypa: MatrixD, y: VectorD, d: Int): MatrixD = - val tb = new MatrixD (ypa.dim, ypa.dim2) - tb(?, 0) = y - for k <- 1 until ypa.dim2 do tb(?, k) = transformBack (ypa(?, k), y, d) - tb - end transformBack_allH - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Transform the forecast values of a differenced time series back to the - * original scale. 
- * @see stats.stackexchange.com/questions/32634/difference-time-series-before-arima-or-within-arima - * @param yf the vector of forecasted values - * @param y the original time series - * @param d the order of simple differencing - * @param t the time point being forecasted (@see the 'forecast' method) - */ - def transformBackF (yf: VectorD, y: VectorD, d: Int, t: Int): VectorD = - d match - case 0 => - yf - case 1 => - val tb = y(t - 1 to t) ++ yf - for i <- 1 until tb.dim do tb(i) += tb(i-1) - tb(1 to tb.dim) - case 2 => - val tb = y(t-2 to t) ++ yf - for i <- 2 until tb.dim do tb(i) += (2*tb(i-1) - tb(i-2)) - tb(2 to tb.dim) - case 3 => - val tb = y(t-3 to t) ++ yf - for i <- 3 until tb.dim do tb(i) += (3*tb(i-1) - 3*tb(i-2) + tb(i-3)) - tb(3 to tb.dim) - case _ => - flaw ("transformBackF", "ARIMA does not support differencing higher than order 3"); null - end match - end transformBackF - -end ARIMA - -import ARIMA._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMA` class provides basic time series analysis capabilities for Auto- - * Regressive 'AR' Integrated 'I' Moving-Average 'MA' models. In an - * ARIMA(p, d, q) model, p and q refer to the order of the Auto-Regressive - * and Moving-Average components of the model; d refers to the order of - * differencing. Given time series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * y_t = δ + Σ(φ_i y_t-i) + Σ(θ_i e_t-i) + e_t - * where δ is a constant, φ is the auto-regressive coefficient vector, - * θ is the moving-average coefficient vector, and e is the noise vector. - *------------------------------------------------------------------------------ - * If d > 0, then the time series must be differenced first before applying - * the above model. 
- *------------------------------------------------------------------------------ - * @param y the original input vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class ARIMA (y: VectorD, tt: VectorD = null, hparam: HyperParameter = SARIMAX.hp) - extends ARMA (y, tt, hparam): - - private val flaw = flawf ("ARIMA") // flaw function - - protected val d = hparam("d").toInt // the number of differences to take -// protected var cap = 0 // max of p and q - protected var params = 0 // number of parameters estimated - protected var differenced = d > 0 // flag indicating whether differencing will be applied - - protected var mu = -0.0 // sample mean (-0.0 means unassigned) - protected var μ = -0.0 // population mean estimated using MLE - protected var sig2 = -0.0 // sample variance - protected var σ2 = -0.0 // population variance estimated using MLE - - private var z = VectorD.nullv // vector of centered predicted/fitted values - private var zp = VectorD.nullv // vector of centered predicted/fitted values - - init (y) // initialize vectors and parameters - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including current hyper-parameters, e.g., ARIMA(2, 1, 1). - */ - modelName = s"ARIMA($p, $d, $q)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Initialize variables based on the working time-series v. - * Set/change the working time series. May be used to set the time series - * to a different time window in order to produce newer forecast. 
- * @param v the working vector/time-series - */ - protected def init (v: VectorD): Unit = - mu = v.mean // sample mean - z = difference (v, d) // take the d-th difference of the time series - zp = new VectorD (z.dim) // predicted values prior to undifferencing/uncentering -// e = new VectorD (z.dim) // vector of errors/residuals - sig2 = z.variance // sample variance - - φ = new VectorD (p) // AR coefficients - θ = new VectorD (q) // MA coefficients -// cap = max (p, q) // greatest lag - params = p + q + (if differenced then 0 else 1) // number of parameters - end init - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show estimates for parameters. - */ - def showParameterEstimates (): Unit = - println (s"differenced = $differenced") - println (s"φ = $φ") // AR parameters - println (s"θ = $θ") // MA parameters - println (s"δ = $δ") // drift - println (s"mu = $mu") // sample mean - println (s"μ = $μ") // MLE mean - println (s"sig2 = $sig2") // sample variance - println (s"σ2 = $σ2") // MLE variance - end showParameterEstimates - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARIMA` model to the times-series data in vector y_. Must call setPQ first. - * Estimate the coefficient vectors φ and θ for (p, q)-th order ARIMA(p, d, q) model. - * It uses BFGS, a Quasi-Newton optimizer, to minimize the negative log-likelihood. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector - */ - override def train (x_null: MatrixD, y_ : VectorD): Unit = - val b = new VectorD (params + 1) // parameter values - if ! differenced then b(b.size-2) = mu // sample mean, initial est. for μ parameter - b(b.size-1) = sqrt (sig2) // sample standard deviation, initial est. 
for σ parameter - - val optimizer = new Optimizer (nll) // apply Quasi-Newton optimizer -// val (fb, bb) = optimizer.solve (b, 0.5) // optimal solution for the objective function and parameters - val (fb, bb) = optimizer.solve3 (b, 0.5) // optimal solution for the objective function and parameters - - δ = μ * (1 - φ.sum) // update drift value -// δ = stats.mu * (1 - φ.sum) - - showParameterEstimates () - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The negative log-likelihood function to be minimized. - * @see math.unice.fr/~frapetti/CorsoP/Chapitre_4_IMEA_1.pdf, page 36 - * @see spia.uga.edu/faculty_pages/monogan/teaching/ts/Barima.pdf - * @see stats.stackexchange.com/questions/77663/arima-estimation-by-hand - * @param b the input parameter vector - */ - protected def nll (b: VectorD): Double = - if b.size != params + 1 then flaw ("nll", "input parameter vector size incorrect") - for i <- 0 until p do φ(i) = b(i) - for i <- p until p+q do θ(i-p) = b(i) - if ! differenced then μ = b(b.size-2) - σ2 = b.last~^2 - - updateFittedValues () - end nll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the vector of fitted values 'zp', the vector of errors 'e', and - * return the negative log-likelihood '-ll'. - * @see `Fit` for definition of 'll'. - */ - protected def updateFittedValues (): Double = - if ! differenced then for i <- z.indices do z(i) = y(i) - μ // for undifferenced time series, center using est. 
μ - - zp(0) = z(0) // no past values or errors => copy actual - for t <- 1 until zp.dim do - e(t-1) = z(t-1) - zp(t-1) // error in previous forecast - var sum = 0.0 - for j <- 0 until p if t-j > 0 do sum += φ(j) * z(t-1-j) - for j <- 0 until q if t-j > 0 do sum += θ(j) * e(t-1-j) - zp(t) = sum - end for - - -ll (e.normSq / m, σ2, m) // return negative log likelihood - end updateFittedValues - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the error (difference between actual and predicted) and useful - * diagnostics for the dataset. - * @param y_ vector of observed values - * @param yp vector of predicted values - */ - override def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - // FIX - add testSetup - val yp = predictAll (y_) - resetDF (params, y.dim - params) - (yp, diagnose (y, yp)) - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the vector of predicted/fitted values on the training/full dataset. - * Based on 'zp' calculated in the 'updateFittedValues' method. - * @param y_ the given time-series - */ - override def predictAll (y_ : VectorD): VectorD = - println (s"predictAll: y.dim = ${y.dim}, y_.dim = ${y_.dim}, zp.dim = ${zp.dim}") - if differenced then { println (s"zp.dim = ${zp.dim}"); transformBack (zp, y_, d) } else zp + μ - end predictAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce h-steps-ahead forecast for ARIMA models. 
- * @see ams.sunysb.edu/~zhu/ams586/Forecasting.pdf - * @param t the time point from which to make forecasts (in the original scale) - * @param h the number of steps to forecast, must be at least one - */ - def forecast (t: Int = y.dim, h: Int = 1): VectorD = - if t > y.dim then flaw ("forecast", s"t ($t) cannot be greater than y.dim (${y.dim})") - val tz = t - d // scale t to match vector z and e - if tz < cap then flaw ("forecast", s"tz ($tz) must be at least cap ($cap)") - - val zf = new VectorD (cap + h) // forecasted centered values - val e_ = new VectorD (cap + h) // available observed errors - - for i <- 0 until cap if tz-cap+i >= 0 do // seed with first cap = max(p, q) values - zf(i) = z(tz-cap+i) // copy first cap values - e_(i) = e(tz-cap+i) // unveil first cap errors (observed in training) - end for - for i <- cap until zf.dim do // start at t = cap (enough for first value to forecast) - var sum = 0.0 - for j <- 0 until p do sum += φ(j) * zf(i-1-j) - for j <- 0 until q do sum += θ(j) * e_(i-1-j) - zf(i) = sum - end for - val f = zf(cap to zf.dim) // dump first cap values - if differenced then transformBackF (f, y, d, t) - else f + μ // return the vector of forecasts - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all time points using 1 through h-steps ahead forecasts. - * The h-th row of matrix is the horizon h forecast (where h = 0 is actual data). 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - override def forecastAll (y_ : VectorD, h: Int): MatrixD = - val yf = new MatrixD (y.dim, h+1) // forecasts for all horizons h & time points t - yf(?, 0) = y // first row is actual values - val cut = cap + d // cut over from actual to forecasted values - - for t <- y.indices do - if t < cut then - for k <- 1 to h do yf(t, k) = y(t) // copy first cut observed values from y - else - val ft = forecast (t, h) // forecasts at time point t, horizons 1 to h - for k <- 1 to h if t+k-1 < y.dim do - yf(t+k-1, k) = ft(k-1) // place forecasts diagonally - end for - end if - end for - - // fill in blank values in first few rows where no forecasts can be produced by copying values from previous columns - for k <- 2 to h; t <- cut until cut+k-1 do yf(t, k) = yf(t, k-1) // copy forecasted values - - yf // return matrix of forecasted values - end forecastAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all m time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, yf(?, 0) is set to y (the actual time-series values). - * Do not forecast errors, rather use observed errors from training and make sure not - * to use errors that would correspond to knowing future errors (all future errors should - * be assumed to be 0). - * @see https://otexts.com/fpp3/arima-forecasting.html, section 9.8 - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - */ -/*** - override def forecastAll2 (h: Int): MatrixD = forecastAll (h) - FIX - values must be computed diagonially - does not work for d = 1, etc. 
(missing value at 'cut') - override def forecastAll2 (h: Int): MatrixD = - { - val zf = new MatrixD (y.dim, h+1) // forecast matrix: rows - time, cols - horizon - for t <- z.indices do zf(t, 0) = z(t) // first column is actual values, horizon 0 - val cut = cap + d // cut over from actual to forecasted values - - for k <- 1 to h do // loop through k-steps ahead forecasts - val e_ = new VectorD (z.dim) // redetermine errors from a clean slate - - for t <- 0 until cut do // seed the first cap = max(p, q) values - zf(t, k) = z(t) // copy first cap actual values - e_(t) = e(t) // copy first cap errors (observed in training) - end for - - for t <- cut until y.dim do // forecast from cap to the end of time-series - if t-k >= 0 then e_(t-k) = e(t-k) // unveil previous error at time t-k - var sum = 0.0 - for j <- 0 until p if t-j > 0 then sum += φ(j) * zf(t-1-j, max (0, k-1-j)) - for j <- 0 until q if t-j > 0 then sum += θ(j) * e_(t-1-j) - zf(t, k) = sum // centered forecast for time t - end for - end for - println (s"forecastAll2: zf (${zf.dim}) = $zf") - if differenced then transformBack_allH (zf, y, d) - else zf + μ // return uncentered forecasts - end forecastAll2 -***/ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Obtain residuals/errors in the original scale. - */ - def residuals: VectorD = if differenced then y - predictAll (y) else e - -end ARIMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest` main function tests the `ARIMA` class on real data: - * Forecasting lake levels. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRIMATest - */ -@main def aRIMATest (): Unit = - - import Example_LakeLevels.y - import SARIMAX.hp - -// val d = 0 // apply d-th order differencing - no differencing - val d = 1 // apply d-th order differencing - first differences - - for h <- 1 to 1 do // forecasting horizon - for p <- 1 to 7 do // auto-regressive hyper-parameter settings - for q <- 0 to 2 do // moving-average hyper-parameter settings - banner (s"Test: ARIMA ($p, $d, $q) with h = $h") - hp("p") = p; hp("d") = d; hp("q") = q // set p, d and q for the ARIMA model - val mod = new ARIMA (y) // create an ARIMA model - mod.trainNtest ()() // train the model on full dataset - -/* - val yfa = mod.forecastAll (y, h) - val yf = yfa(?, h) // forecasted values - h steps ahead - new Plot (null, y, yf, s"Plot of y & yf, forecasted (h = $h) ${mod.modelName} vs. t", true) - - if h == 1 then Forecaster.differ (yp, yf, allow = true) - val skip = max (p, q) // skip the cap start-up - banner (s"aRIMATest: QoF (@h = $h) for yf = mod.forecastAll") - println (s"rSq (yf) for h = $h is ${rSqF (y, yf)}") - println (s"rSq (yf) for h = $h is ${rSqF (y, yf, max (p, q))}, skip = $skip") - println (s"sMAPE (yf) for h = $h is ${smapeF (y, yf)}") - println (s"sMAPE (yf) for h = $h is ${smapeF (y, yf, max (p, q))}, skip = $skip") - - banner (s"aRIMATest: QoF (@h = $h) for yf2 = mod.forecastAll2") - println (s"rSq (yf2) for h = $h is ${rSqF (y, yf2)}") - println (s"rSq (yf2) for h = $h is ${rSqF (y, yf2, max (p, q))}, skip = $skip") - println (s"sMAPE (yf2) for h = $h is ${smapeF (y, yf2)}") - println (s"sMAPE (yf2) for h = $h is ${smapeF (y, yf2, max (p, q))}, skip = $skip") -*/ - end for - end for - end for - -end aRIMATest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest2` main function tests the `ARIMA` class. - * Test simulated data. 
- * > runMain scalation.modeling.forecasting.aRIMATest2 - */ -@main def aRIMATest2 (): Unit = - - import SARIMAX.hp - - banner ("ARIMA Test2") - val m = 20 - val noise = Normal (0, 2) -// val noise = Uniform (-5, 5) - val y = VectorD (for i <- 0 until m yield i + noise.gen) - - println (s"y = $y") - - val (p, d, q) = (1, 1, 1) - hp("p") = p; hp("d") = d; hp("q") = q // set p, d and q for the ARIMA model - val mod = new ARIMA (y) // time series data: y vs. t - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - -end aRIMATest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest3` main function tests the `ARIMA` class. - * Traffic dataset. - * > runMain scalation.modeling.forecasting.aRIMATest3 - */ -@main def aRIMATest3 (): Unit = - - import SARIMAX.hp - - val nfile = "travelTime.csv" - val data = MatrixD.load (nfile) - -// val t = data(?, 0) - val y = data(?, 1) - println (s"y = $y") - - val (p, d, q) = (1, 1, 1) - val steps = 1 // number of steps for the forecasts - - hp("p") = p; hp("d") = d; hp("q") = q // set p, d and q for the ARIMA model - val mod = new ARIMA (y) // time series data: y vs. t - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - val ar_f = mod.forecast (h = steps) - println (s"$steps-step ahead forecasts using ${mod.modelName} model = $ar_f") - -end aRIMATest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest4` main function tests the `ARIMA` class. - * Simulated data with a quadratic pattern. 
- * > runMain scalation.modeling.forecasting.aRIMATest4 - */ -@main def aRIMATest4 (): Unit = - - import SARIMAX.hp - - val m = 50 - val (p, d, q) = (1, 1, 1) // hyper-parameters for the ARIMA model - val steps = 2 // number of steps for the forecasts - val sig2 = 10000.0 - val noise = Normal (0.0, sig2) - val y = VectorD (for i <- 0 until m yield 40 * (i-1) - (i-2) * (i-2) + noise.gen) - - banner (s"Build ARIMA($p, $d, $q) model") - hp("p") = p; hp("d") = d; hp("q") = q // set p, d and q for the ARIMA model - val mod = new ARIMA (y) // time series model ARIMA - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - banner ("Make Forecasts") - val yf = mod.forecast (steps) - println (s"$steps-step ahead forecasts using ${mod.modelName} model = $yf") - -end aRIMATest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest5` main function tests the `ARIMA` class on real data: - * Forecasting lake levels. Select the best number of lags. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRIMATest5 - * -@main def aRIMATest5 (): Unit = - - import Example_LakeLevels.y - - val d = 0 // level of differencing - val mod = new ARIMA (y) // create model for time series data - mod.setPQ (VectorI (1, 1)) - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - val res = mod.forwardSel () - println (s"forwardSel: $res") - - for (sp, sq) <- Array ((1, 0), (2, 1), (1, 1), (1, 2), (0, 1)) do - val res = mod.forwardSel2 (VectorI (sp, sq)) - println (s"forwardSel2: $res") - end for - -end aRIMATest5 - */ - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/ARMA.scala.bak b/src/main/scala/scalation/modeling/forecasting_old/old/ARMA.scala.bak deleted file mode 100644 index 9773c32ef..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/ARMA.scala.bak +++ /dev/null @@ -1,255 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller, Michael Cotterell - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). 
- * - * @title Model: Auto-Regressive, Moving-Average (ARMA) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ -import scalation.optimization._ - -import ARMA.hp - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARMA` class provides basic time series analysis capabilities for Auto-Regressive, - * Moving-Average (ARMA) models. In an ARMA(p, q) model, p refers to the order of the - * Auto-Regressive components and q refers to the Moving-Average compoenest of the model. - * ARMA models are often used for forecasting. - * Given time-series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * y_t = δ + Σ(φ_k y_t-k) + Σ(θ_k e_t-k) + e_t - * where δ is a constant, φ is the auto-regressive coefficient vector, - * θ is the moving-average vector, and e_t is the noise term. 
- *---------------------------------------------------------------------------------- - * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -abstract class ARMA (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARMA.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = hparam("p").toInt, df = y.dim - hparam("p").toInt): - - private val debug = debugf ("ARMA", true) // debug function - private val flaw = flawf ("ARMA") // flaw function - - m = y.dim // number of time points (@see `FitM`) - protected var p = hparam("p").toInt // p-th order Auto-Regressive and - protected var q = hparam("q").toInt // q-th order Moving-Average model - protected var φ = VectorD.nullv // AR(p) parameters/coefficients part - protected var θ = VectorD.nullv // MA(q) parameters/coefficients part - protected var δ = NO_DOUBLE // drift/intercept/constant term - protected var yf = MatrixD.nullm // the forecast matrix - time points x horizons - - if p > MAX_LAGS then flaw ("init", s"p = $p must not be greater than MAX_LAGS = $MAX_LAGS") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including its current hyper-parameter, e.g., ARMA(2, 1). - */ - override def modelName: String = s"ARMA($p, $q)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARMA` model to the times-series data in vector y_. - * Estimate the coefficient vector φ for a p-th order Auto-Regressive ARMA(p) model. - * Uses Durbin-Levinson Algorithm (in `Correlogram`) to determine the coefficients. - * The φ vector is p-th row of psi matrix (ignoring the first (0th) column). 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - m = y_.dim // length of relevant time-series - e = new VectorD (m) - resetDF (p, m - p) // reset the degrees of freedom - makeCorrelogram (y_) // correlogram computes psi matrix, gives ACF and PACF - - val mu = y_.mean // sample mean of y_ - val z = y_ - mu // optimization works better using zero-centered data - φ = new VectorD (p) // zeros for AR part - θ = new VectorD (q) // zeros for MA part - δ = 0.0 // drift/intercept for z (should end up close to zero) - val b = φ ++ θ :+ δ // combine all parameters -> vector to optimize - - def csse (b: VectorD): Double = // objective function - conditional sum of squared errors - φ = b(0 to p); θ = b(p to p+q); δ = b(b.dim-1) - val (yy, yp) = testSetup (z) // get and align actual and predicted values - val s = (yy - yp).normSq // sum of squared errors -// println (s"csse: s = $s, b = $b") - s - end csse - - def nll (b: VectorD): Double = // objective function - negative log-likelihood (MLE) - 0.0 // FIX - implement - end nll - - val optimizer = new BFGS (csse) // apply Quasi-Newton BFGS optimizer -// val optimizer = new ConjugateGradient (csse) // apply Conjugate Gradient optimizer - fails -// val optimizer = new CoordinateDescent (csse) // apply Coordinate Descent optimizer -// val optimizer = new NelderMeadSimplex (csse, 3) // apply Nelder-Mead Simplex optimizer -// val optimizer = new GridSearch (csse, 3); optimizer.setAxes () // apply GridSearch BFGS optimizer - close - val (fb, bb) = optimizer.solve (b) // optimal solution for the objective function and parameters - - φ = bb(0 to p); θ = bb(p to p+q); δ = bb(b.dim-1) // recover parameters for z - δ += mu * (1 - φ.sum) // uncenter - debug ("train", s"parameters for ARMA($p, $q) model: φ = $φ, θ = $θ, δ = $δ") - end train - - 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an ARMA forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the training/testing response/output vector (e.g., full y) - */ - def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yy, yp) = testSetup (y_) // get and align actual and predicted values - resetDF (p+q, yy.dim - p+q) // reset the degrees of freedom - (yp, diagnose (yy, yp)) // evaluate and return the QoF of these predictions - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an ARMA forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing response/output vector (e.g., full y) - * @param redo whether to use existing forecasts or redo them (defaults to false) - */ - def testf (h: Int, y_ : VectorD, redo: Boolean = false): VectorD = - if yf == null || yf.dim2 < h+1 || redo then yf = forecastAll (h, y_) // redo all forecasts - val yy = y_(h to y_.dim) - val yf_h = yf(?, h)(h to y_.dim) // pull column h from the forecast matrix and align - resetDF (p, yy.dim - p) // reset the degrees of freedom - diagnose (yy, yf_h) // evaluate and return the QoF of these forecasts - end testf - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the ARMA(p, q) model. 
- */ - override def parameter: VectorD = φ ++ θ :+ δ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for time point/index t using 1-step ahead forecasts. - * y_t = φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time), - * but do not assume errors repeat. - * @see predictAll in `Forecaster` - * @param t the time point/index to be predicted - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - if t < 1 || t > y_.dim then flaw ("predict", s"time index t = $t is out of range") - var sum = δ - for j <- 0 until p do sum += φ(j) * y_(max (0, t-1-j)) - for j <- 0 until q if t-j > 0 do sum += θ(j) * e(t-1-j) - if t < y_.dim then e(t) = y_(t) - sum // update the t-th error e_t - sum // prediction for y_t, yp_t - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * Forecast recurse down diagonals in the yf forecast matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. 
- * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - */ - def forecastAll (h: Int, y_ : VectorD): MatrixD = - yf = new MatrixD (y_.dim+h, h+1) // forecasts for all time points t & horizons to h - for t <- 0 until m do yf(t, 0) = y_(t) // first column is actual values, horizon 0 - for k <- 1 to h do - for t <- y_.indices do // make forecasts over all time points for horizon k - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t+k-1-j), max (0, k-1-j)) - yf(t+k, k) = sum // forecast down the diagonal - end for - debug ("forecastAll", s"yf(?, $k) = ${yf(?, k)}") - end for - yf // return matrix of forecasted values - end forecastAll - -end ARMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARMA` companion object provides factory methods for the `ARMA` class. - */ -object ARMA: - - /** Base hyper-parameter specification for `ARMA` class - */ - val hp = new HyperParameter - hp += ("p", 1, 1) - hp += ("q", 1, 1) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARMA` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARMA.hp): ARMA = - new ARMA (y, tt, hparam) - end apply - -end ARMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest` main function tests the `ARMA` class on real data: Forecasting lake levels. - * Test the test, predictAll, testf and forecastAll methods over the whole times-series. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRMATest - */ -@main def aRMATest (): Unit = - - import Example_LakeLevels.y - - val m = y.dim - val t = VectorD.range (1, m) - val h = 3 // the forecasting horizon - val q = 1; hp("q") = q // moving-average hyper-parameter q - - var mod: ARMA = null - for p <- 1 to 1 do // autoregressive hyper-parameter p - hp("p") = p // set p hyper-parameter - banner (s"Test: ARMA($p, $q}") - mod = new ARMA (y) // create model for time series data - mod.train (null, y) // train the model on full dataset - val yp = mod.testPred (y, t) -/* - val yf = mod.forecastAll (h, y) // forecast h-steps ahead for all y - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - assert (yf(?, 1)(1 to m+1) == yp) // column 1 must agree with one step-ahead predictions - for k <- 1 to h do - println (s"evalaute QoF for horizon $k:") - println (Fit.fitMap (mod.testf (k, y))) // evaluate k-units ahead forecasts - end for -*/ - end for - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRMATest - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/ARMA2.scala.bak b/src/main/scala/scalation/modeling/forecasting_old/old/ARMA2.scala.bak deleted file mode 100644 index cedb942b8..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/ARMA2.scala.bak +++ /dev/null @@ -1,303 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller, Michael Cotterell - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). 
- * - * @title Model: Auto-Regressive, Moving-Average (ARMA2) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ -import scalation.optimization._ - -import ARMA2.hp - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARMA2` class provides basic time series analysis capabilities for Auto-Regressive, - * Moving-Average (ARMA2) models. In an ARMA2(p, q) model, p refers to the order of the - * Auto-Regressive components and q refers to the Moving-Average compoenest of the model. - * ARMA2 models are often used for forecasting. - * Given time-series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * y_t = δ + Σ(φ_k y_t-k) + Σ(θ_k e_t-k) + e_t - * where δ is a constant, φ is the auto-regressive coefficient vector, - * θ is the moving-average vector, and e_t is the noise term. 
- *---------------------------------------------------------------------------------- - * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class ARMA2 (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARMA2.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = hparam("p").toInt, df = y.dim - hparam("p").toInt): - - private val debug = debugf ("ARMA2", true) // debug function - private val flaw = flawf ("ARMA2") // flaw function - - m = y.dim // number of time points (@see `FitM`) - protected var p = hparam("p").toInt // p-th order Auto-Regressive and - protected var q = hparam("q").toInt // q-th order Moving-Average model - protected var φ = VectorD.nullv // AR(p) parameters/coefficients part - protected var θ = VectorD.nullv // MA(q) parameters/coefficients part - protected var δ = NO_DOUBLE // drift/intercept/constant term -// protected var yf = MatrixD.nullm // the forecast matrix - time points x horizons - - if p > MAX_LAGS then flaw ("init", s"p = $p must not be greater than MAX_LAGS = $MAX_LAGS") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including its current hyper-parameter, e.g., ARMA2(2, 1). - */ - override def modelName: String = s"ARMA2($p, $q)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARMA2` model to the times-series data in vector y_. - * Estimate the coefficient vector φ for a p-th order Auto-Regressive ARMA2(p) model. - * Uses Durbin-Levinson Algorithm (in `Correlogram`) to determine the coefficients. - * The φ vector is p-th row of psi matrix (ignoring the first (0th) column). 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - m = y_.dim // length of relevant time-series - resetDF (p+q, m - (p+q)) // reset the degrees of freedom - makeCorrelogram (y_) // correlogram computes psi matrix, gives ACF and PACF - - val mu = y_.mean // sample mean of y_ - val z = y_ - mu // optimization works better using zero-centered data - φ = new VectorD (p) // zeros for AR part - θ = new VectorD (q) // zeros for MA part - δ = 0.0 // drift/intercept for z (should end up close to zero) - val b = φ ++ θ :+ δ // combine all parameters -> vector to optimize - - def csse (b: VectorD): Double = // objective function - conditional sum of squared errors - φ = b(0 to p); θ = b(p to p+q); δ = b(b.dim-1) // pull parameters out of b vector - ssef (z, predictAll (z)) // compute loss function - end csse - - def nll (b: VectorD): Double = // objective function - negative log-likelihood (MLE) - 0.0 // FIX - implement - end nll - - val optimizer = new BFGS (csse) // apply Quasi-Newton BFGS optimizer -// val optimizer = new ConjugateGradient (csse) // apply Conjugate Gradient optimizer - fails -// val optimizer = new CoordinateDescent (csse) // apply Coordinate Descent optimizer -// val optimizer = new NelderMeadSimplex (csse, 3) // apply Nelder-Mead Simplex optimizer -// val optimizer = new GridSearch (csse, 3); optimizer.setAxes () // apply GridSearch BFGS optimizer - close - val (fb, bb) = optimizer.solve (b) // optimal solution for the objective function and parameters - - φ = bb(0 to p); θ = bb(p to p+q); δ = bb(b.dim-1) // recover parameters for z - δ += mu * (1 - φ.sum) // uncenter - debug ("train", s"parameters for ARMA2($p, $q) model: φ = $φ, θ = $θ, δ = $δ") - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an ARMA2 forecasting model y_ = f(lags (y_)) + e and return its 
- * QoF vector. Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the training/testing response/output vector (e.g., full y) - */ - def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yy, yp) = testSetup (y_) // get and align actual and predicted values - resetDF (p+q, yy.dim - (p+q)) // reset the degrees of freedom - (yp, diagnose (yy, yp)) // evaluate and return the QoF of these predictions - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an ARMA2 forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing response/output vector (e.g., full y) - * @param redo whether to use existing forecasts or redo them (defaults to false) - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD) = - if yf == null || yf.dim2 < h+1 then yf = forecastAll (h, y_) // redo all forecasts - val yy = y_(h to y_.dim) - val yfh = yf(?, h)(h to y_.dim) // pull column h from the forecast matrix and align - resetDF (p+q, yy.dim - (p+q)) // reset the degrees of freedom - (yfh, diagnose (yy, yfh)) // evaluate and return the QoF of these forecasts - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the ARMA2(p, q) model. - */ - override def parameter: VectorD = φ ++ θ :+ δ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using 1-step ahead forecasts. 
- * y_t+1 = φ_0 y_t + φ_1 y_t-1 + ... + φ_p-1 y_t-(p-1) + - * θ_0 e_t + θ_1 e_t-1 + ... + θ_q-1 e_t-(q-1) + e_t+1 - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time), - * but do not assume errors repeat. - * @see predictAll in `Forecaster` - * @param t the time point/index to be predicted - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - var sum = δ // intercept - for j <- 0 until p do sum += φ(j) * y_(max (0, t-j)) - for j <- 0 until q if t-j >= 0 do sum += θ(j) * e(t-j) - if t < y_.dim-1 then e(t+1) = y_(t+1) - sum // update the error vector - sum // prediction for y_t, yp_t - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * Forecast recurse down diagonals in the yf forecast matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. 
- * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - */ - def forecastAll (h: Int, y_ : VectorD): MatrixD = - yf = new MatrixD (y_.dim+h, h+1) // forecasts for all time points t & horizons to h - for t <- 0 until m do yf(t, 0) = y_(t) // first column is actual values, horizon 0 - for k <- 1 to h do - for t <- y_.indices do // make forecasts over all time points for horizon k - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t+k-1-j), max (0, k-1-j)) - yf(t+k, k) = sum // forecast down the diagonal - end for - debug ("forecastAll", s"yf(?, $k) = ${yf(?, k)}") - end for - yf // return matrix of forecasted values - end forecastAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to forecasting matrix and return h-step ahead forecast. - * For 1-step ahead (h = 1), - * y_t = δ + φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time). 
- * @param yf the forecasting matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - override def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 1 then flaw ("forecastAt", s"horizon h = $h must be at least 1") - e(0) // assume error at time 0 is 0 - val m = y_.dim - for t <- y_.indices do // make forecasts over all time points for horizon k - val t1 = t+h-1 // time point prior to horizon - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t1-j), max (0, h-1-j)) - for j <- 0 until q if t1-j >= 0 do sum += θ(j) * e(t1-j) - yf(t+h, h) = sum // forecast down the diagonal - if h == 1 && t < m-1 then e(t+1) = y_(t+1) - sum // update the next element in the error vector - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end ARMA2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARMA2` companion object provides factory methods for the `ARMA2` class. - */ -object ARMA2: - - /** Base hyper-parameter specification for `ARMA2` class - */ - val hp = new HyperParameter - hp += ("p", 1, 1) - hp += ("q", 0, 0) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARMA2` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARMA2.hp): ARMA2 = - new ARMA2 (y, tt, hparam) - end apply - -end ARMA2 - -import Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMA2Test` main function tests the `ARMA2` class on real data: Forecasting lake levels. - * Test predictions (one step ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRMA2Test - */ -@main def aRMA2Test (): Unit = - - banner (s"Test Predictions: ARMA(1, 0) on LakeLevels Dataset") - var mod = new ARMA2 (y) // create model for time series data ARMA(1, 0) - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - banner (s"Test Predictions: ARMA(1, 1) on LakeLevels Dataset") - hp("q") = 1 // set moving-average hyper-parameter q to 1 - mod = new ARMA2 (y) // create model for time series data ARMA(1, 1) - mod.train (null, y) // train the model on full dataset - val (yp2, qof2) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof2)) // report on Quality of Fit (QoF) - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRMA2Test - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMA2Test2` main function tests the `ARMA2` class on real data: Forecasting lake levels. - * Test the test, predictAll, testf and forecastAll methods over the whole times-series. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRMA2Test2 - */ -@main def aRMA2Test2 (): Unit = - - val m = y.dim - val t = VectorD.range (1, m) - val h = 3 // the forecasting horizon - val q = 1; hp("q") = q // moving-average hyper-parameter q - - var mod: ARMA2 = null - for p <- 1 to 1 do // autoregressive hyper-parameter p - hp("p") = p // set p hyper-parameter - banner (s"Test: ARMA2($p, $q}") - mod = new ARMA2 (y) // create model for time series data - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) -/* - val yf = mod.forecastAll (h, y) // forecast h-steps ahead for all y - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - assert (yf(?, 1)(1 to m+1) == yp) // column 1 must agree with one step-ahead predictions - for k <- 1 to h do - println (s"evalaute QoF for horizon $k:") - println (Fit.fitMap (mod.testf (k, y))) // evaluate k-units ahead forecasts - end for -*/ - end for - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRMA2Test2 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/ARX.scala.bak b/src/main/scala/scalation/modeling/forecasting_old/old/ARX.scala.bak deleted file mode 100644 index 10d0b4511..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/ARX.scala.bak +++ /dev/null @@ -1,532 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Tue Feb 22 23:14:31 EST 2022 - * @see LICENSE (MIT style license file). 
- * - * @note Model: AutoRegressive with eXogenous Variables (Time Series Regression) - */ - -package scalation -package modeling -package forecasting - -import scala.math.{max, min} - -import scalation.mathstat._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX` class supports regression for Time Series data. - * Multi-horizon forecasting supported via the RECURSIVE method. - * Given a response vector y, a predictor matrix x is built that consists of - * lagged y vectors, - * - * y_t = b dot x - * where x = [1, y_{t-1}, y_{t-2}, ... y_{t-lags}] - * - * @param x the input/predictor matrix built out of lags of y - (and optionally from exogenous variables ex) - * @param yy the output/response vector trimmed to match x.dim - * @param lags the maximum lag included (inclusive) - * @param fname the feature/variable names - * @param hparam the hyper-parameters (use Regression.hp for default) - */ -class ARX (x: MatrixD, yy: VectorD, lags: Int, fname: Array [String] = null, - hparam: HyperParameter = Regression.hp) - extends Regression (x, yy, fname, hparam) - with ForecasterX (lags): - - private val debug = debugf ("ARX", true) // debug function - private val flaw = flawf ("ARX") // flaw function - - modelName = s"ARX_$lags" - - debug ("init", s"$modelName: x.dims = ${x.dims}, yy.dim = ${yy.dim}") - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. - * Intended to work with rolling validation (analog of predict method) - * Must call `forecastAll` first. 
- * @param t the time point from which to make forecasts - * @param yf the forecasting matrix (time x horizons) - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - VectorD (for k <- 1 to h yield yf(t+k, k)) // get yf diagonal from time t - end forecast - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to forecasting matrix and return h-step ahead forecast. - * For 1-step ahead (h = 1), - * y_t = δ + φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time). - * @param yf the forecasting matrix for the endogenous variable y (time x horizons) - * @param yx the matrix of endogenous y and exogenous x values - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, yx: MatrixD, h: Int): VectorD = - if h < 1 then flaw ("forecastAt", s"horizon h = $h must be at least 1") - for t <- yx.indices do // make forecasts over all time points for horizon h - val t1 = t + h - 1 // time point prior to horizon - yf(t+h, h) = b dot yx(min (t1, yx.dim-1)) // forecast down the diagonal ?? - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a `ARX` forecasting model y_ = f(x) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - * @param yx the matrix of endogenous y and exogenous x values - */ - def testF (h: Int, y_ : VectorD, yx: MatrixD): (VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, yx, h) // get and align actual and forecasted values - val params = x.dim2 - resetDF (params, yy.dim - params) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// differ (yy, yfh) // uncomment for debugging - (yfh, diagnose (yy, yfh)) // return predictions and QoF vector - end testF - -end ARX - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX` companion object provides factory methods. - */ -object ARX: - - private val debug = debugf ("ARX", true) // debug function - - private val TREND = false // include quadratic trend - private val DAY = false // include day of the week effect - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `ARX` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. 
- * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @param hparam the hyper-parameters (use Regression.hp for default) - */ - def apply (y: VectorD, lags: Int, hparam: HyperParameter = Regression.hp): ARX = - val (x_, yy) = buildMatrix4TS (y, lags) // column for each lag - var x = VectorD.one (yy.dim) +^: x_ // add first column of all ones - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - end if - if DAY then - val day = VectorI (for t <- yy.indices yield t % 7) - x = day.toDouble +^: x // add DAY of week as ordinal var - -// val dum = Variable.dummyVars (day) -// x = x ++^ dum // add DAY of week as dummy vars - end if - - println (s"apply: x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// println (s"apply: x = $x \n yy = $yy") - new ARX (x, yy, lags, null, hparam) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `ARX` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * In addition, lagged exogenous variables are added. 
- * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @parax ex the input matrix for exogenous variables (one per column) - * @param hparam the hyper-parameters (use Regression.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def exo (y: VectorD, lags: Int, ex: MatrixD, hparam: HyperParameter = Regression.hp) - (elag1: Int = max (1, lags / 5), elag2: Int = max (1, lags)): ARX = - val (x_, yy) = buildMatrix4TS (y, lags) // column for each lag - var x = VectorD.one (yy.dim) +^: x_ // add first column of all ones - val endoCols = x.dim2 - println (s"endogenous: columns = $endoCols") - - x = x ++^ makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var - println (s"exogenous: columns = ${x.dim2 - endoCols}") - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - end if - if DAY then - val day = VectorI (for t <- yy.indices yield t % 7) - val dum = Variable.dummyVars (day) - x = x ++^ dum // add DAY of week as dummy vars - end if - - println (s"exo: x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// println (s"exo: x = $x \n yy = $yy") - new ARX (x, yy, lags, null, hparam) - end exo - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Make a matrix whose columns are lagged exogenous variables to be added to a data matrix. 
- * @param lags the maximum lag included (inclusive) for checking purposes - * @param ex the matrix of data for the exogenous variables - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def makeExoCols (lags: Int, ex: MatrixD, elag1: Int, elag2: Int): MatrixD = - var xx: MatrixD = buildMatrix4TS_exo (ex(?, 0), lags, elag1, elag2) - for j <- 1 until ex.dim2 do - xx = xx ++^ buildMatrix4TS_exo (ex(?, j), lags, elag1, elag2) - end for - println (s"addExoVars: collects lags of ${ex.dim2} exo variables into #xx.dim2 columns") - xx - end makeExoCols - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split the x matrix and y vector into training and testing sets. - * @param x the x data/input matrix - * @param y the y response/output vector - * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) - */ - def split_TnT (x: MatrixD, y: VectorD, ratio: Double = 0.30): (MatrixD, VectorD, MatrixD, VectorD) = - val n = x.dim - val tr_size = (n * (1.0 - ratio)).toInt - println (s"ARX.split_TnT: tr_size = $tr_size, te_size = ${n - tr_size}") - (x(0 until tr_size), y(0 until tr_size), x(tr_size until n), y(tr_size until n)) - end split_TnT - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TESTING SET (tr) and a TRAINING SET (te) - * as follows: [ <-- tr_size --> | <-- te_size --> ] - * This version calls predict for one-step ahead out-of-sample forecasts. 
- * @see `RollingValidation` - * @param mod the forecasting model being used (e.g., `ARX`) - * @param rc the retraining cycle (number of forecasts until retraining occurs) - */ - def rollValidate (mod: Predictor & Fit, rc: Int): Unit = - val x = mod.getX // get data/input matrix - val y = mod.getY // get response/output vector - val tr_size = RollingValidation.trSize (y.dim) // size of initial training set - val te_size = y.dim - tr_size // size of testing set - debug ("rollValidate", s"train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") - - val yp = new VectorD (te_size) // y-predicted over testing set - for i <- 0 until te_size do // iterate through testing set - val t = tr_size + i // next time point to forecast - if i % rc == 0 then mod.train (x(0 until t), y(0 until t)) // retrain on sliding training set - yp(i) = mod.predict (x(t-1)) // predict the next value - end for - - val (t, yy) = RollingValidation.align (tr_size, y) // align vectors - val df = max (0, mod.parameter.size - 1) // degrees of freedom for model - mod.resetDF (df, te_size - df) // reset degrees of freedom - new Plot (t, yy, yp, "Plot yy, yp vs. t", lines = true) - println (FitM.fitMap (mod.diagnose (yy, yp), QoF.values.map (_.toString))) - end rollValidate - -end ARX - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest` main function tests the `ARX` class. - * This test is used to CHECK that the buildMatrix4TS function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. 
- * > runMain scalation.modeling.forecasting.aRXTest - */ -@main def aRXTest (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - - for p <- 1 to 10 do // autoregressive hyper-parameter p - banner (s"Test: ARX with $p lags") - val mod = ARX (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yp = mod.predict (mod.getX) - new Plot (null, mod.getY, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - end for - -end aRXTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest2` main function tests the `ARX` class on real data: - * Forecasting lake levels. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRXTest2 - */ -@main def aRXTest2 (): Unit = - - import Example_LakeLevels.y - val h = 2 // the forecasting horizon - - for p <- 1 to 8 do // autoregressive hyper-parameter p - banner (s"Test: ARX with $p lags") - val mod = ARX (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) // parameter/coefficient statistics - - banner ("Predictions") - val yy = mod.getY // trimmed actual response vector - val xx = mod.getX - val yp = mod.predict (xx) // predicted response vector - new Plot (null, yy, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - println (s"yp = $yp") - - banner ("Forecasts") -// val yf = mod.forecast (yp, h) // forecasted response matrix - val yf = mod.forecastAll (yy, xx, h) // forecasted response matrix - for k <- yf.indices2 do - new Plot (null, yy, yf(?, k), s"yy vs. 
yf_$k for ${mod.modelName} with $p lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0) == yp) // first forecast = predicted values -/* - banner ("Forecast QoF") - println (testForecast (mod, y, yf, p)) // QoF -// println (Fit.fitMap (mod.testf (k, y))) // evaluate k-units ahead forecasts -*/ - end for - -end aRXTest2 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest3` main function tests the `ARX` class on real data: - * Forecasting COVID-19 Daily Data. Does In-Sample Testing on Endogenous variable. - * > runMain scalation.modeling.forecasting.aRXTest3 - */ -@main def aRXTest3 (): Unit = - - val LAGS = 5 // number of lags of y - val h = 2 // forecasting horizon - - val exo_vars = Array.ofDim [String] (0) // no exogenous variables in this case - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX on COVID-19 Weekly Data") - val mod = ARX (y, LAGS) // create model for time series data - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - - banner (s"Multi-horizon forecasting using the recursive method") - val yx = mod.getX - val yf = mod.forecastAll (y, yx, h) // forecasted response matrix - for k <- 0 to h do - new Plot (null, y, yf(?, k), s"y vs. 
yf_$k for ${mod.modelName} with $LAGS lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}, y.dim = ${y.dim}, yp.dim = ${yp.dim}") - val yf0 = yf(?, 0)(0 until y.dim) - val yf1 = yf(?, 1)(1 until y.dim) - Forecaster.differ (yf0, y) - Forecaster.differ (yf1, yp) - assert (yf0 =~ y) // zeroth forecast = actual values - assert (yf1 =~ yp) // first forecast = predicted values - - for k <- 1 to h do - val (yfh, qof) = mod.testF (k, y, yx) // k-steps ahead forecast and its QoF - println (s"Evaluate QoF for horizon $k:") - println (FitM.fitMap (qof, QoF.values.map (_.toString))) // evaluate k-steps ahead forecasts - end for - - banner (s"Feature Selection Technique: stepRegression") - val (cols, rSq) = mod.stepRegressionAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"Stepwise: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end aRXTest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest4` main function tests the `ARX` class on real data: - * Forecasting COVID-19 Weekly Data. Does In-Sample Testing on endogenous and exogenous variables. 
- * > runMain scalation.modeling.forecasting.aRXTest4 - */ -@main def aRXTest4 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX.exo on COVID-19 Weekly Data") - val mod = ARX.exo (y, 10, ex)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end aRXTest4 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest5` main function tests the `ARX` class on real data: - * Forecasting COVID-19 Weekly Data. Does TnT Testing on endogenous and exogenous variables. - * Determine the terms to include in the model using Stepwise on In-Sample. 
- * > runMain scalation.modeling.forecasting.aRXTest5 - */ -@main def aRXTest5 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX.exo on COVID-19 Weekly Data") - val mod = ARX.exo (y, 10, ex)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run TnT on Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - val (x_, y_, xtest, ytest) = ARX.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) - new Plot (null, ytest, yptest, s"${mod.modelName}, ytest vs. 
yptest", lines = true) - -end aRXTest5 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest6` main function tests the `ARX` class on real data: - * Forecasting COVID-19 Weekly Data. Does Rolling Validation on endogenous and exogenous - * variables. Determine the terms to include in the model using Stepwise on In-Sample. - * > runMain scalation.modeling.forecasting.aRXTest6 - */ -@main def aRXTest6 (): Unit = - - val LAGS = 7 - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX.exo on COVID-19 Weekly Data") - val mod = ARX.exo (y, LAGS, ex)(1, LAGS+1) // create model for time series data with exo - - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run Rolling Validation on ARX Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - ARX.rollValidate (bmod, 1) - -end aRXTest6 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest7` main function tests the `ARX` class on real data: - * Forecasting COVID-19 Weekly Data. Preliminary investigation of Symbolic Regression. - * > runMain scalation.modeling.forecasting.aRXTest7 - */ -@main def aRXTest7 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - - banner ("Plot Variables on COVID-19 Weekly Data") - - for lag <- 0 to 4 do - val xx_ = ex(lag until y.dim) - val yy_ = y(0 until y.dim - lag) -// new Plot (xx_, yy_, null, s"deaths vs. 
exo-vars @ lag = $lag") - - val mod = SymbolicRegression (xx_, yy_, null, collection.mutable.Set (1.0), cross = false) - mod.trainNtest ()() - println (mod.summary ()) - end for - -end aRXTest7 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/ARX.scala.bak2 b/src/main/scala/scalation/modeling/forecasting_old/old/ARX.scala.bak2 deleted file mode 100644 index 17bd75682..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/ARX.scala.bak2 +++ /dev/null @@ -1,592 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Tue Feb 22 23:14:31 EST 2022 - * @see LICENSE (MIT style license file). - * - * @note Model: AutoRegressive with eXogenous Variables (Time Series Regression) - */ - -package scalation -package modeling -package forecasting - -import scala.math.{max, min} - -import scalation.mathstat._ - -import scalation.modeling.{Regression => REGRESSION} -//import scalation.modeling.{RidgeRegression => REGRESSION} -//import scalation.modeling.{LassoRegression => REGRESSION} - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX` class supports regression for Time Series data. - * Multi-horizon forecasting supported via the RECURSIVE method. - * Given a response vector y, a predictor matrix x is built that consists of - * lagged y vectors, - * - * y_t = b dot x - * where x = [1, y_{t-1}, y_{t-2}, ... 
y_{t-lags}] - * - * @param x the input/predictor matrix built out of lags of y - * (and optionally from exogenous variables ex) - * @param yy the output/response vector trimmed to match x.dim (@see ARX object) - * @param lags the maximum lag included (inclusive) - * @param fname the feature/variable names - * @param hparam the hyper-parameters (use REGRESSION.hp for default) - */ -class ARX (x: MatrixD, yy: VectorD, lags: Int, fname: Array [String] = null, - hparam: HyperParameter = REGRESSION.hp) - extends REGRESSION (x, yy, fname, hparam) - with ForecasterX (lags): - - private val debug = debugf ("ARX", true) // debug function - private val flaw = flawf ("ARX") // flaw function - - modelName = s"ARX_$lags" - - debug ("init", s"$modelName: x.dims = ${x.dims}, yy.dim = ${yy.dim}") - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the internally row trimed and column expanded input matrix and response vector. - */ - def getXY: (MatrixD, VectorD) = (x, yy) // (getX, getY) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * y_t+1 = f (y_t, ...) + e_t+1 - * @param t the time point from which to make prediction - * @param yx the matrix of endogenous y and exogenous x values - */ - def predict (t: Int, yx: MatrixD): Double = ??? -/* - // FIX - prints for debugging assertion failure yp(i) != yd(0) - println (yx) - println (s"t-1: ${yx(min (t-1, yx.dim-1))} --> ${b dot yx(min (t-1, yx.dim-1))}") - println (s"t: ${yx(min (t, yx.dim-1))} --> ${b dot yx(min (t, yx.dim-1))}") - b dot yx(min (t-1, yx.dim-1)) -*/ - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. 
- * Intended to work with rolling validation (analog of predict method) - * Must call `forecastAll` first. - * @param t the time point from which to make forecasts - * @param yf the forecast matrix (time x horizons) - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - VectorD (for k <- 1 to h yield yf(t+k, k)) // get yf diagonal from time t - end forecast - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to FORECAST MATRIX and return h-step ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. - * @param yf the forecast matrix for the endogenous variable y (time x horizons) - * @param yx the matrix of endogenous y and exogenous x values - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, yx: MatrixD, h: Int): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - - for t <- yx.indices do // make forecasts over all time points for horizon h - yf(t+h-1, h) = b dot yx(min (t, yx.dim-1)) // forecast down the diagonal ?? - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a `ARX` forecasting model y_ = f(x) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - * @param yx the matrix of endogenous y and exogenous x values - */ - def testF (h: Int, y_ : VectorD, yx: MatrixD): (VectorD, VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, yx, h) // get and align actual and forecasted values - val params = x.dim2 - resetDF (params, yy.dim - params) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// differ (yy, yfh) // uncomment for debugging - (yy, yfh, diagnose (yy, yfh)) // return aligned actual, forecasted and QoF vectors - end testF - -end ARX - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX` companion object provides factory methods. - */ -object ARX: - - private val debug = debugf ("ARX", true) // debug function - - private var TREND = false // include quadratic trend - private val DAY = false // include day of the week effect - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set whether to include a simple linear (in time) trend. - * @param trend flag indicating whether to include a trend - */ - def setTrend (trend: Boolean): Unit = TREND = trend - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. 
- * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @param hparam the hyper-parameters (use REGRESSION.hp for default) - */ - def apply (y: VectorD, lags: Int, hparam: HyperParameter = REGRESSION.hp): ARX = - val (x_, yy) = buildMatrix4TS (y, lags) // column for each lag; yy is y trimmed - var x = VectorD.one (yy.dim) +^: x_ // add first column of all ones - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - if DAY then - val day = VectorI (for t <- yy.indices yield t % 7) - x = day.toDouble +^: x // add DAY of week as ordinal var - -// val dum = Variable.dummyVars (day) -// x = x ++^ dum // add DAY of week as dummy vars - - debug ("apply", s"x.dims = ${x.dims}, yy.dim = ${yy.dim}") - debug ("apply", "x = $x \n yy = $yy") - new ARX (x, yy, lags, null, hparam) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * In addition, lagged exogenous variables are added. 
- * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @parax ex the input matrix for exogenous variables (one per column) - * @param hparam the hyper-parameters (use REGRESSION.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def exo (y: VectorD, lags: Int, ex: MatrixD, hparam: HyperParameter = REGRESSION.hp) - (elag1: Int = max (1, lags / 5), elag2: Int = max (1, lags)): ARX = - val (x_, yy) = buildMatrix4TS (y, lags) // column for each lag - var x = VectorD.one (yy.dim) +^: x_ // add first column of all ones - val endoCols = x.dim2 - println (s"endogenous: columns = $endoCols") - - x = x ++^ makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var - println (s"exogenous: columns = ${x.dim2 - endoCols}") - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - end if - if DAY then - val day = VectorI (for t <- yy.indices yield t % 7) - val dum = Variable.dummyVars (day) - x = x ++^ dum // add DAY of week as dummy vars - end if - - debug ("exo", s"x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// debug ("exo", s"x = $x \n yy = $yy") - new ARX (x, yy, lags, null, hparam) - end exo - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Make a matrix whose columns are lagged exogenous variables to be added to a data matrix. 
- * @param lags the maximum lag included (inclusive) for checking purposes - * @param ex the matrix of data for the exogenous variables - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def makeExoCols (lags: Int, ex: MatrixD, elag1: Int, elag2: Int): MatrixD = - var xx: MatrixD = buildMatrix4TS_exo (ex(?, 0), lags, elag1, elag2) - for j <- 1 until ex.dim2 do - xx = xx ++^ buildMatrix4TS_exo (ex(?, j), lags, elag1, elag2) - end for - println (s"addExoVars: collects lags of ${ex.dim2} exo variables into ${xx.dim2} columns") - xx - end makeExoCols - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split the x matrix and y vector into training and testing sets. - * @param x the x data/input matrix - * @param y the y response/output vector - * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) - */ - def split_TnT (x: MatrixD, y: VectorD, ratio: Double = 0.30): (MatrixD, VectorD, MatrixD, VectorD) = - val n = x.dim - val tr_size = (n * (1.0 - ratio)).toInt - println (s"ARX.split_TnT: tr_size = $tr_size, te_size = ${n - tr_size}") - (x(0 until tr_size), y(0 until tr_size), x(tr_size until n), y(tr_size until n)) - end split_TnT - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TRAINING SET (tr) and a TESTING SET (te) - * as follows: [ <-- tr_size --> | <-- te_size --> ] - * This version calls predict for one-step ahead out-of-sample forecasts. 
- * @see `RollingValidation` - * @param mod the forecasting model being used (e.g., `ARX`) - * @param rc the retraining cycle (number of forecasts until retraining occurs) - */ - def rollValidate (mod: Predictor & Fit, rc: Int): Unit = - val x = mod.getX // get data/input matrix - val y = mod.getY // get response/output vector - val te_size = RollingValidation.teSize (y.dim) // size of initial testing set - val tr_size = y.dim - te_size // size of initial training set - debug ("rollValidate", s"train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") - - val yp = new VectorD (te_size) // y-predicted over testing set - for i <- 0 until te_size do // iterate through testing set - val t = tr_size + i // next time point to forecast -// if i % rc == 0 then mod.train (x(0 until t), y(0 until t)) // retrain on sliding training set (growing set) - if i % rc == 0 then mod.train (x(i until t), y(i until t)) // retrain on sliding training set (fixed size set) - yp(i) = mod.predict (x(t-1)) // predict the next value (only for h=1) - end for - - val (t, yy) = RollingValidation.align (tr_size, y) // align vectors - val df = max (0, mod.parameter.size - 1) // degrees of freedom for model - mod.resetDF (df, te_size - df) // reset degrees of freedom - new Plot (t, yy, yp, "Plot yy, yp vs. t", lines = true) - println (FitM.fitMap (mod.diagnose (yy, yp), qoF_names)) - end rollValidate - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TRAINING SET (tr) and a TESTING SET (te) - * as follows: [ <-- tr_size --> | <-- te_size --> ] - * This version calls predict for h-steps ahead out-of-sample forecasts. - * @see `RollingValidation` - * @param mod the forecasting model being used (e.g., `ARX`) - * @param rc the retraining cycle (number of forecasts until retraining occurs) - * @param hh the forecasting horizon (1, 2, ... 
h) - * @param te_size the size of the testing set (negative => use ratio to calculate - def rollValidate (mod: ARX, rc: Int, hh: Int, te_size_ : Int = -1): Unit = - val x = mod.getX // get data/input matrix - val y = mod.getY // get response/output vector - val ftMat = new MatrixD (hh, Fit.N_QoF) - -// println (s"rollValidate x = $x") - val yf = mod.forecastAll (y, x, hh) // get in-sample forecast matrix -// println (s"rollValidate x = $x") // FIX - forecastAll destroys x ??? - - val te_size = if te_size_ > 0 then te_size_ // size of initial testing set - else RollingValidation.teSize (y.dim) // calculate using testing ratio - val tr_size = y.dim - te_size // size of initial training set - debug ("rollValidate", s"y.dim = ${y.dim}, train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") - -// val yp = new VectorD (te_size) // y-predicted over testing set (only for h=1) - for i <- 0 until te_size do // iterate through testing set - val t = tr_size + i // next time point to forecast -// if i % rc == 0 then mod.train (x(0 until t), y(0 until t)) // retrain on sliding training set (gtowing set) - if i % rc == 0 then mod.train (x(i until t), y(i until t)) // retrain on sliding training set (fixed size set) -// yp(i) = mod.predict (x(t-1)) // predict the next value (only for h=1) -// yp(i) = mod.predict (t-1, x) // predict the next value (only for h=1) - mod.forecast (t-1, yf, hh) // forecast the next h-values - end for // yf is updated down its diagonals - - val df = max (1, mod.parameter.size - 1) // degrees of freedom for model - mod.resetDF (df, te_size - df) // reset degrees of freedom - val (t, yy) = RollingValidation.align (tr_size, y) // align vectors - - for h <- 1 to hh do - val yfh = yf(tr_size until y.dim, h) - debug ("rollValidate", s"horizon $h: actual: yy.dim = ${yy.dim}, forecasted: yfh.dim = ${yfh.dim}") - new Plot (t, yy, yfh, s"Plot yy, yfh vs. 
t for horizon h = $h)", lines = true) - ftMat(h-1) = mod.diagnose (yy, yfh) - end for - - banner (s"rollValidate: Evaluate ${mod.modelName}'s QoF for the horizons: 1 to $hh") - println ("fitMap qof = ") - println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) - end rollValidate - */ - -end ARX - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest` main function tests the `ARX` class. - * This test is used to CHECK that the buildMatrix4TS function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. - * > runMain scalation.modeling.forecasting.aRXTest - */ -@main def aRXTest (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - - for p <- 1 to 9 do // autoregressive hyper-parameter p - banner (s"Test: ARX with $p lags") - val mod = ARX (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yp = mod.predict (mod.getX) - new Plot (null, mod.getY, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - end for - -end aRXTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest2` main function tests the `ARX` class on real data: - * Forecasting lake levels. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRXTest2 - */ -@main def aRXTest2 (): Unit = - - import Example_LakeLevels.y - val h = 2 // the forecasting horizon - ARX.setTrend (true) - - for p <- 1 to 7 do // autoregressive hyper-parameter p - banner (s"Test: ARX with $p lags") - val mod = ARX (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) // parameter/coefficient statistics - - banner ("Predictions") - val (yx, yy) = mod.getXY // trimmed, input matrix and actual response vector - println (s"y.dim = ${y.dim}, yy.dim = ${yy.dim}, yx.dims = ${yx.dims}") - println (s"y = $y") - println (s"yy = $yy") - val yp = mod.predict (yx) // predicted response vector - new Plot (null, yy, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - println (s"yp = $yp") - - banner ("Forecasts") -// val yf = mod.forecast (yp, h) // forecasted response matrix - val yf = mod.forecastAll (y, yx, h) // forecasted response matrix - for k <- yf.indices2 do - new Plot (null, yy, yf(?, k), s"yy vs. yf_$k for ${mod.modelName} with $p lags", lines = true) - -// mod.testHorizons (h, y, yx) // calls testF for horizons 1 to h - ForecasterX.evalForecasts (mod, y, yx, h) - -// banner ("Forecast QoF") -// println (testForecast (mod, y, yf, p)) // QoF -// println (Fit.fitMap (mod.testf (k, y))) // evaluate k-units ahead forecasts - end for - -end aRXTest2 - -// val iskip = yy.indexWhere (_ >= 6.0) // find week with at least 6 deaths -// println (s"iskip = $iskip is first week with at least 6 deaths") - -import Example_Covid.{loadData, NO_EXO, response} - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest3` main function tests the `ARX` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous variable only. - * Does In-Sample Testing (In_ST). 
- * Determines the terms to include in the model using Feature Selection. - * > runMain scalation.modeling.forecasting.aRXTest3 - */ -@main def aRXTest3 (): Unit = - - val LAGS = 10 // number of lags of y - val h = 6 // forecasting horizon - - val (ex, y) = loadData (NO_EXO, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX on COVID-19 Weekly Data") - val mod = ARX (y, LAGS) // create ARX model for time series data - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - - banner (s"Multi-horizon forecasting using the recursive method") - val yx = mod.getX - val yf = mod.forecastAll (y, yx, h) // forecasted response matrix - for k <- 0 to h do - new Plot (null, y, yf(?, k), s"y vs. yf_$k for ${mod.modelName} with $LAGS lags @ horizon $k", lines = true) - -// mod.testHorizons (h, y, yx) // calls testF for horizons 1 to h - ForecasterX.evalForecasts (mod, y, yx, h) - - banner (s"Feature Selection Technique: stepRegression") - val (cols, rSq) = mod.stepRegressionAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"Stepwise: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end aRXTest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest4` main function tests the `ARX` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Does In-Sample Testing (In-ST). - * Determines the terms to include in the model using Feature Selection. 
- * > runMain scalation.modeling.forecasting.aRXTest4 - */ -@main def aRXTest4 (): Unit = - - val LAGS = 10 // number of lags of y - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX.exo on COVID-19 Weekly Data") - val mod = ARX.exo (y, LAGS, ex)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end aRXTest4 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest5` main function tests the `ARX` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Does In-Sample Testing (In-ST). - * Determines the terms to include in the model using Feature Selection. - * Run Train-n-Test (TnT) Split testing on best model. 
- * > runMain scalation.modeling.forecasting.aRXTest5 - */ -@main def aRXTest5 (): Unit = - - val LAGS = 10 // number of lags of y - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX.exo on COVID-19 Weekly Data") - val mod = ARX.exo (y, LAGS, ex)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run TnT on Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - val (x_, y_, xtest, ytest) = ARX.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) - new Plot (null, ytest, yptest, s"${mod.modelName}, ytest vs. yptest", lines = true) - -end aRXTest5 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest6` main function tests the `ARX` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Does In-Sample Testing (In-ST). 
- * Determines the terms to include in the model using Feature Selection. - * Run Train-n-Test (TnT) Split testing on best model using Rolling Validation. - * > runMain scalation.modeling.forecasting.aRXTest6 - */ -@main def aRXTest6 (): Unit = - - val LAGS = 10 // number of lags (values from past) - val rc = 1 // retraining cycle - val h = 6 // forecasting horizon - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX.exo on COVID-19 Weekly Data") - val mod = ARX.exo (y, LAGS, ex)(1, LAGS+1) // create model for time series data with exo - - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run Rolling Validation on ARX Best model") - val bmod = mod.getBest._3.asInstanceOf [ARX] // get the best model from feature selection - ForecasterX.rollValidate (bmod, rc, h) - -end aRXTest6 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest7` main function tests the `ARX` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous 
variables. - * Preliminary investigation of Symbolic Regression. - * > runMain scalation.modeling.forecasting.aRXTest7 - */ -@main def aRXTest7 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Plot Variables on COVID-19 Weekly Data") - for lag <- 10 to 10 do - val xx_ = ex(lag until y.dim) - val yy_ = y(0 until y.dim - lag) -// new Plot (xx_, yy_, null, s"deaths vs. exo-vars @ lag = $lag") - - val mod = SymbolicRegression (xx_, yy_, null, collection.mutable.Set (1.0), cross = false) - mod.trainNtest ()() - println (mod.summary ()) - end for - -end aRXTest7 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/Attention.scala.bak b/src/main/scala/scalation/modeling/forecasting_old/old/Attention.scala.bak deleted file mode 100644 index 3ea85de37..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/Attention.scala.bak +++ /dev/null @@ -1,204 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Sep 4 13:09:52 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @title Model Framework: Context and Attention for Transformers - * - * @see https://sebastianraschka.com/blog/2023/self-attention-from-scratch.html - * @see https://arxiv.org/pdf/1706.03762.pdf (main paper) - */ - -package scalation -package modeling -package forecasting - -import scala.math.sqrt - -import scalation.mathstat._ -import scalation.random.{RandomMatD, RandomTenD} - -import ActivationFun.f_softmax - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Attention` trait provides methods for computing context vectors, single-head - * attention matrices and multi-head attention matrices. 
- * @param n_var the size of the input vector x_t (number of variables) - * @param n_mod the size of the output (dimensionality of the model, d_model) - * @param heads the number of attention heads - */ -trait Attention (n_var: Int, n_mod: Int = 512, heads: Int = 8): - - val n_k = n_mod / heads // size per head (dimensionality d_k, d_v) - val rmg = RandomMatD (n_k, n_var, 1) // random (0, 1) matrix generator for q, k, v - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the Query, Key, Value matrices from the given input and weight matrices. - * @param x the input matrix - * @param w_q the weight matrix for query Q - * @param w_v the weight matrix for key K - * @param w_v the weight matrix for value V - */ - def queryKeyValue (x: MatrixD, w_q: MatrixD, w_k: MatrixD, w_v: MatrixD): (MatrixD, MatrixD, MatrixD) = - val x_t = x.transpose - (w_q * x_t, w_k * x_t, w_v * x_t) - end queryKeyValue - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute a Context Vector from the given query at time t (q_t), key (K) and value (V). - * @param q_t the query vector at time t (based on input vector x_t) - * @param k the key matrix K - * @param v the value matrix V - */ - def context (q_t: VectorD, k: MatrixD, v: MatrixD): VectorD = - val root_n = sqrt (q_t.dim) - f_softmax.f_ (k * (q_t / root_n)) *: v - end context - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute a Self-Attention Weight Matrix from the given query (Q), key (K) and value (V). 
- * @param q the query matrix Q (q_t over all time) - * @param k the key matrix K - * @param v the value matrix V - */ - def attention (q: MatrixD, k: MatrixD, v: MatrixD): MatrixD = - val root_n = sqrt (q.dim2) - f_softmax.fM (q * (k.transpose / root_n)) * v - end attention - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute a Multi-Head, Self-Attention Weight Matrix by taking attention for each head - * and concatenating them; finally multiplying by the overall weight matrix w_o. - * The operator ++^ concatenates matrices column-wise. - * @param q the query matrix Q (q_t over all time) - * @param k the key matrix K - * @param v the value matrix V - * @param w_q the weight tensor for query Q (w_q(i) matrix for i-th head) - * @param w_v the weight tensor for key K (w_k(i) matrix for i-th head) - * @param w_v the weight tensor for value V (w_v(i) matrix for i-th head) - * @param w_o the overall weight matrix to be applied to concatenated attention - */ - def attentionMH (q: MatrixD, k: MatrixD, v: MatrixD, - w_q: TensorD, w_k: TensorD, w_v: TensorD, - w_o: MatrixD): MatrixD = - var att = attention (q * w_q(0), k * w_k(0), v * w_v(0)) - for i <- 1 until heads do att = att ++^ attention (q * w_q(i), k * w_k(i), v * w_v(i)) - println (s"att.dims = ${att.dims}, w_o.dims = ${w_o.dims}") - att * w_o - end attentionMH - -end Attention - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Attention` object contains sample a input matrix from - * @see https://sebastianraschka.com/blog/2023/self-attention-from-scratch.html - * The example is from 6 words with 16 dimensional encoding. 
- */ -object Attention: - - val x = MatrixD ((6, 16), 0.3374, -0.1778, -0.3035, -0.5880, 0.3486, 0.6603, -0.2196, -0.3792, // row 0 - 0.7671, -1.1925, 0.6984, -1.4097, 0.1794, 1.8951, 0.4954, 0.2692, - - 0.5146, 0.9938, -0.2587, -1.0826, -0.0444, 1.6236, -2.3229, 1.0878, // row 1 - 0.6716, 0.6933, -0.9487, -0.0765, -0.1526, 0.1167, 0.4403, -1.4465, - - 0.2553, -0.5496, 1.0042, 0.8272, -0.3948, 0.4892, -0.2168, -1.7472, // row 2 - -1.6025, -1.0764, 0.9031, -0.7218, -0.5951, -0.7112, 0.6230, -1.3729, - - -1.3250, 0.1784, -2.1338, 1.0524, -0.3885, -0.9343, -0.4991, -1.0867, // row 3 - 0.8805, 1.5542, 0.6266, -0.1755, 0.0983, -0.0935, 0.2662, -0.5850, - - -0.0770, -1.0205, -0.1690, 0.9178, 1.5810, 1.3010, 1.2753, -0.2010, // row 4 - 0.4965, -1.5723, 0.9666, -1.1481, -1.1589, 0.3255, -0.6315, -2.8400, - - 0.8768, 1.6221, -1.4779, 1.1331, -1.2203, 1.3139, 1.0533, 0.1388, // row 5 - 2.2473, -0.8036, -0.2808, 0.7697, -0.6596, -0.7979, 0.1838, 0.2293) - - val m = x.dim // number of time points - val n = x.dim2 // size of input x_t - -end Attention - -import Attention._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `attentionTest` function tests the `context` and `attention` top-level functions. 
- * > runMain scalation.modeling.forecasting.attentionTest - */ -@main def attentionTest (): Unit = - - val n_var = x.dim2 // number of variables in input vector x_t - val n_mod = 24 // size of each query/key vector (q_t, k_t, v_t) - val heads = 1 // number of attention heads - object att extends Attention (n_var, n_mod, heads) - - val (q, k, v) = att.queryKeyValue (x, att.rmg.gen, att.rmg.gen, att.rmg.gen) - - banner ("Dimensions for input x, query q, key k, value v") - println (s"x.dims = ${x.dims}") - println (s"q.dims = ${q.dims}") - println (s"k.dims = ${k.dims}") - println (s"v.dims = ${v.dims}") - - banner ("Attention Matrix") - val aw = att.attention (q, k, v) - println (s"aw.dims = ${aw.dims}") - println (s"aw = $aw") - - banner ("Context Vectors Collected into Matrix") - val cxt = new MatrixD (aw.dim, aw.dim2) - println (s"cxt.dims = ${cxt.dims}") - for i <- q.indices do cxt(i) = att.context (q(i), k, v) - println (s"cxt = $cxt") - - assert (cxt =~ aw) - -end attentionTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `attentionTest2` function tests the `attentionMH` top-level function. - * Test Multi-Head, Self-Head Attention. 
- * > runMain scalation.modeling.forecasting.attentionTest2 - */ -@main def attentionTest2 (): Unit = - - val n_var = x.dim2 // number of variables in input vector x_t - val n_mod = 72 // size of each query/key vector (q_t, k_t, v_t) - val heads = 3 // number of attention heads - object att extends Attention (n_var, n_mod, heads) - - val (q, k, v) = att.queryKeyValue (x, att.rmg.gen, att.rmg.gen, att.rmg.gen) - - banner ("Dimensions for input x, query q, key k, value v") - println (s"x.dims = ${x.dims}") - println (s"q.dims = ${q.dims}") - println (s"k.dims = ${k.dims}") - println (s"v.dims = ${v.dims}") - - // Multi-Head (MH) - - val rtg = RandomTenD (heads, n_mod, att.n_k, 1) // random (0, 1) tensor generator for q, k, v - val rmg = RandomMatD (n_mod, n_mod, 1) // random (0, 1) matrix generator for for w_o - - val wt_q = rtg.gen // MH query weight tensor: heads x n_mod x n_k - val wt_k = rtg.gen // MH key weight tensor: heads x n_mod x n_k - val wt_v = rtg.gen // MH value weight tensor; heads x n_mod x n_k - val w_o = rmg.gen // MH overall weight matrix: n_mod x n_mod - - banner ("Dimensions for query wt_q, key wt_k, value wt_v, overall w_o") - println (s"wt_q.dims = ${wt_q.dims}") - println (s"wt_k.dims = ${wt_k.dims}") - println (s"wt_v.dims = ${wt_v.dims}") - println (s"w_o.dims = ${w_o.dims}") - - banner ("Multi-Head Attention Matrix") - val aw = att.attentionMH (q, k, v, wt_q, wt_k, wt_v, w_o) - println (s"aw.dims = ${aw.dims}") - println (s"aw = $aw") - -end attentionTest2 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/ForecastUtil.scala.bak b/src/main/scala/scalation/modeling/forecasting_old/old/ForecastUtil.scala.bak deleted file mode 100644 index c4697a22f..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/ForecastUtil.scala.bak +++ /dev/null @@ -1,117 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Feb 13 
16:22:21 EST 2022 - * @see LICENSE (MIT style license file). - * - * @title Model: Utilities for Time Series Forecasting - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Given a response vector y, build and return - * (1) an input/predictor MATRIX xx and - * (2) an output/multi-horizon output/response MATRIX yy. - * The first lag responses can't be predicted due to missing past values. - * The last h-1 responses can't be predicted due to missing future values. - * Therefore the number of rows in xx and yy is reduced to y.dim + 1 - lag - h. - * @param y the given output/response vector - * @param lag the maximum lag included (inclusive) - * @param h the forecasting horizon (1, 2, ... h) - */ -def buildMatrix4TS (y: VectorD, lag: Int, h: Int): (MatrixD, MatrixD) = - val xx = new MatrixD (y.dim + 1 - lag - h, lag) - val yy = new MatrixD (y.dim + 1 - lag - h, h) - for i <- lag to y.dim - h do - for j <- xx.indices2 do xx(i-lag, lag - 1 - j) = y(i - 1 - j) - for j <- yy.indices2 do yy(i-lag, j) = y(i + j) - end for -// println (s"buildMatrix4TS: xx = $xx \n yy = $yy") - (xx, yy) -end buildMatrix4TS - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Given a response vector y, build and return - * (1) an input/predictor MATRIX xx and - * (2) an output/single-horizon output/response VECTOR yy. - * The first response can't be predicted due to missing past values. - * Therefore the number of rows in xx and yy is reduced to y.dim - 1. 
- * @param y the given output/response vector - * @param lag the maximum lag included (inclusive) - */ -def buildMatrix4TS (y: VectorD, lag: Int): (MatrixD, VectorD) = - val xx = new MatrixD (y.dim - 1, lag) - val yy = new VectorD (y.dim - 1) - for i <- 1 until y.dim do - for j <- xx.indices2 do xx(i-1, lag - 1 - j) = y(max(i - 1 - j, 0)) - yy(i-1) = y(i) - end for -// println (s"buildMatrix4TS: xx = $xx \n yy = $yy") - (xx, yy) -end buildMatrix4TS - -/* -def buildMatrix4TS (y: VectorD, lag: Int): (MatrixD, VectorD) = - val xx = new MatrixD (y.dim - lag, lag) - val yy = new VectorD (y.dim - lag) - for i <- lag until y.dim do - for j <- xx.indices2 do xx(i-lag, lag - 1 - j) = y(i - 1 - j) - yy(i-lag) = y(i) - end for -// println (s"buildMatrix4TS: xx = $xx \n yy = $yy") - (xx, yy) -end buildMatrix4TS -*/ - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Given an exogenous variable vector ex corresponding to an endogenous response - * vector y, build and return an input/predictor MATRIX xx. - * The first lag responses can't be predicted due to missing past values. - * Therefore the number of rows in xx is reduced to ex.dim - lag. 
- * @param ex the exogenous variable vector - * @param lag the maximum lag included (inclusive) for the endogenous variable - * @param elag1 the minimum lag included (inclusive) for the exogenous variable - * @param elag2 the maximum lag included (inclusive) for the exogenous variable - */ -def buildMatrix4TS_exo (ex: VectorD, lag: Int, elag1: Int, elag2: Int): MatrixD = - val flaw = flawf ("top") - val n = elag2 - elag1 - if n < 1 then flaw ("buildMatrix4TS_exo", "min exo lag must be smaller than max exo lag") - if elag2 > lag then flaw ("buildMatrix4TS_exo", "exo lag cannot exceed endogenous lag") - - val xx = new MatrixD (ex.dim - lag, n) - for i <- lag until ex.dim do - for j <- xx.indices2 do xx(i-lag, n - 1 - j) = ex(i - elag1 - j) - end for -// println (s"buildMatrix4TS_exo: xx = $xx") - xx -end buildMatrix4TS_exo - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Test the actual response vector vs. forecasted matrix, returning the QoF - * for all forecasting horizons 1 to h. 
- * @param mod the fittable model (one that extends `Fit`) - * @param y the orginal actual response vector - * @param yf the forecasted response matrix - * @param p the number of variables/lags used in the model - */ -def testForecast (mod: Fit, y: VectorD, yf: MatrixD, p: Int): MatrixD = - MatrixD (for k <- yf.indices2 yield - val y_ = y(p + k until y.dim) - val yf_ = yf(?, k)(0 until y.dim - p - k) - println (s"y_.dim = ${y_.dim}, yf_.dim = ${yf_.dim}") - mod.resetDF (p, y.dim - p - (k+1)) // reset the degrees of freedom - mod.diagnose (y_, yf_)) // return the QoF of the forecasts -end testForecast - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/ForecastUtil.scala.bak2 b/src/main/scala/scalation/modeling/forecasting_old/old/ForecastUtil.scala.bak2 deleted file mode 100644 index 6e1b5dca5..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/ForecastUtil.scala.bak2 +++ /dev/null @@ -1,112 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Feb 13 16:22:21 EST 2022 - * @see LICENSE (MIT style license file). - * - * @title Model Framework: Utilities for Time Series Forecasting - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Given a response vector y, build and return - * (1) an input/predictor MATRIX xx and - * (2) an output/multi-horizon output/response MATRIX yy. - * Used by Multi-Variate (MV) forecast models such as `RegressionMV4TS`. - * that use DIRECT multi-horizon forecasting. - * The first lag responses can't be predicted due to missing past values. - * The last h-1 responses can't be predicted due to missing future values. - * Therefore the number of rows in xx and yy is reduced to y.dim + 1 - lags - h. 
- * - * FIX - try to extend to "val xx = new MatrixD (y.dim - h, lags)" - * - * @param y the given output/response vector - * @param lags the maximum lag included (inclusive) - * @param h the forecasting horizon (1, 2, ... h) - */ -def buildMatrix4TS (y: VectorD, lags: Int, h: Int): (MatrixD, MatrixD) = - val xx = new MatrixD (y.dim + 1 - lags - h, lags) - val yy = new MatrixD (y.dim + 1 - lags - h, h) - for i <- lags to y.dim - h do - for j <- xx.indices2 do xx(i-lags, lags - 1 - j) = y(i - 1 - j) - for j <- yy.indices2 do yy(i-lags, j) = if i + j >= y.dim then -0.0 else y(i + j) -// for j <- yy.indices2 do yy(i-lags, j) = y(i + j) - end for -// println (s"buildMatrix4TS: xx = $xx \n yy = $yy") - (xx, yy) -end buildMatrix4TS - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Given a response vector y, build and return - * (1) an input/predictor MATRIX xx and - * (2) an output/single-horizon output/response VECTOR yy. - * Used by Single-Variate forecast models such as `Regression4TS`. - * that use RECURSIVE multi-horizon forecasting. - * The first response can't be predicted due to missing past values. - * Therefore the number of rows in xx and yy is reduced to y.dim - 1. - * @param y the given output/response vector - * @param lags the maximum lag included (inclusive) - */ -def buildMatrix4TS (y: VectorD, lags: Int): (MatrixD, VectorD) = - val xx = new MatrixD (y.dim - 1, lags) - val yy = new VectorD (y.dim - 1) - for i <- 1 until y.dim do - for j <- xx.indices2 do xx(i-1, lags - 1 - j) = y(max(i - 1 - j, 0)) - yy(i-1) = y(i) - end for -// println (s"buildMatrix4TS: xx = $xx \n yy = $yy") - (xx, yy) -end buildMatrix4TS - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Given an exogenous variable vector ex corresponding to an endogenous response - * vector y, build and return an input/predictor MATRIX xx. - * The first lag responses can't be predicted due to missing past values. 
- * Therefore the number of rows in xx is reduced to ex.dim - lags. - * @param ex the exogenous variable vector - * @param lags the maximum lag included (inclusive) for the endogenous variable - * @param elag1 the minimum lag included (inclusive) for the exogenous variable - * @param elag2 the maximum lag included (inclusive) for the exogenous variable - */ -def buildMatrix4TS_exo (ex: VectorD, lags: Int, elag1: Int, elag2: Int): MatrixD = - val flaw = flawf ("top") - val n = elag2 - elag1 - if n < 1 then flaw ("buildMatrix4TS_exo", "min exo lag must be smaller than max exo lag") -// if elag2 > lags then flaw ("buildMatrix4TS_exo", "exo lag cannot exceed endogenous lag") - - val xx = new MatrixD (ex.dim - lags, n) - for i <- lags until ex.dim do - for j <- xx.indices2 do xx(i-lags, n - 1 - j) = ex(i - elag1 - j) - end for -// println (s"buildMatrix4TS_exo: xx = $xx") - xx -end buildMatrix4TS_exo - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Test the actual response vector vs. forecasted matrix, returning the QoF - * for all forecasting horizons 1 to h. 
- * @param mod the fittable model (one that extends `Fit`) - * @param y the orginal actual response vector - * @param yf the forecasted response matrix - * @param p the number of variables/lags used in the model - */ -def testForecast (mod: Fit, y: VectorD, yf: MatrixD, p: Int): MatrixD = - MatrixD (for k <- yf.indices2 yield - val y_ = y(p + k until y.dim) - val yf_ = yf(?, k)(0 until y.dim - p - k) - println (s"y_.dim = ${y_.dim}, yf_.dim = ${yf_.dim}") - mod.resetDF (p, y.dim - p - (k+1)) // reset the degrees of freedom - mod.diagnose (y_, yf_)) // return the QoF of the forecasts -end testForecast - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak b/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak deleted file mode 100644 index ed8ea4fd4..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak +++ /dev/null @@ -1,321 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Sai - * @version 2.0 - * @date - * @see LICENSE (MIT style license file). - * - * @title Gated Recurrent Unit (GRU) - * - * Translated from Matlab to Scala - * @see https://www.math.ucla.edu/~minchen/doc/BPTTTutorial.pdf - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/* This program tests the BPTT process we manually developed for GRU. - * We calculate the gradients of GRU parameters with chain rule , and then - * compare them to the numerical gradients to check whether our chain rule - * derivation is correct. - * - * Here, we provided 2 versions of BPTT, backward_direct() and backward(). - * The former one is the direct idea to calculate gradient within each step - * and add them up (O(sentence_sizeˆ2) time) . The latter one is optimized to - * calculate the contribution of each step to the overall gradient , which is - * only O(sentence_size) time. 
- * - * This is very helpful for people who want to implement GRU in Caffe since - * Caffe does not support auto−differentiation. This is also very helpful for - * the people who want to know the details about Back-propagation Through - * Time algorithm in the Recurrent Neural Networks (such as GRU and LSTM) - * and also get a sense on how auto−differentiation is possible. - * - * NOTE: We does not involve SGD training here. With SGD training, this - * program would become a complete implementation of GRU which can be - * trained with sequence data . However, since this is only a CPU serial - * Matlab version of GRU, applying it on large datasets will be dramatically - * slow. - * - * by Minchen Li, at The University of British Columbia. 2016−04−21 - */ - -package scalation -package modeling -package forecasting - -import scala.math.log - -import scalation.mathstat.{MatrixD, VectorD} -import scalation.random.{NormalMat, NormalVec_c} - -import ActivationFun.{sigmoid_, softmax_, tanh_} -import MatrixD.outer - -def log_ (x: VectorD): VectorD = x.map (log) - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `CRU` class implements Gated Recurrent Unit (GRU) via Back Propagation Through - * Time (BPTT). - * val (x, y) = getTrainingData (vocabulary_size, sentence_size) - * @param x the input sequence/time series - * @param y the output sequence/time series - */ -class GRU (x: MatrixD, y: MatrixD): - - // set GRU and data scale - private val iMem_size = 4 - private val vocabulary_size = x.dim // e.g., 64, number of distinct words - private val sentence_size = x.dim2 // e.g., 20, number of words in a sentence (including start and end symbol) - // since we will only use one sentence for training, - // this is also the total steps during training. 
- - private val _1 = VectorD.one (iMem_size) // vector of all ones for e.g., 1 - z - - // initialize parameters (weights and biases) - // multiplier for input x_t of intermediate variables - // note: Matlab rand -> NormalMat - private val rmg1 = NormalMat (iMem_size, vocabulary_size, 0.0, 0.01) - private val U_z = rmg1.gen - private val U_r = rmg1.gen - private val U_c = rmg1.gen - - // multiplier for previous s of intermediate variables - private val rmg2 = NormalMat (iMem_size, iMem_size, 0.0, 0.01) - private val W_z = rmg2.gen - private val W_r = rmg2.gen - private val W_c = rmg2.gen - - // bias terms of intermediate variables - converted to VectorD - private val rvg1 = NormalVec_c (iMem_size, 0.0, 0.01) - private val b_z = rvg1.gen - private val b_r = rvg1.gen - private val b_c = rvg1.gen - - // decoder for generating output - private val rmg3 = NormalMat (vocabulary_size, iMem_size, 0.0, 0.01) - private val rvg3 = NormalVec_c (vocabulary_size, 0.0, 0.01) - private val V = rmg3.gen - private val b_V = rvg3.gen // bias of decoder - converted to Vector - - // previous s of step 1 - private val s_0 = rvg1.gen // converted to vector - - private val max_epochs = 2 // maximum number of iterations - private val L = new VectorD (sentence_size) // store loss function values - println (s"L = $L") - - // initialize results - // Matlab: zeros -> new MatrixD - private val s = new MatrixD (iMem_size, sentence_size) // hidden state - private val y_hat = new MatrixD (vocabulary_size, sentence_size) // predicted output - private val z = new MatrixD (iMem_size, sentence_size) // update gate - private val r = new MatrixD (iMem_size, sentence_size) // reset gate - private val c = new MatrixD (iMem_size, sentence_size) // candidate state - - // the partial derivative of weights and biases - private var ds_0 = new VectorD (s_0.dim) - private var dU_c = new MatrixD (U_c.dim, U_c.dim2) - private var dU_r = new MatrixD (U_r.dim, U_r.dim2) - private var dU_z = new MatrixD (U_z.dim, 
U_z.dim2) - private var dW_c = new MatrixD (W_c.dim, W_c.dim2) - private var dW_r = new MatrixD (W_r.dim, W_r.dim2) - private var dW_z = new MatrixD (W_z.dim, W_z.dim2) - - private var db_z = new VectorD (b_z.dim) - private var db_r = new VectorD (b_r.dim) - private var db_c = new VectorD (b_c.dim) - - private var db_V: VectorD = null - private var dV = new MatrixD (V.dim, V.dim2) - - private val eta = 0.1 // the learning rate - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the GRU. - */ - def train (): Unit = - - for it <- 1 to max_epochs do - // forward propagate: get the intermediate and output results - forward () - - println (s"train: for epoch $it: loss function L = $L") - - // back propogate: calculate the gradient (the partial derivatives) - backward () - - // update the parameters (weights and biases) - update_params () - end for - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forward propagate calculates, y_hat, loss and intermediate variables for each step. - */ - def forward (): Unit = - - // calculate result for step 1 since s_0 is not in s - // note: Matlab row wild-card : becomes ? 
- // note: Matlab starts at 1, Scala at 0 - - z(?, 0) = sigmoid_ (U_z * x(?, 0) + W_z * s_0 + b_z) - r(?, 0) = sigmoid_ (U_r * x(?, 0) + W_r * s_0 + b_r) - c(?, 0) = tanh_ (U_c * x(?, 0) + W_c * (s_0 * r(?, 0) ) + b_c) - s(?, 0) = (_1 - z(?, 0)) * c(?, 0) + z(?, 0) * s_0 - y_hat(?, 0) = softmax_ (V * s(?, 0) + b_V) - L(0) = (-y(?, 0) * log_ (y_hat(?, 0))).sum - - // calculate results for step 2 − sentence_size similarly (i-th word) - // note Matlab element-wise multiplication .* becomes * - - for word <- 1 until sentence_size do - z(?, word) = sigmoid_ (U_z * x(?, word) + W_z * s(?, word-1) + b_z) - r(?, word) = sigmoid_ (U_r * x(?, word) + W_r * s(?, word-1) + b_r) - c(?, word) = tanh_ (U_c * x(?, word) + W_c * (s(?, word-1) * r(?, word)) + b_c) - s(?, word) = (_1 - z(?, word)) * c(?, word) + z(?, word) * s(?, word-1) - y_hat(?, word) = softmax_ (V * s(?, word) + b_V) - L(word) = (-y(?, word) * log_ (y_hat(?, word))).sum - end for - end forward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backward propagate to calculate gradient using chain rule (O(sentence_size) time) - */ - def backward (): Unit = - - // calculate gradient using chain rule - // note Matlab: A' is A.transpose - // note Matlab: sum (delta_y, 2) returns the row sums of matrix delta_y - // note Matlab: delta_y(?, word) * s(?, word)' -> delta_y(?, word) outer s(?, word) - val delta_y = y_hat - y - db_V = delta_y.sumVr -// dV = new MatrixD (V.dim, V.dim2) - for word <- 0 until sentence_size do - dV += outer (delta_y(?, word), s(?, word)) // outer vector product - end for - - val ds_single = V.transpose * delta_y - - // calculate the derivative contribution of each step and add them up - var ds_cur = new VectorD (ds_single.dim) - for word <- sentence_size-1 to 1 by -1 do - ds_cur += ds_single(?, word) - val ds_cur_bk = ds_cur - - // mix for new state - val dtanhInput = (ds_cur * (_1 - z(?, word)) * (_1 - c(?, word) * c(?, word))) - db_c += dtanhInput - dU_c 
+= outer (dtanhInput, x(?, word)) - dW_c += outer (dtanhInput, (s(?, word-1) * r(?, word))) - val dsr = W_c.transpose * dtanhInput - ds_cur = dsr * r(?, word) - - // reset gate - val dsigInput_r = dsr * s(?, word-1) * r(?, word) * (_1 - r(?, word)) - db_r += dsigInput_r - dU_r += outer (dsigInput_r, x(?, word)) - dW_r += outer (dsigInput_r, s(?, word-1)) - ds_cur += W_r.transpose * dsigInput_r - ds_cur += ds_cur_bk * z(?, word) - val dz = ds_cur_bk * (s(?, word-1) - c(?, word)) - - // update gate - val dsigInput_z = dz * z(?, word) * (_1 - z(?, word)) - db_z += dsigInput_z - dU_z += outer (dsigInput_z, x(?, word)) - dW_z += outer (dsigInput_z, s(?, word-1)) - ds_cur += W_z.transpose * dsigInput_z - end for - - // case: s_1 -> s_0 - ds_cur += ds_single(?, 0) - - val dtanhInput = (ds_cur * (_1 - z(?, 0)) * (_1 - c(?, 0) * c(?, 0))) - db_c += dtanhInput - dU_c += outer (dtanhInput, x(?, 0)) - dW_c += outer (dtanhInput, (s_0 * r(?, 0))) - val dsr = W_c.transpose * dtanhInput - ds_0 += dsr * r(?, 0) - - val dsigInput_r = dsr * s_0 * r(?, 0) * (_1 - r(?, 0)) - db_r += dsigInput_r - dU_r += outer (dsigInput_r, x(?, 0)) - dW_r += outer (dsigInput_r, s_0) - ds_0 += W_r.transpose * dsigInput_r - ds_0 += ds_cur * z(?, 0) - val dz = ds_cur * (s_0 - c(?, 0)) - - val dsigInput_z = dz * z(?, 0) * (_1 - z(?, 0)) - db_z += dsigInput_z - dU_z += outer (dsigInput_z, x(?, 0)) - dW_z += outer (dsigInput_z, s_0) - ds_0 += W_z.transpose * dsigInput_z - end backward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on the calculated partial derivatives, update the parameters (weights - * and biases). - */ - def update_params (): Unit = - println ("To Be Implemented") - end update_params - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Create a fake training dataset: generate only one sentence for training. - * Only for testing. Needs to be changed to read in training data from files. 
- * The words are one-hot encoded into a column vector. - */ -def getTrainingData (vocabulary_size: Int, sentence_size: Int): (MatrixD, MatrixD) = - - import scalation.random.Randi - - assert (vocabulary_size > 2) // for start and end of sentence symbols - assert (sentence_size > 0) - - // define start and end of sentence in the vocabulary - val SENTENCE_START = new VectorD (vocabulary_size) // start: [1, 0, 0, ...] - SENTENCE_START(0) = 1 - val SENTENCE_END = new VectorD (vocabulary_size) // end: [0, 1, 0, ...] - SENTENCE_END(1) = 1 - - println (s"SENTENCE_START = $SENTENCE_START") - println (s"SENTENCE_END = $SENTENCE_END") - - // generate sentence - val i_ran = Randi (0, vocabulary_size-3) // random integer generator - val z_t = new MatrixD (vocabulary_size, sentence_size-1) // leave one slot for SENTENCE START - for word <- 0 until sentence_size-1 do - // generate a random word excludes start and end symbol - z_t(i_ran.igen+2, word) = 1 // set a 1 in position to indicate a word - end for - - val x_t = SENTENCE_START +^: z_t // training input matrix (prepend vector) - val y_t = z_t :^+ SENTENCE_END // training output matrix (append vector) - - (x_t, y_t) -end getTrainingData - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest` main function tests the `GRU` class. 
- * > runMain scalation.modeling.forecasting.gRUTest - */ -@main def gRUTest (): Unit = - - val vocabulary_size = 5 - val sentence_size = 8 - - val (x_t, y_t) = getTrainingData (vocabulary_size, sentence_size) - - println (s"x_t = $x_t") - println (s"y_t = $y_t") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x_t, y_t) - mod.train () -// mod.test () - -end gRUTest - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak2 b/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak2 deleted file mode 100644 index 8d48e08a0..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak2 +++ /dev/null @@ -1,370 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Sainagesh Veeravalli - * @version 2.0 - * @date Thu May 11 15:38:07 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @title Gated Recurrent Unit (GRU) for Multivariate Time Series - * - * Translated from Matlab to Scala - * @see https://www.math.ucla.edu/~minchen/doc/BPTTTutorial.pdf - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/* This Matlab program tests the BPTT process we manually developed for GRU. - * We calculate the gradients of GRU parameters with chain rule, and then - * compare them to the numerical gradients to check whether our chain rule - * derivation is correct. - * - * Here, we provided 2 versions of BPTT, backward_direct() and backward(). - * The former one is the direct idea to calculate gradient within each step - * and add them up (O(seq_sizeˆ2) time) . The latter one is optimized to - * calculate the contribution of each step to the overall gradient, which is - * only O(seq_size) time. - * - * This is very helpful for people who want to implement GRU in Caffe since - * Caffe does not support auto−differentiation. 
This is also very helpful for - * the people who want to know the details about Back-propagation Through - * Time algorithm in the Recurrent Neural Networks (such as GRU and LSTM) - * and also get a sense on how auto−differentiation is possible. - * - * NOTE: We does not involve SGD training here. With SGD training, this - * program would become a complete implementation of GRU which can be - * trained with sequence data. However, since this is only a CPU serial - * Matlab version of GRU, applying it on large datasets will be dramatically - * slow. - * - * Matlab code: - * by Minchen Li, at The University of British Columbia. 2016−04−21 - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Standard RNN (GRU/LSTM) Formats for Multivariate Time Series Data: - * Matlab: - * - * Keras: 3D format expected by GRU/LSTM is [samples, timesteps, features]. - * => indexing [timestamp t, lags k, variable j] - * PyTorch: - */ - -package scalation -package modeling -package forecasting - -import scala.math.log - -import scalation.mathstat.{MatrixD, VectorD} -import scalation.random.{NormalMat, NormalVec_c} - -import ActivationFun.{sigmoid_, softmax_, tanh_} -import MatrixD.outer - -def log_ (x: VectorD): VectorD = x.map (log) - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `CRU` class implements Gated Recurrent Unit (GRU) via Back Propagation Through - * Time (BPTT). At each time point x_t, there is a vector representing several variables - * or the encoding of a word. 
- * Time series: (x_t: t = 0, 1, ..., seq_size-1) where seq_size is the number of time points/words - * @param x the input sequence/time series - * @param y the output sequence/time series - */ -class GRU (x: MatrixD, y: MatrixD): - - // set GRU and data scale - private val mem_size = 4 // memory size for hidden state - private val vocab_size = x.dim // e.g., 64, number of variable or distinct words - private val seq_size = x.dim2 // e.g., 20, number of words in a sentence (including start and end symbol) - // since we will only use one sentence for training, - // this is also the total steps during training. - - private val _1 = VectorD.one (mem_size) // vector of all ones for e.g., 1 - z - - // initialize parameters (weights and biases) - // multiplier for input x_t of intermediate variables - // note: Matlab rand -> NormalMat or NormalVec_c - private val rmg1 = NormalMat (mem_size, vocab_size, 0.0, 0.01) - private var U_z = rmg1.gen - private var U_r = rmg1.gen - private var U_c = rmg1.gen - - // multiplier for previous s of intermediate variables - private val rmg2 = NormalMat (mem_size, mem_size, 0.0, 0.01) - private var W_z = rmg2.gen - private var W_r = rmg2.gen - private var W_c = rmg2.gen - - // bias terms of intermediate variables - converted to VectorD - private val rvg1 = NormalVec_c (mem_size, 0.0, 0.01) - private var b_z = rvg1.gen - private var b_r = rvg1.gen - private var b_c = rvg1.gen - - // decoder for generating output - private val rmg3 = NormalMat (vocab_size, mem_size, 0.0, 0.01) - private val rvg3 = NormalVec_c (vocab_size, 0.0, 0.01) - private var V = rmg3.gen - private var b_V = rvg3.gen // bias of decoder - converted to Vector - - // previous s of step 1 - private var s_0 = rvg1.gen // converted to vector - - private val max_epochs = 20 // maximum number of iterations - private val L = new VectorD (seq_size) // store loss function values - println (s"L = $L") - - // initialize results - // Matlab: zeros -> new MatrixD - private val s = 
new MatrixD (mem_size, seq_size) // hidden state (change s -> h) - private val yp = new MatrixD (vocab_size, seq_size) // predicted output - private val z = new MatrixD (mem_size, seq_size) // update gate - private val r = new MatrixD (mem_size, seq_size) // reset gate - private val c = new MatrixD (mem_size, seq_size) // candidate state - - // the partial derivative of weights and biases - private var ds_0 = new VectorD (s_0.dim) - private var dU_c = new MatrixD (U_c.dim, U_c.dim2) - private var dU_r = new MatrixD (U_r.dim, U_r.dim2) - private var dU_z = new MatrixD (U_z.dim, U_z.dim2) - private var dW_c = new MatrixD (W_c.dim, W_c.dim2) - private var dW_r = new MatrixD (W_r.dim, W_r.dim2) - private var dW_z = new MatrixD (W_z.dim, W_z.dim2) - - private var db_z = new VectorD (b_z.dim) - private var db_r = new VectorD (b_r.dim) - private var db_c = new VectorD (b_c.dim) - - private var db_V: VectorD = null - private var dV = new MatrixD (V.dim, V.dim2) - - private val eta = 0.25 // the learning rate - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the GRU using simple gradient descent. - */ - def train (): Unit = - - for it <- 1 to max_epochs do - // forward propagate: get the intermediate and output results - forward () - - println (s"train: for epoch $it: loss function L = $L") - println(s"train: for epoch $it: total loss function L.sum = ${L.sum}") - - // back propogate: calculate the gradient (the partial derivatives) - backward () - - // update the parameters (weights and biases) - update_params () - end for - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forward propagate calculates, yp, loss and intermediate variables for each step. - */ - def forward (): Unit = - - // calculate result for step 1 since s_0 is not in s - // note: Matlab: row wild-card : becomes ? 
- // note: Matlab: starts at 1, Scala at 0 - - z(?, 0) = sigmoid_ (U_z * x(?, 0) + W_z * s_0 + b_z) - r(?, 0) = sigmoid_ (U_r * x(?, 0) + W_r * s_0 + b_r) - c(?, 0) = tanh_ (U_c * x(?, 0) + W_c * (s_0 * r(?, 0) ) + b_c) - s(?, 0) = (_1 - z(?, 0)) * c(?, 0) + z(?, 0) * s_0 - yp(?, 0) = softmax_ (V * s(?, 0) + b_V) - L(0) = (-y(?, 0) * log_ (yp(?, 0))).sum - - // calculate results for step 2 − seq_size similarly (t-th word) - // note Matlab element-wise multiplication .* becomes * - - for t <- 1 until seq_size do - z(?, t) = sigmoid_ (U_z * x(?, t) + W_z * s(?, t-1) + b_z) - r(?, t) = sigmoid_ (U_r * x(?, t) + W_r * s(?, t-1) + b_r) - c(?, t) = tanh_ (U_c * x(?, t) + W_c * (s(?, t-1) * r(?, t)) + b_c) - s(?, t) = (_1 - z(?, t)) * c(?, t) + z(?, t) * s(?, t-1) - yp(?, t) = softmax_ (V * s(?, t) + b_V) - L(t) = (-y(?, t) * log_ (yp(?, t))).sum - end for - end forward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backward propagate to calculate gradient using chain rule (O(seq_size) time). 
- */ - def backward (): Unit = - - // calculate gradient using chain rule - // note Matlab: A' is A.transpose - // note Matlab: sum (delta_y, 2) returns the row sums of matrix delta_y - // note Matlab: delta_y(?, word) * s(?, word)' -> delta_y(?, t) outer s(?, t) - - val delta_y = yp - y - db_V = delta_y.sumVr -// dV = new MatrixD (V.dim, V.dim2) - for t <- 0 until seq_size do dV += outer (delta_y(?, t), s(?, t)) // outer vector product - - val ds_single = V.transpose * delta_y - - // calculate the derivative contribution of each step and add them up - var ds_cur = new VectorD (ds_single.dim) - for t <- seq_size-1 to 1 by -1 do - ds_cur += ds_single(?, t) - val ds_cur_bk = ds_cur - - // mix for new state - val dtanhIn = (ds_cur * (_1 - z(?, t)) * (_1 - c(?, t) * c(?, t))) - db_c += dtanhIn - dU_c += outer (dtanhIn, x(?, t)) - dW_c += outer (dtanhIn, (s(?, t-1) * r(?, t))) - val dsr = W_c.transpose * dtanhIn - ds_cur = dsr * r(?, t) - - // reset gate (r) - val dsigIn_r = dsr * s(?, t-1) * r(?, t) * (_1 - r(?, t)) - db_r += dsigIn_r - dU_r += outer (dsigIn_r, x(?, t)) - dW_r += outer (dsigIn_r, s(?, t-1)) - ds_cur += W_r.transpose * dsigIn_r - ds_cur += ds_cur_bk * z(?, t) - val dz = ds_cur_bk * (s(?, t-1) - c(?, t)) - - // update gate (z) - val dsigIn_z = dz * z(?, t) * (_1 - z(?, t)) - db_z += dsigIn_z - dU_z += outer (dsigIn_z, x(?, t)) - dW_z += outer (dsigIn_z, s(?, t-1)) - ds_cur += W_z.transpose * dsigIn_z - end for - - // case: s_1 -> s_0 - ds_cur += ds_single(?, 0) - - val dtanhIn = (ds_cur * (_1 - z(?, 0)) * (_1 - c(?, 0) * c(?, 0))) - db_c += dtanhIn - dU_c += outer (dtanhIn, x(?, 0)) - dW_c += outer (dtanhIn, (s_0 * r(?, 0))) - val dsr = W_c.transpose * dtanhIn - ds_0 += dsr * r(?, 0) - - val dsigIn_r = dsr * s_0 * r(?, 0) * (_1 - r(?, 0)) - db_r += dsigIn_r - dU_r += outer (dsigIn_r, x(?, 0)) - dW_r += outer (dsigIn_r, s_0) - ds_0 += W_r.transpose * dsigIn_r - ds_0 += ds_cur * z(?, 0) - val dz = ds_cur * (s_0 - c(?, 0)) - - val dsigIn_z = dz * z(?, 0) * 
(_1 - z(?, 0)) - db_z += dsigIn_z - dU_z += outer (dsigIn_z, x(?, 0)) - dW_z += outer (dsigIn_z, s_0) - ds_0 += W_z.transpose * dsigIn_z - end backward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on the calculated partial derivatives, update the parameters (weights - * and biases). - */ - def update_params (): Unit = - U_z -= dU_z * eta - U_r -= dU_r * eta - U_c -= dU_c * eta - W_z -= dW_z * eta - W_r -= dW_r * eta - W_c -= dW_c * eta - b_z -= db_z * eta - b_r -= db_r * eta - b_c -= db_c * eta - V -= dV * eta - b_V -= db_V * eta - end update_params - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Generate a fake sequence dataset: generate only one sentence for training. - * Only for testing. Needs to be changed to read in training data from files. - * The words are one-hot encoded into a column vector. - * @param vocab_size the number of variables/word encoding size - * @param seq_size the sequence size (number of time points/words) - */ -def genSequenceData (vocab_size: Int, seq_size: Int): (MatrixD, MatrixD) = - - import scalation.random.Randi - - assert (vocab_size > 2) // for start and end of sentence symbols - assert (seq_size > 0) - - // define start and end of sentence in the vocabulary - val SENTENCE_START = new VectorD (vocab_size) // start: [1, 0, 0, ...] - SENTENCE_START(0) = 1 - val SENTENCE_END = new VectorD (vocab_size) // end: [0, 1, 0, ...] 
- SENTENCE_END(1) = 1 - - println (s"SENTENCE_START = $SENTENCE_START") - println (s"SENTENCE_END = $SENTENCE_END") - - // generate sentence - val i_ran = Randi (0, vocab_size-3) // random integer generator - val z_t = new MatrixD (vocab_size, seq_size-1) // leave one slot for SENTENCE START - for t <- 0 until seq_size-1 do - // generate a random word excludes start and end symbol - z_t(i_ran.igen+2, t) = 1 // set a 1 in position to indicate a word - end for - - val x_t = SENTENCE_START +^: z_t // training input matrix (prepend vector) - val y_t = z_t :^+ SENTENCE_END // training output matrix (append vector) - - (x_t, y_t) -end genSequenceData - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest` main function tests the `GRU` class on randomly generated - * sequence data meant to represent encoded words - * > runMain scalation.modeling.forecasting.gRUTest - */ -@main def gRUTest (): Unit = - - val vocab_size = 5 - val seq_size = 8 - - val (x_t, y_t) = genSequenceData (vocab_size, seq_size) - - println (s"x_t = $x_t") - println (s"y_t = $y_t") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x_t, y_t) - mod.train () -// mod.test () - -end gRUTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest2` main function tests the `GRU` class on sequence data read as words - * in a file that encoded and pass into `GRU` - * > runMain scalation.modeling.forecasting.gRUTest2 - */ -@main def gRUTest2 (): Unit = - - println ("read words from a text file") - -end gRUTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest3` main function tests the `GRU` class on sequence data corresponding - * to multivariate time series data - * in a file that encoded and pass into `GRU` - * > runMain scalation.modeling.forecasting.gRUTest3 - */ -@main def gRUTest3 (): Unit = - - println ("read multivariate 
time series from a CSV file") - -end gRUTest3 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak3 b/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak3 deleted file mode 100644 index f8b80544f..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak3 +++ /dev/null @@ -1,421 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Sainagesh Veeravalli - * @version 2.0 - * @date Thu May 11 15:38:07 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @title Gated Recurrent Unit (GRU) for Multivariate Time Series - * - * @see https://www.frontiersin.org/articles/10.3389/fncom.2021.678158/full - * - * Translated from Matlab to Scala - * @see https://www.math.ucla.edu/~minchen/doc/BPTTTutorial.pdf - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/* This Matlab program tests the BPTT process we manually developed for GRU. - * We calculate the gradients of GRU parameters with chain rule, and then - * compare them to the numerical gradients to check whether our chain rule - * derivation is correct. - * - * Here, we provided 2 versions of BPTT, backward_direct() and backward(). - * The former one is the direct idea to calculate gradient within each step - * and add them up (O(n_seqˆ2) time) . The latter one is optimized to - * calculate the contribution of each step to the overall gradient, which is - * only O(n_seq) time. - * - * This is very helpful for people who want to implement GRU in Caffe since - * Caffe does not support auto−differentiation. This is also very helpful for - * the people who want to know the details about Back-propagation Through - * Time algorithm in the Recurrent Neural Networks (such as GRU and LSTM) - * and also get a sense on how auto−differentiation is possible. - * - * NOTE: We does not involve SGD training here. 
With SGD training, this - * program would become a complete implementation of GRU which can be - * trained with sequence data. However, since this is only a CPU serial - * Matlab version of GRU, applying it on large datasets will be dramatically - * slow. - * - * Matlab code: - * by Minchen Li, at The University of British Columbia. 2016−04−21 - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Standard RNN (GRU/LSTM) Formats for Multivariate Time Series Data: - * Matlab: - * - * Keras: 3D format expected by GRU/LSTM is [samples, timesteps, features]. - * => indexing [timestamp t, lags k, variable j] - * PyTorch: - */ - -package scalation -package modeling -package forecasting - -import scala.math.log - -import scalation.mathstat.{MatrixD, VectorD} -import scalation.random.{NormalMat, NormalVec_c} - -import ActivationFun.{sigmoid_, softmax_, tanh_} -import MatrixD.outer - -def log_ (x: VectorD): VectorD = x.map (log) - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `GRU` class implements Gated Recurrent Unit (GRU) via Back Propagation Through - * Time (BPTT). At each time point x_t, there is a vector representing several variables - * or the encoding of a word. Intended to work for guessing the next work in a sentence - * or for multi-horizon forecasting. 
- * Time series: (x_t: t = 0, 1, ..., n_seq-1) where n_seq is the number of time points/words - * @param x the input sequence/time series - * @param y the output sequence/time series - * @param fname the feature/variable names - * @param n_mem the size for hidden state (h) (dimensionality of memory) - */ -class GRU (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): - - private val CLASSIF = false // whether to apply classification (e.g., guess next word) or forecast a value - - // set GRU and data scale - private val n_seq = x.dim // e.g., 20, number of words in a sentence (including start and end symbol) - private val n_var = x.dim2 // e.g., 64, number of variable or distinct words (vocabulary size) - // since we will only use one sentence for training, - // this is also the total steps during training. - - private val _1 = VectorD.one (n_mem) // vector of all ones for e.g., 1 - z - - // initialize parameters (weights and biases) - // multiplier for input x_t of intermediate variables - private val rmg1 = NormalMat (n_mem, n_var, 0.0, 0.01) - private var Uz = rmg1.gen - private var Ur = rmg1.gen - private var Uc = rmg1.gen - - // multiplier for previous s of intermediate variables - private val rmg2 = NormalMat (n_mem, n_mem, 0.0, 0.01) - private var Wz = rmg2.gen - private var Wr = rmg2.gen - private var Wc = rmg2.gen - - // bias terms of intermediate variables - converted to VectorD - private val rvg1 = NormalVec_c (n_mem, 0.0, 0.01) - private var b_z = rvg1.gen - private var b_r = rvg1.gen - private var b_c = rvg1.gen - - // decoder for generating output - private val rmg3 = NormalMat (n_var, n_mem, 0.0, 0.01) - private val rvg3 = NormalVec_c (n_var, 0.0, 0.01) - private var V = rmg3.gen - private var b_V = rvg3.gen // bias of decoder - converted to Vector - - // previous state h of step 1 - private var h_0 = rvg1.gen // converted to vector - - private val max_epochs = 20 // maximum number of iterations - private val L = new VectorD (n_seq) 
// store loss function values - println (s"L = $L") - - // initialize results - // Matlab: zeros -> new MatrixD - private val z = new MatrixD (n_seq, n_mem) // update gate (z) - private val r = new MatrixD (n_seq, n_mem) // reset gate (r) - private val c = new MatrixD (n_seq, n_mem) // candidate state (c) - private val h = new MatrixD (n_seq, n_mem) // hidden state (h) - private val yp = new MatrixD (n_seq, n_var) // predicted output - - // the partial derivative of weights and biases - private var dh_0 = new VectorD (h_0.dim) - private var dUc = new MatrixD (Uc.dim, Uc.dim2) - private var dUr = new MatrixD (Ur.dim, Ur.dim2) - private var dUz = new MatrixD (Uz.dim, Uz.dim2) - private var dWc = new MatrixD (Wc.dim, Wc.dim2) - private var dWr = new MatrixD (Wr.dim, Wr.dim2) - private var dWz = new MatrixD (Wz.dim, Wz.dim2) - - private var db_z = new VectorD (b_z.dim) - private var db_r = new VectorD (b_r.dim) - private var db_c = new VectorD (b_c.dim) - - private var db_V: VectorD = null - private var dV = new MatrixD (V.dim, V.dim2) - - private val eta = 0.02 // the learning rate (0.25 for gRUTest) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the GRU using simple gradient descent. - */ - def train (): Unit = - - for it <- 1 to max_epochs do - // forward propagate: get the intermediate and output results - forward () - - println (s"train: for epoch $it: loss function L = $L") - println(s"train: for epoch $it: total loss function L.sum = ${L.sum}") - - // back propogate: calculate the gradient (the partial derivatives) - backward () - - // update the parameters (weights and biases) - update_params () - end for - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forward propagate calculates yp, loss and intermediate variables for each step. 
- */ - def forward (): Unit = - - // calculate result for step 1 since h_0 is not in state h - - z(0) = sigmoid_ (Uz * x(0) + Wz * h_0 + b_z) // update gate - r(0) = sigmoid_ (Ur * x(0) + Wr * h_0 + b_r) // reset gate - c(0) = tanh_ (Uc * x(0) + Wc * (h_0 * r(0) ) + b_c) // candidate state - h(0) = z(0) * h_0 + (_1 - z(0)) * c(0) // hidden state - if CLASSIF then - yp(0) = softmax_ (V * h(0) + b_V) // activation: softmax for classification - L(0) = (-y(0) * log_ (yp(0))).sum // cross-entropy loss function - else - yp(0) = V * h(0) + b_V // activation: id for forecasting - L(0) = (y(0) - yp(0)).normSq // sse loss function - end if - - // calculate results for step 2 − n_seq similarly (t-th word) - - for t <- 1 until n_seq do - z(t) = sigmoid_ (Uz * x(t) + Wz * h(t-1) + b_z) // update gate - r(t) = sigmoid_ (Ur * x(t) + Wr * h(t-1) + b_r) // reset gate - c(t) = tanh_ (Uc * x(t) + Wc * (h(t-1) * r(t)) + b_c) // candidate state - h(t) = z(t) * h(t-1) + (_1 - z(t)) * c(t) // hidden state - if CLASSIF then - yp(t) = softmax_ (V * h(t) + b_V) // activation: softmax for classification - L(t) = (-y(t) * log_ (yp(t))).sum // cross-entropy loss function - else - yp(t) = V * h(t) + b_V // activation: id for forecasting - L(t) = (y(t) - yp(t)).normSq // sse loss function - end if - end for - end forward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backward propagate to calculate gradient using chain rule (O(n_seq) time). 
- * FIX - add option of using sse loss function and fix affected partial derivatives - */ - def backward (): Unit = - - // calculate gradient using chain rule - - println (s"yp.dims = ${yp.dims}, y.dims = ${y.dims}") - val delta_y = yp - y - db_V = delta_y.sumVr -// dV = new MatrixD (V.dim, V.dim2) - for t <- 0 until n_seq do dV += outer (delta_y(t), h(t)) // outer vector product - -// val dh_single = V.transpose * delta_y - val dh_single = delta_y * V // n_seq by n_mem matrix - - // calculate the derivative contribution of each step and add them up - var dh_cur = new VectorD (dh_single.dim2) - for t <- n_seq-1 to 1 by -1 do - dh_cur += dh_single(t) - val dh_cur_bk = dh_cur - - // mix for new state - val dtanhIn = (dh_cur * (_1 - z(t)) * (_1 - c(t) * c(t))) - db_c += dtanhIn - dUc += outer (dtanhIn, x(t)) - dWc += outer (dtanhIn, (h(t-1) * r(t))) - val dhr = Wc.transpose * dtanhIn - dh_cur = dhr * r(t) - - // reset gate (r) - val dsigIn_r = dhr * h(t-1) * r(t) * (_1 - r(t)) - db_r += dsigIn_r - dUr += outer (dsigIn_r, x(t)) - dWr += outer (dsigIn_r, h(t-1)) - dh_cur += Wr.transpose * dsigIn_r - dh_cur += dh_cur_bk * z(t) - val dz = dh_cur_bk * (h(t-1) - c(t)) - - // update gate (z) - val dsigIn_z = dz * z(t) * (_1 - z(t)) - db_z += dsigIn_z - dUz += outer (dsigIn_z, x(t)) - dWz += outer (dsigIn_z, h(t-1)) - dh_cur += Wz.transpose * dsigIn_z - end for - - // case: state s_1 -> h_0 - dh_cur += dh_single(0) - - val dtanhIn = (dh_cur * (_1 - z(0)) * (_1 - c(0) * c(0))) - db_c += dtanhIn - dUc += outer (dtanhIn, x(0)) - dWc += outer (dtanhIn, (h_0 * r(0))) - val dhr = Wc.transpose * dtanhIn - dh_0 += dhr * r(0) - - val dsigIn_r = dhr * h_0 * r(0) * (_1 - r(0)) - db_r += dsigIn_r - dUr += outer (dsigIn_r, x(0)) - dWr += outer (dsigIn_r, h_0) - dh_0 += Wr.transpose * dsigIn_r - dh_0 += dh_cur * z(0) - val dz = dh_cur * (h_0 - c(0)) - - val dsigIn_z = dz * z(0) * (_1 - z(0)) - db_z += dsigIn_z - dUz += outer (dsigIn_z, x(0)) - dWz += outer (dsigIn_z, h_0) - dh_0 += 
Wz.transpose * dsigIn_z - end backward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on the calculated partial derivatives, update the parameters (weights - * and biases). - */ - def update_params (): Unit = - Uz -= dUz * eta - Ur -= dUr * eta - Uc -= dUc * eta - Wz -= dWz * eta - Wr -= dWr * eta - Wc -= dWc * eta - b_z -= db_z * eta - b_r -= db_r * eta - b_c -= db_c * eta - V -= dV * eta - b_V -= db_V * eta - end update_params - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `GRU` companion object provides factory methods. - */ -object GRU: - - import ActivationFun._ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `GRU` with automatic rescaling from a data matrix and response matrix. - * @param x the input/data matrix - * @param y the output/response matrix - * @param fname the feature/variable names - * @param n_mem the size of the hidden state (dimensionality of memory) - */ - def rescale (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): GRU = - val x_s = rescaleX (x, f_sigmoid) - val y_s = rescaleY (y, f_sigmoid)._1 - -// println (s" scaled: x = $x_s \n scaled y = $y_s") - new GRU (x_s, y_s, fname, n_mem) - end rescale - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Generate a fake sequence dataset: generate only one sentence for training. - * Only for testing. Needs to be changed to read in training data from files. - * The words are one-hot encoded into a column vector. 
- * @param n_seq the sequence size (number of time points/words) - * @param n_var the number of variables/word encoding size - */ -def genSequenceData (n_seq: Int, n_var: Int): (MatrixD, MatrixD) = - - import scalation.random.Randi - - assert (n_var > 2) // for start and end of sentence symbols - assert (n_seq > 0) - - // define start and end of sentence in the vocabulary - val SENTENCE_START = new VectorD (n_var) // start: [1, 0, 0, ...] - SENTENCE_START(0) = 1 - val SENTENCE_END = new VectorD (n_var) // end: [0, 1, 0, ...] - SENTENCE_END(1) = 1 - - println (s"SENTENCE_START = $SENTENCE_START") - println (s"SENTENCE_END = $SENTENCE_END") - - // generate sentence - val i_ran = Randi (0, n_var-3) // random integer generator - val z_t = new MatrixD (n_seq-1, n_var) // leave one slot for SENTENCE START - for t <- 0 until n_seq-1 do - // generate a random word excludes start and end symbol - z_t(t, i_ran.igen+2) = 1 // set a 1 in position to indicate a word - end for - - val x_t = SENTENCE_START +: z_t // training input matrix (prepend vector) - val y_t = z_t :+ SENTENCE_END // training output matrix (append vector) - - (x_t, y_t) -end genSequenceData - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest` main function tests the `GRU` class on randomly generated - * sequence data meant to represent encoded words - * > runMain scalation.modeling.forecasting.gRUTest - */ -@main def gRUTest (): Unit = - - val n_seq = 8 - val n_var = 5 - - val (x_t, y_t) = genSequenceData (n_seq, n_var) - - println (s"x_t = $x_t") - println (s"y_t = $y_t") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x_t, y_t) - mod.train () -// mod.test () - -end gRUTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest2` main function tests the `GRU` class on sequence data read as words - * in a file that encoded and pass into `GRU` - * > runMain 
scalation.modeling.forecasting.gRUTest2 - */ -@main def gRUTest2 (): Unit = - - println ("read words from a text file") - -// FIX - find example text - -end gRUTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest3` main function tests the `GRU` class on sequence/time series data - * corresponding to the lake level dataset using multiple lags. - * > runMain scalation.modeling.forecasting.gRUTest3 - */ -@main def gRUTest3 (): Unit = - - import Example_LakeLevels.y - val lag = 2 // number of lags to include - val hh = 2 // forecasting horizon - FIX - currently lags == hh - - val y_s = scaleV (extreme (y), (-2.0, 2.0))(y) // rescale y to active domain of sigmoid, tanh - - val (x, yy) = RegressionMV4TS.buildMatrix (y_s, lag, hh) // column for each lag - - println (s"x.dims = ${x.dims}, yy.dims = ${yy.dims}") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x, yy) // call constructor - mod.train () -// mod.test () - -end gRUTest3 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak4 b/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak4 deleted file mode 100644 index 6b3b273e3..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak4 +++ /dev/null @@ -1,432 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Sainagesh Veeravalli - * @version 2.0 - * @date Thu May 11 15:38:07 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @title Gated Recurrent Unit (GRU) for Multivariate Time Series - * - * @see https://www.frontiersin.org/articles/10.3389/fncom.2021.678158/full - * - * Translated from Matlab to Scala - * @see https://www.math.ucla.edu/~minchen/doc/BPTTTutorial.pdf - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/* This Matlab program tests the BPTT process we manually developed for GRU. 
- * We calculate the gradients of GRU parameters with chain rule, and then - * compare them to the numerical gradients to check whether our chain rule - * derivation is correct. - * - * Here, we provided 2 versions of BPTT, backward_direct() and backward(). - * The former one is the direct idea to calculate gradient within each step - * and add them up (O(n_seqˆ2) time) . The latter one is optimized to - * calculate the contribution of each step to the overall gradient, which is - * only O(n_seq) time. - * - * This is very helpful for people who want to implement GRU in Caffe since - * Caffe does not support auto−differentiation. This is also very helpful for - * the people who want to know the details about Back-propagation Through - * Time algorithm in the Recurrent Neural Networks (such as GRU and LSTM) - * and also get a sense on how auto−differentiation is possible. - * - * NOTE: We does not involve SGD training here. With SGD training, this - * program would become a complete implementation of GRU which can be - * trained with sequence data. However, since this is only a CPU serial - * Matlab version of GRU, applying it on large datasets will be dramatically - * slow. - * - * Matlab code: - * by Minchen Li, at The University of British Columbia. 2016−04−21 - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Standard RNN (GRU/LSTM) Formats for Multivariate Time Series Data: - * Matlab: - * - * Keras: 3D format expected by GRU/LSTM is [samples, timesteps, features]. 
- * => indexing [timestamp t, lags k, variable j] - * PyTorch: - */ - -package scalation -package modeling -package forecasting - -import scala.math.log - -import scalation.mathstat.{MatrixD, VectorD} -import scalation.random.{NormalMat, NormalVec_c} - -import ActivationFun.{sigmoid_, softmax_, tanh_} -import MatrixD.outer - -def log_ (x: VectorD): VectorD = x.map (log) - - -case class Gate (n_seq: Int, n_mem: Int, n_var: Int): - - val v = new MatrixD (n_seq, n_mem) // gate value time x state - var dU = new MatrixD (n_mem, n_var) // partial w.r.t. weight matrix U - var dW = new MatrixD (n_mem, n_mem) // partial w.r.t. weight matrix W - var db = new VectorD (n_mem) // partial w.r.t. bias vector b - - def apply (t: Int): VectorD = v(t) - def update (t: Int, vv: VectorD): Unit = v(t) = vv - -end Gate - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `GRU` class implements Gated Recurrent Unit (GRU) via Back Propagation Through - * Time (BPTT). At each time point x_t, there is a vector representing several variables - * or the encoding of a word. Intended to work for guessing the next work in a sentence - * or for multi-horizon forecasting. 
- * Time series: (x_t: t = 0, 1, ..., n_seq-1) where n_seq is the number of time points/words - * @param x the input sequence/time series - * @param y the output sequence/time series - * @param fname the feature/variable names - * @param n_mem the size for hidden state (h) (dimensionality of memory) - */ -class GRU (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): - - private val CLASSIF = false // whether to apply classification (e.g., guess next word) or forecast a value - - // set GRU and data scale - private val n_seq = x.dim // e.g., 20, number of words in a sentence (including start and end symbol) - private val n_var = x.dim2 // e.g., 64, number of variable or distinct words (vocabulary size) - // since we will only use one sentence for training, - // this is also the total steps during training. - - private val _1 = VectorD.one (n_mem) // vector of all ones for e.g., 1 - z - - // initialize parameters (weights and biases) - // multiplier for input x_t of intermediate variables - private val rmg1 = NormalMat (n_mem, n_var, 0.0, 0.01) - private var Uz = rmg1.gen - private var Ur = rmg1.gen - private var Uc = rmg1.gen - - // multiplier for previous s of intermediate variables - private val rmg2 = NormalMat (n_mem, n_mem, 0.0, 0.01) - private var Wz = rmg2.gen - private var Wr = rmg2.gen - private var Wc = rmg2.gen - - // bias terms of intermediate variables - converted to VectorD - private val rvg1 = NormalVec_c (n_mem, 0.0, 0.01) - private var b_z = rvg1.gen - private var b_r = rvg1.gen - private var b_c = rvg1.gen - - // decoder for generating output - private val rmg3 = NormalMat (n_var, n_mem, 0.0, 0.01) - private val rvg3 = NormalVec_c (n_var, 0.0, 0.01) - private var V = rmg3.gen - private var b_V = rvg3.gen // bias of decoder - converted to Vector - - // previous state h of step 1 (t = -1) - private var h_init = rvg1.gen // converted to vector - - private val max_epochs = 20 // maximum number of iterations - private val L = new 
VectorD (n_seq) // store loss function values - println (s"L = $L") - - private val z = Gate (n_seq, n_mem, n_var) // update gate (z) - private val r = Gate (n_seq, n_mem, n_var) // reset gate (r) - private val c = Gate (n_seq, n_mem, n_var) // candidate state (c) - -/* - private val z = new MatrixD (n_seq, n_mem) // update gate (z) - private var dUz = new MatrixD (n_mem, n_var) - private var dWz = new MatrixD (n_mem, n_mem) - private var db_z = new VectorD (n_mem) - - private val r = new MatrixD (n_seq, n_mem) // reset gate (r) - private var dUr = new MatrixD (n_mem, n_var) - private var dWr = new MatrixD (n_mem, n_mem) - private var db_r = new VectorD (n_mem) - - private val c = new MatrixD (n_seq, n_mem) // candidate state (c) - private var dUc = new MatrixD (n_mem, n_var) - private var dWc = new MatrixD (n_mem, n_mem) - private var db_c = new VectorD (n_mem) -*/ - - private val h = new MatrixD (n_seq, n_mem) // hidden state (h) - private val yp = new MatrixD (n_seq, n_var) // predicted output - - // the partial derivative of weights and biases - private var dh_init = new VectorD (h_init.dim) - - private var db_V: VectorD = null - private var dV = new MatrixD (V.dim, V.dim2) - - private val eta = 0.02 // the learning rate (0.25 for gRUTest) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the GRU using simple gradient descent. 
- */ - def train (): Unit = - - for it <- 1 to max_epochs do - // forward propagate: get the intermediate and output results - forward () - - println (s"train: for epoch $it: loss function L = $L") - println(s"train: for epoch $it: total loss function L.sum = ${L.sum}") - - // back propogate: calculate the gradient (the partial derivatives) - backward () - - // update the parameters (weights and biases) - update_params () - end for - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forward propagate calculates yp, loss and intermediate variables for each step. - */ - def forward (): Unit = - for t <- 0 until n_seq do - val h_pre = if t == 0 then h_init else h(t-1) // get previous hidden state - z(t) = sigmoid_ (Uz * x(t) + Wz * h_pre + b_z) // update gate - r(t) = sigmoid_ (Ur * x(t) + Wr * h_pre + b_r) // reset gate - c(t) = tanh_ (Uc * x(t) + Wc * (h_pre * r(t)) + b_c) // candidate state - h(t) = z(t) * h_pre + (_1 - z(t)) * c(t) // hidden state - if CLASSIF then - yp(t) = softmax_ (V * h(t) + b_V) // activation: softmax for classification - L(t) = (-y(t) * log_ (yp(t))).sum // cross-entropy loss function - else - yp(t) = V * h(t) + b_V // activation: id for forecasting - L(t) = (y(t) - yp(t)).normSq // sse loss function - end if - end for - end forward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backward propagate to calculate gradient using chain rule (O(n_seq) time). 
- * FIX - add option of using sse loss function and fix affected partial derivatives - */ - def backward (): Unit = - - // calculate gradient using chain rule - - println (s"yp.dims = ${yp.dims}, y.dims = ${y.dims}") - val delta_y = yp - y - db_V = delta_y.sumVr -// dV = new MatrixD (V.dim, V.dim2) - for t <- 0 until n_seq do dV += outer (delta_y(t), h(t)) // outer vector product - -// val dh_single = V.transpose * delta_y - val dh_single = delta_y * V // n_seq by n_mem matrix - - // calculate the derivative contribution of each step and add them up - var dh_cur = new VectorD (dh_single.dim2) - for t <- n_seq-1 to 1 by -1 do - dh_cur += dh_single(t) - val dh_cur_bk = dh_cur - - // mix for new candidate state - val dtanhIn = (dh_cur * (_1 - z(t)) * (_1 - c(t) * c(t))) - c.db += dtanhIn - c.dU += outer (dtanhIn, x(t)) - c.dW += outer (dtanhIn, (h(t-1) * r(t))) - val dhr = Wc.transpose * dtanhIn - dh_cur = dhr * r(t) - - // reset gate (r) - val dsigIn_r = dhr * h(t-1) * r(t) * (_1 - r(t)) - r.db += dsigIn_r - r.dU += outer (dsigIn_r, x(t)) - r.dW += outer (dsigIn_r, h(t-1)) - dh_cur += Wr.transpose * dsigIn_r - dh_cur += dh_cur_bk * z(t) - - // update gate (z) - val dz = dh_cur_bk * (h(t-1) - c(t)) - val dsigIn_z = dz * z(t) * (_1 - z(t)) - z.db += dsigIn_z - z.dU += outer (dsigIn_z, x(t)) - z.dW += outer (dsigIn_z, h(t-1)) - dh_cur += Wz.transpose * dsigIn_z - end for - - // case: state s_1 -> h_init - dh_cur += dh_single(0) - - val dtanhIn = (dh_cur * (_1 - z(0)) * (_1 - c(0) * c(0))) - c.db += dtanhIn - c.dU += outer (dtanhIn, x(0)) - c.dW += outer (dtanhIn, (h_init * r(0))) - val dhr = Wc.transpose * dtanhIn - dh_init += dhr * r(0) - - val dsigIn_r = dhr * h_init * r(0) * (_1 - r(0)) - r.db += dsigIn_r - r.dU += outer (dsigIn_r, x(0)) - r.dW += outer (dsigIn_r, h_init) - dh_init += Wr.transpose * dsigIn_r - dh_init += dh_cur * z(0) - - // update gate (z) - val dz = dh_cur * (h_init - c(0)) - val dsigIn_z = dz * z(0) * (_1 - z(0)) - z.db += dsigIn_z - z.dU += outer 
(dsigIn_z, x(0)) - z.dW += outer (dsigIn_z, h_init) - dh_init += Wz.transpose * dsigIn_z - end backward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on the calculated partial derivatives, update the parameters (weights - * and biases). - */ - def update_params (): Unit = - // update gate (z) - Uz -= z.dU * eta - Wz -= z.dW * eta - b_z -= z.db * eta - - // reset gate (r) - Ur -= r.dU * eta - Wr -= r.dW * eta - b_r -= r.db * eta - - // candidate state (c) - Uc -= c.dU * eta - Wc -= c.dW * eta - b_c -= c.db * eta - - V -= dV * eta - b_V -= db_V * eta - end update_params - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `GRU` companion object provides factory methods. - */ -object GRU: - - import ActivationFun._ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `GRU` with automatic rescaling from a data matrix and response matrix. - * @param x the input/data matrix - * @param y the output/response matrix - * @param fname the feature/variable names - * @param n_mem the size of the hidden state (dimensionality of memory) - */ - def rescale (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): GRU = - val x_s = rescaleX (x, f_sigmoid) - val y_s = rescaleY (y, f_sigmoid)._1 - -// println (s" scaled: x = $x_s \n scaled y = $y_s") - new GRU (x_s, y_s, fname, n_mem) - end rescale - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Generate a fake sequence dataset: generate only one sentence for training. - * Only for testing. Needs to be changed to read in training data from files. - * The words are one-hot encoded into a column vector. 
- * @param n_seq the sequence size (number of time points/words) - * @param n_var the number of variables/word encoding size - */ -def genSequenceData (n_seq: Int, n_var: Int): (MatrixD, MatrixD) = - - import scalation.random.Randi - - assert (n_var > 2) // for start and end of sentence symbols - assert (n_seq > 0) - - // define start and end of sentence in the vocabulary - val SENTENCE_START = new VectorD (n_var) // start: [1, 0, 0, ...] - SENTENCE_START(0) = 1 - val SENTENCE_END = new VectorD (n_var) // end: [0, 1, 0, ...] - SENTENCE_END(1) = 1 - - println (s"SENTENCE_START = $SENTENCE_START") - println (s"SENTENCE_END = $SENTENCE_END") - - // generate sentence - val i_ran = Randi (0, n_var-3) // random integer generator - val z_t = new MatrixD (n_seq-1, n_var) // leave one slot for SENTENCE START - for t <- 0 until n_seq-1 do - // generate a random word excludes start and end symbol - z_t(t, i_ran.igen+2) = 1 // set a 1 in position to indicate a word - end for - - val x_t = SENTENCE_START +: z_t // training input matrix (prepend vector) - val y_t = z_t :+ SENTENCE_END // training output matrix (append vector) - - (x_t, y_t) -end genSequenceData - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest` main function tests the `GRU` class on randomly generated - * sequence data meant to represent encoded words - * > runMain scalation.modeling.forecasting.gRUTest - */ -@main def gRUTest (): Unit = - - val n_seq = 8 - val n_var = 5 - - val (x_t, y_t) = genSequenceData (n_seq, n_var) - - println (s"x_t = $x_t") - println (s"y_t = $y_t") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x_t, y_t) - mod.train () -// mod.test () - -end gRUTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest2` main function tests the `GRU` class on sequence data read as words - * in a file that encoded and pass into `GRU` - * > runMain 
scalation.modeling.forecasting.gRUTest2 - */ -@main def gRUTest2 (): Unit = - - println ("read words from a text file") - -// FIX - find example text - -end gRUTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest3` main function tests the `GRU` class on sequence/time series data - * corresponding to the lake level dataset using multiple lags. - * > runMain scalation.modeling.forecasting.gRUTest3 - */ -@main def gRUTest3 (): Unit = - - import Example_LakeLevels.y - val lag = 2 // number of lags to include - val hh = 2 // forecasting horizon - FIX - currently lags == hh - - val y_s = scaleV (extreme (y), (-2.0, 2.0))(y) // rescale y to active domain of sigmoid, tanh - - val (x, yy) = RegressionMV4TS.buildMatrix (y_s, lag, hh) // column for each lag - - println (s"x.dims = ${x.dims}, yy.dims = ${yy.dims}") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x, yy) // call constructor - mod.train () -// mod.test () - -end gRUTest3 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak5 b/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak5 deleted file mode 100644 index 01079ee26..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak5 +++ /dev/null @@ -1,401 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Sainagesh Veeravalli - * @version 2.0 - * @date Thu May 11 15:38:07 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @title Gated Recurrent Unit (GRU) for Multivariate Time Series - * - * @see https://www.frontiersin.org/articles/10.3389/fncom.2021.678158/full - * - * Translated from Matlab to Scala - * @see https://www.math.ucla.edu/~minchen/doc/BPTTTutorial.pdf - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/* This Matlab program tests the BPTT process we manually developed for GRU. 
- * We calculate the gradients of GRU parameters with chain rule, and then - * compare them to the numerical gradients to check whether our chain rule - * derivation is correct. - * - * Here, we provided 2 versions of BPTT, backward_direct() and backward(). - * The former one is the direct idea to calculate gradient within each step - * and add them up (O(n_seqˆ2) time) . The latter one is optimized to - * calculate the contribution of each step to the overall gradient, which is - * only O(n_seq) time. - * - * This is very helpful for people who want to implement GRU in Caffe since - * Caffe does not support auto−differentiation. This is also very helpful for - * the people who want to know the details about Back-propagation Through - * Time algorithm in the Recurrent Neural Networks (such as GRU and LSTM) - * and also get a sense on how auto−differentiation is possible. - * - * NOTE: We does not involve SGD training here. With SGD training, this - * program would become a complete implementation of GRU which can be - * trained with sequence data. However, since this is only a CPU serial - * Matlab version of GRU, applying it on large datasets will be dramatically - * slow. - * - * Matlab code: - * by Minchen Li, at The University of British Columbia. 2016−04−21 - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Standard RNN (GRU/LSTM) Formats for Multivariate Time Series Data: - * Matlab: - * - * Keras: 3D format expected by GRU/LSTM is [samples, timesteps, features]. 
- * => indexing [timestamp t, lags k, variable j] - * PyTorch: - */ - -package scalation -package modeling -package forecasting - -import scala.math.log - -import scalation.mathstat.{MatrixD, VectorD} -import scalation.random.{NormalMat, NormalVec_c} - -import ActivationFun.{sigmoid_, softmax_, tanh_} -import MatrixD.outer - -def log_ (x: VectorD): VectorD = x.map (log) - - -case class Gate (n_seq: Int, n_mem: Int, n_var: Int): - - val v = new MatrixD (n_seq, n_mem) // gate value: time x state - var dU = new MatrixD (n_mem, n_var) // partial w.r.t. weight matrix U - var dW = new MatrixD (n_mem, n_mem) // partial w.r.t. weight matrix W - var db = new VectorD (n_mem) // partial w.r.t. bias vector b - - def apply (t: Int): VectorD = v(t) - def update (t: Int, vv: VectorD): Unit = v(t) = vv - def += (a1: MatrixD, a2: MatrixD, a3: VectorD): Unit = - { dU += a1; dW += a2; db += a3 } - - def += (dIn: VectorD, x_t: VectorD, h_tm1: VectorD): Unit = - { dU += outer (dIn, x_t); dW += outer (dIn, h_tm1); db += dIn } - -end Gate - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `GRU` class implements Gated Recurrent Unit (GRU) via Back Propagation Through - * Time (BPTT). At each time point x_t, there is a vector representing several variables - * or the encoding of a word. Intended to work for guessing the next work in a sentence - * or for multi-horizon forecasting. 
- * Time series: (x_t: t = 0, 1, ..., n_seq-1) where n_seq is the number of time points/words - * @param x the input sequence/time series - * @param y the output sequence/time series - * @param fname the feature/variable names - * @param n_mem the size for hidden state (h) (dimensionality of memory) - */ -class GRU (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): - - private val CLASSIF = false // whether to apply classification (e.g., guess next word) or forecast a value - private val max_epochs = 20 // maximum number of iterations - private val eta = 0.02 // the learning rate (0.25 for gRUTest) - - private val n_seq = x.dim // e.g., 20, number of words in a sentence (including start and end symbol) - private val n_var = x.dim2 // e.g., 64, number of variable or distinct words (vocabulary size) - // since we will only use one sentence for training, - // this is also the total steps during training. - - private val _1 = VectorD.one (n_mem) // vector of all ones for e.g., 1 - z - - // initialize parameters (weights and biases) - private val rmg1 = NormalMat (n_mem, n_var, 0.0, 0.01) - private var Uz = rmg1.gen - private var Ur = rmg1.gen - private var Uc = rmg1.gen - - // multiplier for previous s of intermediate variables - private val rmg2 = NormalMat (n_mem, n_mem, 0.0, 0.01) - private var Wz = rmg2.gen - private var Wr = rmg2.gen - private var Wc = rmg2.gen - - // bias terms of intermediate variables - converted to VectorD - private val rvg1 = NormalVec_c (n_mem, 0.0, 0.01) - private var b_z = rvg1.gen - private var b_r = rvg1.gen - private var b_c = rvg1.gen - - // decoder for generating output - private val rmg3 = NormalMat (n_var, n_mem, 0.0, 0.01) - private val rvg3 = NormalVec_c (n_var, 0.0, 0.01) - private var V = rmg3.gen - private var b_V = rvg3.gen // bias of decoder - converted to Vector - - // previous state h of step 1 (t = -1) - private var h_m1 = rvg1.gen // hidden state @ t = -1, converted to vector - - private val L = new 
VectorD (n_seq) // store loss function values - println (s"L = $L") - - private val z = Gate (n_seq, n_mem, n_var) // update gate (z) - private val r = Gate (n_seq, n_mem, n_var) // reset gate (r) - private val c = Gate (n_seq, n_mem, n_var) // candidate state (c) - - private val h = new MatrixD (n_seq, n_mem) // hidden state (h) - private val yp = new MatrixD (n_seq, n_var) // predicted output - - // the partial derivative of weights and biases - private var dh_m1 = new VectorD (h_m1.dim) - - private var db_V: VectorD = null - private var dV = new MatrixD (V.dim, V.dim2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the GRU using simple gradient descent. - */ - def train (): Unit = - - for it <- 1 to max_epochs do - // forward propagate: get the intermediate and output results - forward () - - println (s"train: for epoch $it: loss function L = $L") - println(s"train: for epoch $it: total loss function L.sum = ${L.sum}") - - // back propogate: calculate the gradient (the partial derivatives) - backward () - - // update the parameters (weights and biases) - update_params () - end for - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forward propagate calculates yp, loss and intermediate variables for each step. 
- */ - def forward (): Unit = - for t <- 0 until n_seq do - val h_pre = if t == 0 then h_m1 else h(t-1) // get previous hidden state - z(t) = sigmoid_ (Uz * x(t) + Wz * h_pre + b_z) // update gate - r(t) = sigmoid_ (Ur * x(t) + Wr * h_pre + b_r) // reset gate - c(t) = tanh_ (Uc * x(t) + Wc * (h_pre * r(t)) + b_c) // candidate state - h(t) = z(t) * h_pre + (_1 - z(t)) * c(t) // hidden state - if CLASSIF then - yp(t) = softmax_ (V * h(t) + b_V) // activation: softmax for classification - L(t) = (-y(t) * log_ (yp(t))).sum // cross-entropy loss function - else - yp(t) = V * h(t) + b_V // activation: id for forecasting - L(t) = (y(t) - yp(t)).normSq // sse loss function - end if - end for - end forward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backward propagate to calculate gradients using chain rules in O(n_seq) time. - * FIX - add option of using sse loss function and fix affected partial derivatives - */ - def backward (): Unit = - - println (s"yp.dims = ${yp.dims}, y.dims = ${y.dims}") - val e = yp - y // negative error - db_V = e.sumVr - for t <- 0 until n_seq do dV += outer (e(t), h(t)) // outer vector product - - val dh_single = e * V // n_seq by n_mem matrix - var dh = new VectorD (dh_single.dim2) // partial for hidden state (dh) - - // calculate the derivative contribution of each step and add them up - - for t <- n_seq-1 to 1 by -1 do - dh += dh_single(t) // update partial for hidden state (dh) # time t - val dh_bk = dh // save dh - - // mix for new candidate state (c) - val dIn_c = (dh * (_1 - z(t)) * (_1 - c(t) * c(t))) // input to tanh for candidate (c) - c += (outer (dIn_c, x(t)), outer (dIn_c, (h(t-1) * r(t))), dIn_c) // update partials for c candidate - val dhr = Wc.Ƭ * dIn_c - dh = dhr * r(t) - - // reset gate (r) - val dIn_r = dhr * h(t-1) * r(t) * (_1 - r(t)) // input to sigmoid reset gate r - r += (dIn_r, x(t), h(t-1)) -// r += (outer (dIn_r, x(t)), outer (dIn_r, h(t-1)), dIn_r) // update partials for 
r gate - dh += Wr.Ƭ * dIn_r + dh_bk * z(t) - - // update gate (z) - val dIn_z = dh_bk * (h(t-1) - c(t)) * z(t) * (_1 - z(t)) // input to sigmoid update gate z - z += (dIn_z, x(t), h(t-1)) -// z += (outer (dIn_z, x(t)), outer (dIn_z, h(t-1)), dIn_z) // update partials for z gate - dh += Wz.Ƭ * dIn_z - end for - - // end case @ time t = 0 -> use h_m1 - - dh += dh_single(0) // update partial for hidden state (dh) @ time 0 - - val dIn_c = (dh * (_1 - z(0)) * (_1 - c(0) * c(0))) - c += (outer (dIn_c, x(0)), outer (dIn_c, (h_m1 * r(0))), dIn_c) - val dhr = Wc.Ƭ * dIn_c - dh_m1 += dhr * r(0) - - val dIn_r = dhr * h_m1 * r(0) * (_1 - r(0)) - r += (outer (dIn_r, x(0)), outer (dIn_r, h_m1), dIn_r) - dh_m1 += Wr.Ƭ * dIn_r + dh * z(0) - - val dIn_z = dh * (h_m1 - c(0)) * z(0) * (_1 - z(0)) - z += (outer (dIn_z, x(0)), outer (dIn_z, h_m1), dIn_z) - dh_m1 += Wz.Ƭ * dIn_z - end backward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on the calculated partial derivatives, update the parameters (weights - * and biases). - */ - def update_params (): Unit = - // update gate (z) - Uz -= z.dU * eta - Wz -= z.dW * eta - b_z -= z.db * eta - - // reset gate (r) - Ur -= r.dU * eta - Wr -= r.dW * eta - b_r -= r.db * eta - - // candidate state (c) - Uc -= c.dU * eta - Wc -= c.dW * eta - b_c -= c.db * eta - - // output layer - V -= dV * eta - b_V -= db_V * eta - end update_params - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `GRU` companion object provides factory methods. - */ -object GRU: - - import ActivationFun._ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `GRU` with automatic rescaling from a data matrix and response matrix. 
- * @param x the input/data matrix - * @param y the output/response matrix - * @param fname the feature/variable names - * @param n_mem the size of the hidden state (dimensionality of memory) - */ - def rescale (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): GRU = - val x_s = rescaleX (x, f_sigmoid) - val y_s = rescaleY (y, f_sigmoid)._1 - -// println (s" scaled: x = $x_s \n scaled y = $y_s") - new GRU (x_s, y_s, fname, n_mem) - end rescale - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Generate a fake sequence dataset: generate only one sentence for training. - * Only for testing. Needs to be changed to read in training data from files. - * The words are one-hot encoded into a column vector. - * @param n_seq the sequence size (number of time points/words) - * @param n_var the number of variables/word encoding size - */ -def genSequenceData (n_seq: Int, n_var: Int): (MatrixD, MatrixD) = - - import scalation.random.Randi - - assert (n_var > 2) // for start and end of sentence symbols - assert (n_seq > 0) - - // define start and end of sentence in the vocabulary - val SENTENCE_START = new VectorD (n_var) // start: [1, 0, 0, ...] - SENTENCE_START(0) = 1 - val SENTENCE_END = new VectorD (n_var) // end: [0, 1, 0, ...] 
- SENTENCE_END(1) = 1 - - println (s"SENTENCE_START = $SENTENCE_START") - println (s"SENTENCE_END = $SENTENCE_END") - - // generate sentence - val i_ran = Randi (0, n_var-3) // random integer generator - val z_t = new MatrixD (n_seq-1, n_var) // leave one slot for SENTENCE START - for t <- 0 until n_seq-1 do - // generate a random word excludes start and end symbol - z_t(t, i_ran.igen+2) = 1 // set a 1 in position to indicate a word - end for - - val x_t = SENTENCE_START +: z_t // training input matrix (prepend vector) - val y_t = z_t :+ SENTENCE_END // training output matrix (append vector) - - (x_t, y_t) -end genSequenceData - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest` main function tests the `GRU` class on randomly generated - * sequence data meant to represent encoded words - * > runMain scalation.modeling.forecasting.gRUTest - */ -@main def gRUTest (): Unit = - - val n_seq = 8 - val n_var = 5 - - val (x_t, y_t) = genSequenceData (n_seq, n_var) - - println (s"x_t = $x_t") - println (s"y_t = $y_t") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x_t, y_t) - mod.train () -// mod.test () - -end gRUTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest2` main function tests the `GRU` class on sequence data read as words - * in a file that encoded and pass into `GRU` - * > runMain scalation.modeling.forecasting.gRUTest2 - */ -@main def gRUTest2 (): Unit = - - println ("read words from a text file") - -// FIX - find example text - -end gRUTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest3` main function tests the `GRU` class on sequence/time series data - * corresponding to the lake level dataset using multiple lags. 
- * > runMain scalation.modeling.forecasting.gRUTest3 - */ -@main def gRUTest3 (): Unit = - - import Example_LakeLevels.y - val lag = 2 // number of lags to include - val hh = 2 // forecasting horizon - FIX - currently lags == hh - - val y_s = scaleV (extreme (y), (-2.0, 2.0))(y) // rescale y to active domain of sigmoid, tanh - - val (x, yy) = RegressionMV4TS.buildMatrix (y_s, lag, hh) // column for each lag - - println (s"x.dims = ${x.dims}, yy.dims = ${yy.dims}") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x, yy) // call constructor - mod.train () -// mod.test () - -end gRUTest3 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak6 b/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak6 deleted file mode 100644 index 6f61ff152..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/GRU.scala.bak6 +++ /dev/null @@ -1,393 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Sainagesh Veeravalli - * @version 2.0 - * @date Thu May 11 15:38:07 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @title Model: Gated Recurrent Unit (GRU) for Multivariate Time Series - * - * @see https://www.frontiersin.org/articles/10.3389/fncom.2021.678158/full - * - * Translated from Matlab to Scala - * @see https://www.math.ucla.edu/~minchen/doc/BPTTTutorial.pdf - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/* This Matlab program tests the BPTT process we manually developed for GRU. - * We calculate the gradients of GRU parameters with chain rule, and then - * compare them to the numerical gradients to check whether our chain rule - * derivation is correct. - * - * Here, we provided 2 versions of BPTT, backward_direct() and backward(). - * The former one is the direct idea to calculate gradient within each step - * and add them up (O(n_seqˆ2) time) . 
The latter one is optimized to - * calculate the contribution of each step to the overall gradient, which is - * only O(n_seq) time. - * - * This is very helpful for people who want to implement GRU in Caffe since - * Caffe does not support auto−differentiation. This is also very helpful for - * the people who want to know the details about Back-propagation Through - * Time algorithm in the Recurrent Neural Networks (such as GRU and LSTM) - * and also get a sense on how auto−differentiation is possible. - * - * NOTE: We does not involve SGD training here. With SGD training, this - * program would become a complete implementation of GRU which can be - * trained with sequence data. However, since this is only a CPU serial - * Matlab version of GRU, applying it on large datasets will be dramatically - * slow. - * - * Matlab code: - * by Minchen Li, at The University of British Columbia. 2016−04−21 - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Standard RNN (GRU/LSTM) Formats for Multivariate Time Series Data: - * Matlab: - * - * Keras: 3D format expected by GRU/LSTM is [samples, timesteps, features]. - * => indexing [timestamp t, lags k, variable j] - * PyTorch: - */ - -package scalation -package modeling -package forecasting - -import scala.math.log - -import scalation.mathstat.{MatrixD, VectorD} -import scalation.random.{NormalMat, NormalVec_c} - -import ActivationFun.{sigmoid_, softmax_, tanh_} -import MatrixD.outer - -def log_ (x: VectorD): VectorD = x.map (log) - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Gate` case class holds information on the gate's value and its partial derivatives. 
- * @param n_seq the length of the time series - * @param n_mem the size for hidden state (h) (dimensionality of memory) - * @param n_var the number of variables - */ -case class Gate (n_seq: Int, n_mem: Int, n_var: Int): - - val v = new MatrixD (n_seq, n_mem) // gate value: time x state - var dU = new MatrixD (n_mem, n_var) // partial w.r.t. weight matrix U - var dW = new MatrixD (n_mem, n_mem) // partial w.r.t. weight matrix W - var db = new VectorD (n_mem) // partial w.r.t. bias vector b - - def apply (t: Int): VectorD = v(t) - - def update (t: Int, vv: VectorD): Unit = v(t) = vv - - def += (dIn: VectorD, x_t: VectorD, h_tm1: VectorD): Unit = - { dU += outer (dIn, x_t); dW += outer (dIn, h_tm1); db += dIn } - -end Gate - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `GRU` class implements Gated Recurrent Unit (GRU) via Back Propagation Through - * Time (BPTT). At each time point x_t, there is a vector representing several variables - * or the encoding of a word. Intended to work for guessing the next work in a sentence - * or for multi-horizon forecasting. 
- * Time series: (x_t: t = 0, 1, ..., n_seq-1) where n_seq is the number of time points/words - * @param x the input sequence/time series - * @param y the output sequence/time series - * @param fname the feature/variable names - * @param n_mem the size for hidden state (h) (dimensionality of memory) - */ -class GRU (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): - - private val CLASSIF = false // whether to apply classification (e.g., guess next word) or forecast a value - private val max_epochs = 20 // maximum number of iterations - private val eta = 0.02 // the learning rate (0.25 for gRUTest) - - private val n_seq = x.dim // e.g., 20, number of words in a sentence (including start and end symbol) - private val n_var = x.dim2 // e.g., 64, number of variables or distinct words (vocabulary size) - // since we will only use one sentence for training, - // this is also the total steps during training. - - private val _1 = VectorD.one (n_mem) // vector of all ones for e.g., 1 - z - - // initialize parameters (weights and biases) - private val rmg1 = NormalMat (n_mem, n_var, 0.0, 0.01) // random (Normal) matrix generators - private val rmg2 = NormalMat (n_mem, n_mem, 0.0, 0.01) - private val rmg3 = NormalMat (n_var, n_mem, 0.0, 0.01) - private val rvg1 = NormalVec_c (n_mem, 0.0, 0.01) // random (Normal) vector generators - private val rvg3 = NormalVec_c (n_var, 0.0, 0.01) - - private var Uz = rmg1.gen // parameters for update gate z - private var Wz = rmg2.gen - private var b_z = rvg1.gen - - private var Ur = rmg1.gen // parameters for reset gate r - private var Wr = rmg2.gen - private var b_r = rvg1.gen - - private var Uc = rmg1.gen // parameters for candidate state mixin c - private var Wc = rmg2.gen - private var b_c = rvg1.gen - - // decoder for generating output - private var V = rmg3.gen // decoder weight matrix - private var b_V = rvg3.gen // decoder bias vector - - private val z = Gate (n_seq, n_mem, n_var) // update gate z - private val 
r = Gate (n_seq, n_mem, n_var) // reset gate r - private val c = Gate (n_seq, n_mem, n_var) // candidate state mixin c - - private var h_m1 = rvg1.gen // hidden state @ t = -1 (m1 means minus 1) - private val h = new MatrixD (n_seq, n_mem) // hidden state h - private val yp = new MatrixD (n_seq, n_var) // predicted output - private val L = new VectorD (n_seq) // store loss function values - - // the partial derivative of weights and biases (outside gates) - private var dh_m1 = new VectorD (h_m1.dim) - private var db_V: VectorD = null - private var dV = new MatrixD (V.dim, V.dim2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the GRU using simple gradient descent. - */ - def train (): Unit = - for it <- 1 to max_epochs do - forward () // forward propagate: get intermediate and output results - - println (s"train: for epoch $it: loss function L = $L") - println (s"train: for epoch $it: total loss function L.sum = ${L.sum}") - - backward () // back propagate: calculate gradients (partial derivatives) - - update_params () // update parameters (weights and biases) - end for - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forward propagate calculates yp, loss and intermediate variables for each step. 
- */ - def forward (): Unit = - for t <- 0 until n_seq do - val h_pre = if t == 0 then h_m1 else h(t-1) // get previous hidden state - z(t) = sigmoid_ (Uz * x(t) + Wz * h_pre + b_z) // update gate - r(t) = sigmoid_ (Ur * x(t) + Wr * h_pre + b_r) // reset gate - c(t) = tanh_ (Uc * x(t) + Wc * (h_pre * r(t)) + b_c) // candidate state - h(t) = z(t) * h_pre + (_1 - z(t)) * c(t) // hidden state - if CLASSIF then - yp(t) = softmax_ (V * h(t) + b_V) // activation: softmax for classification - L(t) = (-y(t) * log_ (yp(t))).sum // cross-entropy loss function - else - yp(t) = V * h(t) + b_V // activation: id for forecasting - L(t) = (y(t) - yp(t)).normSq // sse loss function - end if - end for - end forward - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backward propagate to calculate gradients using chain rules in O(n_seq) time. - * FIX - add option of using sse loss function and fix affected partial derivatives - */ - def backward (): Unit = - - import ActivationFun.{sigmoidD, tanhD} - - // start back-propagation with the final/feed-forward (ff) layer (uses id for activation) - - val e = yp - y // negative error matrix - db_V = e.sumVr // vector of row sums - for t <- 0 until n_seq do dV += outer (e(t), h(t)) // outer vector product - val dh_ff = e * V // partial w.r.t. 
h: n_seq by n_mem matrix - var dh = new VectorD (dh_ff.dim2) // hold partial for hidden state (dh) @ time t - var dIn, dhr: VectorD = null - - // calculate the derivative contribution of each step and add them up - - for t <- n_seq-1 to 1 by -1 do // move back in time to t = 1 - dh += dh_ff(t) // update partial for hidden state (dh) @ time t - val dh_bk = dh // save dh - - dIn = dh * (_1 - z(t)) * tanhD (c(t)) // input to tanh for candidate mixin c - c += (dIn, x(t), h(t-1) * r(t)) // update partials for c mixin - dhr = Wc.Ƭ * dIn // Ƭ => matrix transpose - dh = dhr * r(t) - - dIn = dhr * h(t-1) * sigmoidD (r(t)) // input to sigmoid reset gate r - r += (dIn, x(t), h(t-1)) // update partials for r gate - dh += Wr.Ƭ * dIn + dh_bk * z(t) - - dIn = dh_bk * (h(t-1) - c(t)) * sigmoidD (z(t)) // input to sigmoid update gate z - z += (dIn, x(t), h(t-1)) // update partials for z gate - dh += Wz.Ƭ * dIn - end for - - // end case @ time t = 0 -> use h_m1 for hidden state - - dh += dh_ff(0) // update partial for hidden state (dh) @ t = 0 - - dIn = dh * (_1 - z(0)) * tanhD (c(0)) - c += (dIn, x(0), h_m1 * r(0)) // update partials for c mixin @ t = 0 - dhr = Wc.Ƭ * dIn - dh_m1 += dhr * r(0) - - dIn = dhr * h_m1 * sigmoidD (r(0)) - r += (dIn, x(0), h_m1) // update partials for r gate @ t = 0 - dh_m1 += Wr.Ƭ * dIn + dh * z(0) - - dIn = dh * (h_m1 - c(0)) * sigmoidD (z(0)) - z += (dIn, x(0), h_m1) // update partials for z gate @ t = 0 - dh_m1 += Wz.Ƭ * dIn - end backward - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on the calculated partial derivatives, update the parameters (weights - * and biases). 
- */ - def update_params (): Unit = - // update gate (z) - Uz -= z.dU * eta - Wz -= z.dW * eta - b_z -= z.db * eta - - // reset gate (r) - Ur -= r.dU * eta - Wr -= r.dW * eta - b_r -= r.db * eta - - // candidate state (c) - Uc -= c.dU * eta - Wc -= c.dW * eta - b_c -= c.db * eta - - // output layer - V -= dV * eta - b_V -= db_V * eta - end update_params - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `GRU` companion object provides factory methods. - */ -object GRU: - - import ActivationFun._ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `GRU` with automatic rescaling from a data matrix and response matrix. - * @param x the input/data matrix - * @param y the output/response matrix - * @param fname the feature/variable names - * @param n_mem the size of the hidden state (dimensionality of memory) - */ - def rescale (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): GRU = - val x_s = rescaleX (x, f_sigmoid) - val y_s = rescaleY (y, f_sigmoid)._1 - -// println (s" scaled: x = $x_s \n scaled y = $y_s") - new GRU (x_s, y_s, fname, n_mem) - end rescale - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Generate a fake sequence dataset: generate only one sentence for training. - * Only for testing. Needs to be changed to read in training data from files. - * The words are one-hot encoded into a column vector. - * @param n_seq the sequence size (number of time points/words) - * @param n_var the number of variables/word encoding size - */ -def genSequenceData (n_seq: Int, n_var: Int): (MatrixD, MatrixD) = - - import scalation.random.Randi - - assert (n_var > 2) // for start and end of sentence symbols - assert (n_seq > 0) - - // define start and end of sentence in the vocabulary - val SENTENCE_START = new VectorD (n_var) // start: [1, 0, 0, ...] 
- SENTENCE_START(0) = 1 - val SENTENCE_END = new VectorD (n_var) // end: [0, 1, 0, ...] - SENTENCE_END(1) = 1 - - println (s"SENTENCE_START = $SENTENCE_START") - println (s"SENTENCE_END = $SENTENCE_END") - - // generate sentence - val i_ran = Randi (0, n_var-3) // random integer generator - val z_t = new MatrixD (n_seq-1, n_var) // leave one slot for SENTENCE START - for t <- 0 until n_seq-1 do - // generate a random word excludes start and end symbol - z_t(t, i_ran.igen+2) = 1 // set a 1 in position to indicate a word - end for - - val x_t = SENTENCE_START +: z_t // training input matrix (prepend vector) - val y_t = z_t :+ SENTENCE_END // training output matrix (append vector) - - (x_t, y_t) -end genSequenceData - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest` main function tests the `GRU` class on randomly generated - * sequence data meant to represent encoded words - * > runMain scalation.modeling.forecasting.gRUTest - */ -@main def gRUTest (): Unit = - - val n_seq = 8 - val n_var = 5 - - val (x_t, y_t) = genSequenceData (n_seq, n_var) - - println (s"x_t = $x_t") - println (s"y_t = $y_t") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x_t, y_t) - mod.train () -// mod.test () - -end gRUTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest2` main function tests the `GRU` class on sequence data read as words - * in a file that encoded and pass into `GRU` - * > runMain scalation.modeling.forecasting.gRUTest2 - */ -@main def gRUTest2 (): Unit = - - println ("read words from a text file") - -// FIX - find example text - -end gRUTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest3` main function tests the `GRU` class on sequence/time series data - * corresponding to the lake level dataset using multiple lags. 
- * > runMain scalation.modeling.forecasting.gRUTest3 - */ -@main def gRUTest3 (): Unit = - - import Example_LakeLevels.y - val lags = 2 // number of lags to include - val hh = 2 // forecasting horizon - FIX - currently lags == hh - - val y_s = scaleV (extreme (y), (-2.0, 2.0))(y) // rescale y to active domain of sigmoid, tanh - - val (x, yy) = buildMatrix4TS (y_s, lags, hh) // column for each lag - - println (s"x.dims = ${x.dims}, yy.dims = ${yy.dims}") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x, yy) // call constructor - mod.train () -// mod.test () - -end gRUTest3 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/QuadRegressionMV4TS.scala.bak b/src/main/scala/scalation/modeling/forecasting_old/old/QuadRegressionMV4TS.scala.bak deleted file mode 100644 index 84dc3db20..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/QuadRegressionMV4TS.scala.bak +++ /dev/null @@ -1,380 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Feb 13 16:22:21 EST 2022 - * @see LICENSE (MIT style license file). - * - * @title Model: Quadratic Multi-Variate Regression for Time Series - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ - -import neuralnet.RegressionMV - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QuadRegressionMV4TS` object supports regression for Time Series data. - * Given a response vector y, and a predictor matrix x is built that consists of - * lagged y vectors. Additional future response vectors are built for training. - * y_t = b dot x - * where x = [y_{t-1}, y_{t-2}, ... y_{t-lag}]. - * Matrix x includes constant, linear and quadratic terms. 
- */ -object QuadRegressionMV4TS: - - private val debug = debugf ("QuadRegressionMV4TS", true) // debug function - private val flaw = flawf ("QuadRegressionMV4TS") // flaw function - private val MISSING = -0.0 // missing value - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionMV` object from a Time Series response vector y. - * The input/data matrix x is formed from the lagged y vectors as columns in matrix x. - * Quadratic terms are added to the model, one for each lag. - * @param y the original un-expanded output/response vector - * @param lag the maximum lag included (inclusive) - * @param h the forecasting horizon (1, 2, ... h) - * @param intercept whether to add a column of all ones to the matrix (intercept) - * @param hparam the hyper-parameters ((use Regression.hp for default) - */ - def apply (y: VectorD, lag: Int, h: Int, intercept: Boolean = true, - hparam: HyperParameter = Regression.hp): RegressionMV = - var (x, yy) = buildMatrix4TS (y, lag, h) // column for each lag - x = x ++^ x~^2 // add quadratic terms - if intercept then x = VectorD.one (yy.dim) +^: x // add first column of all ones - - val mod = new RegressionMV (x, yy, null, hparam) - mod.modelName = s"QuadRegressionMV4TS_$lag" - mod - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionMV` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * In addition, lagged exogenous variables are added. - * @param y the original un-expanded output/response vector - * @param lag the maximum lag included (inclusive) - * @parax ex the input matrix for 1st exogenous variable - * @parax ex2 the input matrix for 2nd exogenous variable (optional) - * @parax ex3 the input matrix for 3rd exogenous variable (optional) - * @param h the forecasting horizon (1, 2, ... 
h) - * @param intercept whether to add a column of all ones to the matrix (intercept) - * @param hparam the hyper-parameters (use Regression.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - def exo (y: VectorD, lag: Int, ex: VectorD, ex2: VectorD = null, ex3: VectorD = null, - h: Int, intercept: Boolean = true, hparam: HyperParameter = Regression.hp) - (elag1: Int = max (1, lag / 5), - elag2: Int = max (1, lag)): RegressionMV = - var (x, yy) = buildMatrix4TS (y, lag, h) // column for each lag - if intercept then x = VectorD.one (yy.dim) +^: x // add first column of all ones - val endoCols = x.dim2 - println (s"endogenous: columns = $endoCols") - - var xx = buildMatrix4TS_exo (ex, lag, elag1, elag2) - x = x ++^ xx // add columns for 1st lagged exo var - if ex2 != null then - val xx2 = buildMatrix4TS_exo (ex2, lag, elag1, elag2) - x = x ++^ xx2 // add columns for 2nd lagged exo var - end if - if ex3 != null then - val xx3 = buildMatrix4TS_exo (ex3, lag, elag1, elag2) - x = x ++^ xx3 // add columns for 2nd lagged exo var - end if - println (s"exogenous: columns = ${x.dim2 - endoCols}") - - println (s"exo: x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// println (s"exo: x = $x \n yy = $yy") - val mod = new RegressionMV (x, yy, null, hparam) - mod.modelName = s"QuadRegressionMV4TS.exo_$lag" - mod - end exo - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionMV` object from a response vector to fit a quadratic - * surface to Time Series data. The input/data matrix x is formed from the - * lagged y vectors as columns in matrix x. - * In addition, lagged exogenous variables are added. - * @param y the original un-expanded output/response vector - * @param lag the maximum lag included (inclusive) - * @parax ex the input matrix for exogenous variables (one per column) - * @param h the forecasting horizon (1, 2, ... 
h) - * @param intercept whether to add a column of all ones to the matrix (intercept) - * @param hparam the hyper-parameters (use Regression.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def exo (y: VectorD, lag: Int, ex: MatrixD, h: Int, - intercept: Boolean = true, hparam: HyperParameter = Regression.hp) - (elag1: Int = max (1, lag / 5), - elag2: Int = max (1, lag)): RegressionMV = - var (x, yy) = buildMatrix4TS (y, lag, h) // column for each lag - x = x ++^ x~^2 // add quadratic terms - if intercept then x = VectorD.one (yy.dim) +^: x // add first column of all ones - val endoCols = x.dim2 - println (s"endogenous: columns = $endoCols") - - x = x ++^ Regression4TS.makeExoCols (lag, ex, elag1, elag2) // add columns for each lagged exo var - println (s"exogenous: columns = ${x.dim2 - endoCols}") - - println (s"exo: x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// println (s"exo: x = $x \n yy = $yy") - val mod = new RegressionMV (x, yy, null, hparam) - mod.modelName = s"QuadRegressionMV4TS.exo$lag" - mod - end exo - -end QuadRegressionMV4TS - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `quadRegressionMV4TSTest` main function tests the `QuadRegressionMV4TS` object. - * This test is used to CHECK that the buildMatrix4TS function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. 
- * > runMain scalation.modeling.forecasting.quadRegressionMV4TSTest - */ -@main def quadRegressionMV4TSTest (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - val h = 3 // the forecasting horizon - - for p <- 5 to 5 do // autoregressive hyper-parameter p - banner (s"Test: QuadRegressionMV4TS with $p lags") - val mod = QuadRegressionMV4TS (y, p, h) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yy = mod.getY - val yp = mod.predict (mod.getX) - for k <- yp.indices2 do - new Plot (null, yy(?, k), yp(?, k), s"yy_$k vs. yp_$k for ${mod.modelName} (h=${k+1}) with $p lags", lines = true) - end for - end for - -end quadRegressionMV4TSTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `quadRegressionMV4TSTest2` main function tests the `RegressionMV4TS` class on real data: - * Forecasting lake levels. Uses quadratic regression. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.quadRegressionMV4TSTest2 - */ -@main def quadRegressionMV4TSTest2 (): Unit = - - import Example_LakeLevels.y - val m = y.dim - val h = 2 // the forecasting horizon - - for p <- 1 to 7 do // autoregressive hyper-parameter p - banner (s"Test: QuadRegressionMV4TS with $p lags") - val mod = QuadRegressionMV4TS (y, p, h) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - banner ("Predictions/Forecasts") // direct forecasting technique - val yy = mod.getY - val yf = mod.predict (mod.getX) - for k <- yf.indices2 do - new Plot (null, yy(?, k), yf(?, k), s"yy_$k vs. 
yf_$k for ${mod.modelName} (h=${k+1}) with $p lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - end for - -end quadRegressionMV4TSTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `quadRegressionMV4TSTest3` main function tests the `RegressionMV4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Uses quadratic regression, In-Sample Testing using - * endogenous variable. - * > runMain scalation.modeling.forecasting.quadRegressionMV4TSTest3 - */ -@main def quadRegressionMV4TSTest3 (): Unit = - - val LAGS = 7 // number of lags - val h = 4 // forecasting horizon - - val exo_vars = Array ("icu_patients") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample QuadRegressionMV4TS on COVID-19 Weekly Data") -// val mod = QuadRegressionMV4TS (y, LAGS, h) // create model for time series data - val mod = QuadRegressionMV4TS.rescale (y, LAGS, h) // create model for time series data - scaling - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(LAGS until y.dim) - - for k <- 0 until h do - new Plot (null, yy_, yp(?, k), s"${mod.modelName}, yy vs. 
yp @ h = $k", lines = true) - end for - - banner (s"Feature Selection Technique: Stepwise") - val (cols, rSq) = mod.stepRegressionAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionMV4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"Stepwise: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end quadRegressionMV4TSTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `quadRegressionMV4TSTest4` main function tests the `RegressionMV4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Uses quadratic regression, In-Sample Testing using endogenous - * and exogeneous variables. - * > runMain scalation.modeling.forecasting.quadRegressionMV4TSTest4 - */ -@main def quadRegressionMV4TSTest4 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample QuadRegressionMV4TS.exo on COVID-19 Weekly Data") - val mod = QuadRegressionMV4TS.exo (y, 10, ex, 4)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(10 until y.dim) - new Plot (null, yy_, yp(?, 0), s"${mod.modelName}, yy vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionMV4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end quadRegressionMV4TSTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `quadRegressionMV4TSTest5` main function tests the `QuadRegressionMV4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Uses Quadratic Regression. Does TnT Testing on endogenous - * and exogenous variables. Determine the terms to include in the model for TnT from using - * Stepwise on In-Sample. 
- * > runMain scalation.modeling.forecasting.quadRegressionMV4TSTest5 - */ -@main def quadRegressionMV4TSTest5 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample QuadRegressionMV4TS.exo on COVID-19 Weekly Data") - val mod = QuadRegressionMV4TS.exo (y, 10, ex, 4)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(10 until y.dim) - new Plot (null, yy_, yp(?, 0), s"${mod.modelName}, yy vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionMV4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run TnT on Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - val (x_, y_, xtest, ytest) = RegressionMV4TS.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) - new Plot (null, ytest(?, 0), 
yptest(?, 0), s"${mod.modelName}, ytest vs. yptest", lines = true) - -end quadRegressionMV4TSTest5 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `quadRegressionMV4TSTest6` main function tests the `QuadRegressionMV4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Uses Quadratic Regression. Does TnT Testing on endogenous - * and exogenous variables. Determine the terms to include in the model for TnT from using - * Stepwise on In-Sample. - * > runMain scalation.modeling.forecasting.quadRegressionMV4TSTest6 - */ -@main def quadRegressionMV4TSTest6 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample QuadRegressionMV4TS.exo on COVID-19 Weekly Data") - val mod = QuadRegressionMV4TS.exo (y, 10, ex, 4)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(10 until y.dim) - new Plot (null, yy_, yp(?, 0), s"${mod.modelName}, yy vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionMV4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run Rolling Validation on QuadRegressionMV4TS Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - RegressionMV4TS.rollValidate (bmod, 1) - -end quadRegressionMV4TSTest6 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/QuadSpline.scala.bak b/src/main/scala/scalation/modeling/forecasting_old/old/QuadSpline.scala.bak deleted file mode 100644 index 226eef599..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/QuadSpline.scala.bak +++ /dev/null @@ -1,236 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 1.6 - * @date Tue May 11 16:25:40 EDT 2021 - * @see LICENSE (MIT style license file). - * - * @title Model: Quadratic Spline - */ - -package scalation.analytics -package forecaster - -import scalation.linalgebra.{MatriD, MatrixD, VectoD, VectorD} -import scalation.plot.Plot -import scalation.util.banner - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QuadSpline` class fits quadratic splines to time-series data that are equally - * spaced in time. 
A sliding window consisting of three data points is perfectly fit - * to a quadratic curve. - *

    - * y_t = a + bt + ct^2 - *

    - * Note, slope matching and smoothness issues are ignored. - * @see wordsandbuttons.online/quadratic_splines_are_useful_too.html - * Any time point from t = 3 to the end of time series may be forecasted. - * @param y the time-series - * @param hparam the hyper-parameters - */ -class QuadSpline (y: VectoD, hp: HyperParameter = null) - extends ForecasterVec (y, 1) with NoFeatureSelectionF -{ - private val DEBUG = true // debug flag - private val shift = 2 // shift center from y_t or y_{t-2} - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including its current hyper-parameter. - */ - override def modelName: String = "QuadSpline" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on three points y_{t-1}, y_t, t_{t+1}, determine values for the - * coefficients 'a', 'b' and 'c'. - * @param t the center time point - */ - def splineFit (t: Int): (Double, Double, Double) = - { - val c = 0.5 * (y(t+1) - 2*y(t) + y(t-1)) - val b = 0.5 * (y(t+1) - y(t-1) - 4*c*t) - val a = y(t) - b*t - c*t*t - (a, b, c) - } // splineFit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Evaluate the spline function at time point 't', given the coefficients 'a', 'b' and 'c'. - * @param t the time - * @param a the constant term - * @param b the linear term coefficient - * @param c the quadratic term coefficient - */ - def spline (t: Double, a: Double, b: Double, c: Double): Double = - { - a + b*t + c*t*t - } // spline - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast a one-step ahead value for 'y_t' based on the quadratic curve fit to - * the previous three vales: y_{t-3}, y_{t-2}, t_{t-1}. 
- * @param t the time at which to forecast y - */ - def forecast1 (t: Int): Double = - { - if (t <= shift) y(t) - else { - val (a, b, c) = splineFit (t-shift) - spline (t, a, b, c) - } // if - } // forecast1 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit a `QuadSpline` model to the times-series data in vector 'y_'. - * Note: for `QuadSpline` there are no parameters to train. - * @param x_null the data/input matrix (ignored) - * @param y_ the response/output vector (currently only works for y) - */ - override def train (x_null: MatriD, y_ : VectoD): QuadSpline = { super.train (null, y_); this } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector (its null). - */ - def parameter: VectoD = null - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return a vector that is the predictions of a quad spline model, by making forecasts - * for all values from time 3 to the end of the time-series. Note, y_0, y_1 and y_2 - * can't have forecasts, since they would need a value for y_{-1}. - */ - override def predictAll (): VectoD = - { - val yf = new VectorD (m) // forecasts for all z - for (t <- 0 to shift) yf(t) = y(t) // copy actual value - for (t <- shift+1 until m) yf(t) = forecast1(t) // enter forecasted value - yf // return the vector of predicted values - } // predictAll - - def predictAllz (): VectoD = predictAll () - stats.mu - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all 'm' time points and all horizons (1 through 'h'-steps ahead). - * Record these in the 'yf' matrix, where - *

    - * yf(t, k) = k-steps ahead forecast for y_t - *

    - * Note, 'yf.col(0)' is set to 'y' (the actual time-series values). - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAll (h: Int): MatriD = - { - yf = new MatrixD (m, h+1) // forecasts for all time points t & horizons to h - yf.setCol (0, y) // first column is actual values, horizon 0 - for (k <- 1 to h) { - yf(0, k) = y(0) // copy first actual value - for (t <- 1 until m) { // forecast the rest - yf(t, k) = forecast1 (t) // FIX - implement for other h beyond 1 - } // for - if (DEBUG) println (s"forecastAll: yf.col ($k) = ${yf.col (k)}") - } // for - yf // return matrix of forecasted values - } // forecastAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test the curve produced by the multiple splines. - */ - def testCurve (): Unit = - { - println (s"y = $y") - for (i <- 1 until y.dim - 1) { - val (a, b, c) = splineFit(i) - for (j <- -2 to 2) { - val t = i + 0.5 * j - val f_t = spline (t, a, b, c) - print (s"spline($t) = $f_t \t") - } // for - println () - } // for - } // testCurve - -} // QuadSpline class - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QuadSpline` companion object provides factory methods for the `QuadSpline` class. - */ -object QuadSpline -{ - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `QuadSpline` object. - * @param y the response vector (time series data) - * @param hparam the hyper-parameters - */ - def apply (y: VectoD, hparam: HyperParameter = null): QuadSpline = - { - new QuadSpline (y, hparam) - } // apply - -} // RandomWalk object - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QuadSplineTest` object is used to test the `QuadSpline` class. - * Forecasting Fibonacci numbers. 
- * > runMain scalation.analytics.forecaster.QuadSplineTest - */ -object QuadSplineTest extends App -{ - val y = VectorD (1, 2, 3, 5, 8, 13, 21, 34, 55, 89) - - banner ("RandomWalk Model") - val rw = new RandomWalk (y) - rw.train (null, y).eval () - println (rw.report) - val yp = rw.predictAll () - new Plot (null, y, yp, "RandomWalk: y vs. yp", lines = true) - - banner ("QuadSpline Model") - val qs = new QuadSpline (y) - qs.train (null, y).eval () - println (qs.report) - qs.testCurve () - val yf = qs.predictAll () - new Plot (null, y, yf, "QuadSpline: y vs. yf", lines = true) - -} // QuadSplineTest object - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QuadSplineTest2` object is used to test the `QuadSpline` class. - * > runMain scalation.analytics.forecaster.QuadSplineTest2 - */ -object QuadSplineTest2 extends App -{ - // TBD - -} // QuadSplineTest object - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QuadSplineTest3` object is used to test the `QuadSpline` class. - * Forecasting lake levels. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.analytics.forecaster.QuadSplineTest3 - */ -object QuadSplineTest3 extends App -{ - import ForecasterVec.y - - banner ("RandomWalk Model") - val rw = new RandomWalk (y) - rw.train (null, y).eval () - println (rw.report) - val yp = rw.predictAll () - new Plot (null, y, yp, "RandomWalk: y vs. yp", lines = true) - - banner ("QuadSpline Model") - val qs = new QuadSpline (y) - qs.train (null, y).eval () - println (qs.report) - val yf = qs.predictAll () - new Plot (null, y, yf, "QuadSpline: y vs. yf", lines = true) - - val mix = (yp + yf) * 0.5 - new Plot (null, y, mix, "Mix: y vs. 
mix", lines = true) - -} // QuadSplineTest3 object - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak b/src/main/scala/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak deleted file mode 100644 index 8503d9309..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak +++ /dev/null @@ -1,237 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Feb 13 16:22:21 EST 2022 - * @see LICENSE (MIT style license file). - * - * @title Model: Regression for Time Series - */ - -package scalation -package modeling -package forecasting - -import scalation.mathstat._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Regression4TS` object supports regression for Time Series data. - * Given a response vector y, and a predictor matrix x is built that consists of - * lagged y vectors. - * y_t = b dot x - * where x = [y_{t-1}, y_{t-2}, ... y_{t-lag}]. - */ -object Regression4TS: - - private val debug = debugf ("Regression4TS", true) // debug function - private val flaw = flawf ("Regression4TS") // flaw function - private val MISSING = -0.0 // missing value - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `Regression` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matix x. 
- * @param y the original un-expanded output/response vector - * @param lag the maximum lag included (inclusive) - * @param hparam the hyper-parameters (use Regression.hp for default) - */ - def apply (y: VectorD, lag: Int, - hparam: HyperParameter = Regression.hp): Regression = - var (x, yy) = buildMatrix4TS (y, lag) // column for each lag - x = VectorD.one (yy.dim) +^: x // add first column of all ones - - debug ("apply", s" x = $x \n yy = $yy") - val mod = new Regression (x, yy, null, hparam) - mod.modelName = s"Regression4TS_$lag" - mod - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast h steps ahead using the recursive method, returning forecasts in - * matrix yf with columns: [1-step, 2-steps, ... h-steps]. - * @param yp the predicted response vector (horizon 1 forecasts) - * @param h the forecasting horizon - */ - def forecast (mod: Regression, yp: VectorD, h: Int): MatrixD = - val xx = mod.getX // get the predictor matrix - val b = mod.parameter // get the model parameters - val b_ = b(1 until b.dim) // paramters excluding intercept - - val yf = new MatrixD (yp.dim, h) // matrix to hold forecasts - yf(?, 0) = yp // column 0 is predicted values - for k <- 1 until h do // forecast into future: columns 1 to h-1 - for i <- yf.indices do - val xy = xx(i)(k+1 until xx.dim2) ++ yf(i)(0 until k) // last from xx ++ first from yf -// println (s"xy = $xy") - yf(i, k) = b(0) + (b_ dot xy) // record forecasted value - end for - end for - yf - end forecast - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `Regression` object that uses multiple regression to fit a quadratic - * surface to Time Series data. 
- * @param y the original un-expanded output/response vector - * @param lag the maximum lag included (inclusive) - * @param hparam the hyper-parameters ((use Regression.hp for default) - */ - def quadratic (y: VectorD, lag: Int, - hparam: HyperParameter = Regression.hp): Regression = - var (x, yy) = buildMatrix4TS (y, lag) // column for each lag - val xx = new MatrixD (x.dim, 2*x.dim2+1) - xx(?, 0) = VectorD.one (yy.dim) // add first column of all ones - for j <- x.indices2 do // add terms in an interleaved fashion - xx(?, 2*j+1) = x(?, j) // linear terms - xx(?, 2*j+2) = x(?, j)~^2 // add quadratic terms - end for - - debug ("quadratic", s" xx = $xx \n yy = $yy") - val mod = new Regression (xx, yy, null, hparam) - mod.modelName = s"Regression4TS.quadratic$lag" - mod - end quadratic - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast h steps ahead using the recursive method, returning forecasts in - * matrix yf with columns: [1-step, 2-steps, ... h-steps]. 
- * @param yp the predicted response vector (horizon 1 forecasts) - * @param h the forecasting horizon - */ - def forecastq (mod: Regression, yp: VectorD, h: Int): MatrixD = - val xx = mod.getX // get the predictor matrix - val b = mod.parameter // get the model parameters - val b_ = b(1 until b.dim) // paramters excluding intercept - - val yf = new MatrixD (yp.dim, h) // matrix to hold forecasts - yf(?, 0) = yp // column 0 is predicted values - for k <- 1 until h do // forecast into future: columns 1 to h-1 - for i <- yf.indices do - val xi = xx(i) - val yi = yf(i) - var sum = b(0) - var l = 0 - for j <- 1 until b.dim-1 by 2 do // add terms in an interleaved fashion - if j+k+1 < b.dim then - sum += b(j) * xi(j+k) // linear terms - sum += b(j+1) * xi(j+k+1) // add quadratic terms - else - sum += b(j) * yi(l) - sum += b(j+1) * yi(l)~^2 - l += 1 - end if - end for - yf(i, k) = sum // record forecasted value - end for - end for - yf - end forecastq - -end Regression4TS - -import Regression4TS._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest` main function tests the `Regression4TS` class. - * This test is used to CHECK that the buildMatrix4TS function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. - * > runMain scalation.modeling.forecasting.regression4TSTest - */ -@main def regression4TSTest (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - - for p <- 1 to 10 do // autoregressive hyper-parameter p - banner (s"Test: Regression4TS with $p lags") - val mod = Regression4TS (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yp = mod.predict (mod.getX) - new Plot (null, mod.getY, yp, s"y vs. 
yp for ${mod.modelName} with $p lags", lines = true) - end for - -end regression4TSTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest2` main function tests the `Regression4TS` class on real data: - * Forecasting lake levels. - * Test the test, predictAll, testForecast and forecastAll methods over the whole times-series. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.regression4TSTest2 - */ -@main def regression4TSTest2 (): Unit = - - import Example_LakeLevels.y - val m = y.dim - val h = 3 // the forecasting horizon - - for p <- 1 to 8 do // autoregressive hyper-parameter p - banner (s"Test: Regression4TS with $p lags") - val mod = Regression4TS (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) // parameter/coefficient statistics - - banner ("Predictions") - val yy = mod.getY // trimmed actual response vector - val yp = mod.predict (mod.getX) // predicted response vector - new Plot (null, mod.getY, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - println (s"yp = $yp") - - banner ("Forecasts") - val yf = forecast (mod, yp, h) // forecasted response matrix - for k <- yf.indices2 do - new Plot (null, yy, yf(?, k), s"yy vs. yf_$k for ${mod.modelName} with $p lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0) == yp) // first forecast = predicted values - - println (testForecast (mod, y, yf, p)) // QoF - end for - -end regression4TSTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest3` main function tests the `Regression4TS` class on real data: - * Forecasting lake levels. Uses quadratic regression. - * Test the test, predictAll, testf and forecastAll methods over the whole times-series. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.regression4TSTest3 - */ -@main def regression4TSTest3 (): Unit = - - import Example_LakeLevels.y - val m = y.dim - val h = 3 // the forecasting horizon - - for p <- 1 to 1 do // autoregressive hyper-parameter p - banner (s"Test: Regression4TS with $p lags") - val mod = Regression4TS.quadratic (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) // parameter/coefficient statistics - - banner ("Predictions") - val yy = mod.getY // trimmed actual response vector - val yp = mod.predict (mod.getX) - new Plot (null, mod.getY, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - println (s"yp = $yp") - - banner ("Forecasts") - val yf = forecastq (mod, yp, h) // forecasted response matrix - for k <- yf.indices2 do - new Plot (null, yy, yf(?, k), s"yy vs. yf_$k for ${mod.modelName} with $p lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0) == yp) // first forecast = predicted values - - println (testForecast (mod, y, yf, p)) // QoF - -// println (Fit.fitMap (mod.testf (k, y))) // evaluate k-units ahead forecasts - end for - -end regression4TSTest3 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak2 b/src/main/scala/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak2 deleted file mode 100644 index 898cc7ebf..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak2 +++ /dev/null @@ -1,529 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Tue Feb 22 23:14:31 EST 2022 - * @see LICENSE (MIT style license file). 
- * - * @title Model: Regression for Time Series - */ - -package scalation -package modeling -package forecasting - -import scala.math.{max, min} - -import scalation.mathstat._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Regression4TS` class supports regression for Time Series data. - * Given a response vector y, a predictor matrix x is built that consists of - * lagged y vectors, - * - * y_t = b dot x - * where x = [y_{t-1}, y_{t-2}, ... y_{t-lags}] - * - * @param x the input/predictor matrix built out of lags of y - * @param yy the output/response vector trimmed to match x.dim - * @param lags the maximum lag included (inclusive) - * @param fname the feature/variable names - * @param hparam the hyper-parameters (use Regression.hp for default) - */ -class Regression4TS (x: MatrixD, yy: VectorD, lags: Int, fname: Array [String] = null, - hparam: HyperParameter = Regression.hp) - extends Regression (x, yy, fname, hparam) - with ForecasterX (x, yy, lags): - - private val debug = debugf ("Regression4TS", true) // debug function - private val flaw = flawf ("Regression4TS") // flaw function - private val MISSING = -0.0 // missing value - - modelName = s"Regression4TS_$lags" - - debug ("init", s"$modelName: x.dims = ${x.dims}, yy.dim = ${yy.dim}") - -// FIX - add methods similar to those in Forecaster - may need another trait - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast h steps ahead using the recursive method, returning forecasts in - * matrix yf with columns: [1-step, 2-steps, ... h-steps]. 
- * @param yp the predicted response vector (horizon 1 forecasts) - * @param h the forecasting horizon - * def forecast (yp: VectorD, h: Int): MatrixD = - - * @param t the time point from which to make forecasts - * @param yf the forecasting matrix for the endogenous variable y (time x horizons) - * @param yx the matrix of endogenous y and exogenous x values - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, yx: MatrixD, h: Int): VectorD = -// val b_ = b(1 until b.dim) // parameters excluding intercept - - yf(?, 0) = yx(?, 1) // yp // column 0 is predicted values - for k <- 1 to h do // forecast into future: columns 1 to h-1 - for i <- yf.indices do - val xy = x(i)(k+1 until x.dim2) ++ yf(i)(0 until k) // last from x ++ first from yf -// println (s"xy = $xy") -// yf(i, k) = b(0) + (b_ dot xy) // record forecasted value - yf(i, k) = b dot xy // record forecasted value - end for - end for - yf(?, h-1) - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to forecasting matrix and return h-step ahead forecast. - * For 1-step ahead (h = 1), - * y_t = δ + φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time). 
- * @param yxf the forecasting tensor (time x horizons x variables) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * def forecastAt (yxf: TensorD, y_ : VectorD, h: Int): VectorD = - * @param yf the forecasting matrix for the endogenous variable y (time x horizons) - * @param yx the matrix of endogenous y and exogenous x values - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, yx: MatrixD, h: Int): VectorD = - if h < 1 then flaw ("forecastAt", s"horizon h = $h must be at least 1") - for t <- yx.indices do // make forecasts over all time points for horizon h - val t1 = t + h - 1 // time point prior to horizon - yf(t+h, h) = b dot yx(min (t1, yx.dim-1)) // forecast down the diagonal ?? - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end Regression4TS - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Regression4TS` companion object provides factory methods. - */ -object Regression4TS: - - private val debug = debugf ("Regression4TS", true) // debug function - private val flaw = flawf ("Regression4TS") // flaw function - - private val TREND = false // include quadratic trend - private val DAY = false // include day of the week effect - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `Regression4TS` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. 
- * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @param hparam the hyper-parameters (use Regression.hp for default) - */ - def apply (y: VectorD, lags: Int, - hparam: HyperParameter = Regression.hp): Regression4TS = - var (x, yy) = buildMatrix4TS (y, lags) // column for each lag - x = VectorD.one (yy.dim) +^: x // add first column of all ones - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - end if - if DAY then - val day = VectorI (for t <- yy.indices yield t % 7) - x = day.toDouble +^: x // add DAY of week as ordinal var - -// val dum = Variable.dummyVars (day) -// x = x ++^ dum // add DAY of week as dummy vars - end if - -// println (s"apply: x = $x \n yy = $yy") - new Regression4TS (x, yy, lags, null, hparam) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Make a matrix whose columns are lagged exogenous variables to be added to a data matrix. - * @param lags the maximum lag included (inclusive) for checking purposes - * @param ex the matrix of data for the exogenous variables - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def makeExoCols (lags: Int, ex: MatrixD, elag1: Int, elag2: Int): MatrixD = - var xx: MatrixD = buildMatrix4TS_exo (ex(?, 0), lags, elag1, elag2) - for j <- 1 until ex.dim2 do - xx = xx ++^ buildMatrix4TS_exo (ex(?, j), lags, elag1, elag2) - end for - println (s"addExoVars: collects lags of $ex.dim2 exo variables into #xx.dim2 columns") - xx - end makeExoCols - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `Regression4TS` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * In addition, lagged exogenous variables are added. 
- * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @parax ex the input matrix for exogenous variables (one per column) - * @param hparam the hyper-parameters (use Regression.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def exo (y: VectorD, lags: Int, ex: MatrixD, hparam: HyperParameter = Regression.hp) - (elag1: Int = max (1, lags / 5), - elag2: Int = max (1, lags)): Regression4TS = - var (x, yy) = buildMatrix4TS (y, lags) // column for each lag - x = VectorD.one (yy.dim) +^: x // add first column of all ones - val endoCols = x.dim2 - println (s"endogenous: columns = $endoCols") - - x = x ++^ makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var - println (s"exogenous: columns = ${x.dim2 - endoCols}") - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - end if - if DAY then - val day = VectorI (for t <- yy.indices yield t % 7) - val dum = Variable.dummyVars (day) - x = x ++^ dum // add DAY of week as dummy vars - end if - - println (s"exo: x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// println (s"exo: x = $x \n yy = $yy") - new Regression4TS (x, yy, lags, null, hparam) - end exo - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split the x matrix and y vector into training and testing sets. 
- * @param x the x data/input matrix - * @param y the y response/output vector - * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) - */ - def split_TnT (x: MatrixD, y: VectorD, ratio: Double = 0.30): (MatrixD, VectorD, MatrixD, VectorD) = - val n = x.dim - val tr_size = (n * (1.0 - ratio)).toInt - println (s"Regression4TS.split_TnT: tr_size = $tr_size, te_size = ${n - tr_size}") - (x(0 until tr_size), y(0 until tr_size), x(tr_size until n), y(tr_size until n)) - end split_TnT - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TESTING SET (tr) and a TRAINING SET (te) - * as follows: [ <-- tr_size --> | <-- te_size --> ] - * This version calls predict for one-step ahead out-of-sample forecasts. - * @see `RollingValidation` - * @param mod the forecasting model being used (e.g., `Regression4TS`) - * @param rc the retraining cycle (number of forecasts until retraining occurs) - */ - def rollValidate (mod: Predictor & Fit, rc: Int): Unit = - val x = mod.getX // get data/input matrix - val y = mod.getY // get response/output vector - val tr_size = RollingValidation.trSize (y.dim) // size of initial training set - val te_size = y.dim - tr_size // size of testing set - debug ("rollValidate", s"train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") - - val yp = new VectorD (te_size) // y-predicted over testing set - for i <- 0 until te_size do // iterate through testing set - val t = tr_size + i // next time point to forecast - if i % rc == 0 then mod.train (x(0 until t), y(0 until t)) // retrain on sliding training set - yp(i) = mod.predict (x(t-1)) // predict the next value - end for - - val (t, yy) = RollingValidation.align (tr_size, y) // align vectors - val df = max (0, mod.parameter.size - 1) // degrees of freedom for model - mod.resetDF (df, te_size - df) // reset degrees of freedom 
- new Plot (t, yy, yp, "Plot yy, yp vs. t", lines = true) - println (FitM.fitMap (mod.diagnose (yy, yp), QoF.values.map (_.toString))) - end rollValidate - -end Regression4TS - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest` main function tests the `Regression4TS` class. - * This test is used to CHECK that the buildMatrix4TS function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. - * > runMain scalation.modeling.forecasting.regression4TSTest - */ -@main def regression4TSTest (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - - for p <- 1 to 10 do // autoregressive hyper-parameter p - banner (s"Test: Regression4TS with $p lags") - val mod = Regression4TS (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yp = mod.predict (mod.getX) - new Plot (null, mod.getY, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - end for - -end regression4TSTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest2` main function tests the `Regression4TS` class on real data: - * Forecasting lake levels. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.regression4TSTest2 - */ -@main def regression4TSTest2 (): Unit = - - import Example_LakeLevels.y - val m = y.dim - val h = 2 // the forecasting horizon - - for p <- 1 to 8 do // autoregressive hyper-parameter p - banner (s"Test: Regression4TS with $p lags") - val mod = Regression4TS (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) // parameter/coefficient statistics - - banner ("Predictions") - val yy = mod.getY // trimmed actual response vector - val xx = mod.getX - val yp = mod.predict (xx) // predicted response vector - new Plot (null, yy, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - println (s"yp = $yp") - - banner ("Forecasts") -// val yf = mod.forecast (yp, h) // forecasted response matrix - val yf = mod.forecastAll (yy, xx, h) // forecasted response matrix - for k <- yf.indices2 do - new Plot (null, yy, yf(?, k), s"yy vs. yf_$k for ${mod.modelName} with $p lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0) == yp) // first forecast = predicted values -/* - banner ("Forecast QoF") - println (testForecast (mod, y, yf, p)) // QoF -// println (Fit.fitMap (mod.testf (k, y))) // evaluate k-units ahead forecasts -*/ - end for - -end regression4TSTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest3` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Daily Data. Does In-Sample Testing on Endogenous variable. 
- * > runMain scalation.modeling.forecasting.regression4TSTest3 - */ -@main def regression4TSTest3 (): Unit = - - val LAGS = 5 // number of lags of y - val h = 2 // forecasting horizon - - val exo_vars = Array ("icu_patients") // no exogenous variables in this case - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample Regression4TS on COVID-19 Weekly Data") - val mod = Regression4TS (y, LAGS) // create model for time series data - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - - banner (s"Multi-horizon forecasting using the recursive method") - val yf = mod.forecastAll (y, mod.getX, h) // forecasted response matrix - for k <- 0 to h do - new Plot (null, y, yf(?, k), s"y vs. 
yf_$k for ${mod.modelName} with $LAGS lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}, y.dim = ${y.dim}, yp.dim = ${yp.dim}") - val yf0 = yf(?, 0)(0 until y.dim) - val yf1 = yf(?, 1)(1 until y.dim) - Forecaster.differ (yf0, y) - Forecaster.differ (yf1, yp) - assert (yf0 =~ y) // zeroth forecast = actual values - assert (yf1 =~ yp) // first forecast = predicted values - -/* - banner (s"Feature Selection Technique: stepRegression") - val (cols, rSq) = mod.stepRegressionAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for Regression4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"Stepwise: rSq = $rSq") - val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") -*/ - -end regression4TSTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest4` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Does In-Sample Testing on endogenous and exogenous variables. 
- * > runMain scalation.modeling.forecasting.regression4TSTest4 - */ -@main def regression4TSTest4 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample Regression4TS.exo on COVID-19 Weekly Data") - val mod = Regression4TS.exo (y, 10, ex)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for Regression4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") - val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end regression4TSTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest5` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Does TnT Testing on endogenous and exogenous variables. - * Determine the terms to include in the model using Stepwise on In-Sample. 
- * > runMain scalation.modeling.forecasting.regression4TSTest5 - */ -@main def regression4TSTest5 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample Regression4TS.exo on COVID-19 Weekly Data") - val mod = Regression4TS.exo (y, 10, ex)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for Regression4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") - val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run TnT on Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - val (x_, y_, xtest, ytest) = Regression4TS.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) - new Plot (null, ytest, yptest, s"${mod.modelName}, ytest vs. 
yptest", lines = true) - -end regression4TSTest5 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest6` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Does Rolling Validation on endogenous and exogenous - * variables. Determine the terms to include in the model using Stepwise on In-Sample. - * > runMain scalation.modeling.forecasting.regression4TSTest6 - */ -@main def regression4TSTest6 (): Unit = - - val LAGS = 7 - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample Regression4TS.exo on COVID-19 Weekly Data") - val mod = Regression4TS.exo (y, LAGS, ex)(1, LAGS+1) // create model for time series data with exo - - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for Regression4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") - val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run Rolling Validation on Regression4TS Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - Regression4TS.rollValidate (bmod, 1) - -end regression4TSTest6 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest7` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Preliminary investigation of Symbolic Regression. - * > runMain scalation.modeling.forecasting.regression4TSTest7 - */ -@main def regression4TSTest7 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - - banner ("Plot Variables on COVID-19 Weekly Data") - - for lag <- 0 to 4 do - val xx_ = ex(lag until y.dim) - val yy_ = y(0 until y.dim - lag) -// new Plot (xx_, yy_, null, s"deaths vs. 
exo-vars @ lag = $lag") - - val mod = SymbolicRegression (xx_, yy_, null, collection.mutable.Set (1.0), cross = false) - mod.trainNtest ()() - println (mod.summary ()) - end for - -end regression4TSTest7 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak3 b/src/main/scala/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak3 deleted file mode 100644 index 9f95a3d63..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak3 +++ /dev/null @@ -1,535 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Tue Feb 22 23:14:31 EST 2022 - * @see LICENSE (MIT style license file). - * - * @title Model: Regression for Time Series - */ - -package scalation -package modeling -package forecasting - -import scala.math.{max, min} - -import scalation.mathstat._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Regression4TS` class supports regression for Time Series data. - * Given a response vector y, a predictor matrix x is built that consists of - * lagged y vectors, - * - * y_t = b dot x - * where x = [1, y_{t-1}, y_{t-2}, ... 
y_{t-lags}] - * - * @param x the input/predictor matrix built out of lags of y - * @param yy the output/response vector trimmed to match x.dim - * @param lags the maximum lag included (inclusive) - * @param fname the feature/variable names - * @param hparam the hyper-parameters (use Regression.hp for default) - */ -class Regression4TS (x: MatrixD, yy: VectorD, lags: Int, fname: Array [String] = null, - hparam: HyperParameter = Regression.hp) - extends Regression (x, yy, fname, hparam) - with ForecasterX (x, yy, lags): - - private val debug = debugf ("Regression4TS", true) // debug function - private val flaw = flawf ("Regression4TS") // flaw function - private val MISSING = -0.0 // missing value - - modelName = s"Regression4TS_$lags" - - debug ("init", s"$modelName: x.dims = ${x.dims}, yy.dim = ${yy.dim}") - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. - * Intended to work with rolling validation (analog of predict method) - * Must call `forecastAll` first. - * @param t the time point from which to make forecasts - * @param yf the forecasting matrix (time x horizons) - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - VectorD (for k <- 1 to h yield yf(t+k, k)) // get yf diagonal from time t - end forecast - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to forecasting matrix and return h-step ahead forecast. - * For 1-step ahead (h = 1), - * y_t = δ + φ_0 y_t-1 + φ_1 y_t-2 + ... 
+ φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time). - * @param yf the forecasting matrix for the endogenous variable y (time x horizons) - * @param yx the matrix of endogenous y and exogenous x values - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, yx: MatrixD, h: Int): VectorD = - if h < 1 then flaw ("forecastAt", s"horizon h = $h must be at least 1") - for t <- yx.indices do // make forecasts over all time points for horizon h - val t1 = t + h - 1 // time point prior to horizon - yf(t+h, h) = b dot yx(min (t1, yx.dim-1)) // forecast down the diagonal ?? - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a `Regression4TS` forecasting model y_ = f(x) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - * @param yx the matrix of endogenous y and exogenous x values - */ - def testF (h: Int, y_ : VectorD, yx: MatrixD): (VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, yx, h) // get and align actual and forecasted values - val params = x.dim2 - resetDF (params, yy.dim - params) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// differ (yy, yfh) // uncomment for debugging - (yfh, diagnose (yy, yfh)) // return predictions and QoF vector - end testF - -end Regression4TS - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Regression4TS` companion object provides factory methods. 
- */ -object Regression4TS: - - private val debug = debugf ("Regression4TS", true) // debug function - private val flaw = flawf ("Regression4TS") // flaw function - - private val TREND = false // include quadratic trend - private val DAY = false // include day of the week effect - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `Regression4TS` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @param hparam the hyper-parameters (use Regression.hp for default) - */ - def apply (y: VectorD, lags: Int, - hparam: HyperParameter = Regression.hp): Regression4TS = - var (x, yy) = buildMatrix4TS (y, lags) // column for each lag - x = VectorD.one (yy.dim) +^: x // add first column of all ones - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - end if - if DAY then - val day = VectorI (for t <- yy.indices yield t % 7) - x = day.toDouble +^: x // add DAY of week as ordinal var - -// val dum = Variable.dummyVars (day) -// x = x ++^ dum // add DAY of week as dummy vars - end if - - println (s"apply: x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// println (s"apply: x = $x \n yy = $yy") - new Regression4TS (x, yy, lags, null, hparam) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Make a matrix whose columns are lagged exogenous variables to be added to a data matrix. 
- * @param lags the maximum lag included (inclusive) for checking purposes - * @param ex the matrix of data for the exogenous variables - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def makeExoCols (lags: Int, ex: MatrixD, elag1: Int, elag2: Int): MatrixD = - var xx: MatrixD = buildMatrix4TS_exo (ex(?, 0), lags, elag1, elag2) - for j <- 1 until ex.dim2 do - xx = xx ++^ buildMatrix4TS_exo (ex(?, j), lags, elag1, elag2) - end for - println (s"addExoVars: collects lags of ${ex.dim2} exo variables into #xx.dim2 columns") - xx - end makeExoCols - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `Regression4TS` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * In addition, lagged exogenous variables are added. - * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @parax ex the input matrix for exogenous variables (one per column) - * @param hparam the hyper-parameters (use Regression.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def exo (y: VectorD, lags: Int, ex: MatrixD, hparam: HyperParameter = Regression.hp) - (elag1: Int = max (1, lags / 5), - elag2: Int = max (1, lags)): Regression4TS = - var (x, yy) = buildMatrix4TS (y, lags) // column for each lag - x = VectorD.one (yy.dim) +^: x // add first column of all ones - val endoCols = x.dim2 - println (s"endogenous: columns = $endoCols") - - x = x ++^ makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var - println (s"exogenous: columns = ${x.dim2 - endoCols}") - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - end if - if DAY then - val day = VectorI (for t <- 
yy.indices yield t % 7) - val dum = Variable.dummyVars (day) - x = x ++^ dum // add DAY of week as dummy vars - end if - - println (s"exo: x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// println (s"exo: x = $x \n yy = $yy") - new Regression4TS (x, yy, lags, null, hparam) - end exo - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split the x matrix and y vector into training and testing sets. - * @param x the x data/input matrix - * @param y the y response/output vector - * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) - */ - def split_TnT (x: MatrixD, y: VectorD, ratio: Double = 0.30): (MatrixD, VectorD, MatrixD, VectorD) = - val n = x.dim - val tr_size = (n * (1.0 - ratio)).toInt - println (s"Regression4TS.split_TnT: tr_size = $tr_size, te_size = ${n - tr_size}") - (x(0 until tr_size), y(0 until tr_size), x(tr_size until n), y(tr_size until n)) - end split_TnT - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TESTING SET (tr) and a TRAINING SET (te) - * as follows: [ <-- tr_size --> | <-- te_size --> ] - * This version calls predict for one-step ahead out-of-sample forecasts. 
- * @see `RollingValidation` - * @param mod the forecasting model being used (e.g., `Regression4TS`) - * @param rc the retraining cycle (number of forecasts until retraining occurs) - */ - def rollValidate (mod: Predictor & Fit, rc: Int): Unit = - val x = mod.getX // get data/input matrix - val y = mod.getY // get response/output vector - val tr_size = RollingValidation.trSize (y.dim) // size of initial training set - val te_size = y.dim - tr_size // size of testing set - debug ("rollValidate", s"train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") - - val yp = new VectorD (te_size) // y-predicted over testing set - for i <- 0 until te_size do // iterate through testing set - val t = tr_size + i // next time point to forecast - if i % rc == 0 then mod.train (x(0 until t), y(0 until t)) // retrain on sliding training set - yp(i) = mod.predict (x(t-1)) // predict the next value - end for - - val (t, yy) = RollingValidation.align (tr_size, y) // align vectors - val df = max (0, mod.parameter.size - 1) // degrees of freedom for model - mod.resetDF (df, te_size - df) // reset degrees of freedom - new Plot (t, yy, yp, "Plot yy, yp vs. t", lines = true) - println (FitM.fitMap (mod.diagnose (yy, yp), QoF.values.map (_.toString))) - end rollValidate - -end Regression4TS - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest` main function tests the `Regression4TS` class. - * This test is used to CHECK that the buildMatrix4TS function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. 
- * > runMain scalation.modeling.forecasting.regression4TSTest - */ -@main def regression4TSTest (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - - for p <- 1 to 10 do // autoregressive hyper-parameter p - banner (s"Test: Regression4TS with $p lags") - val mod = Regression4TS (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yp = mod.predict (mod.getX) - new Plot (null, mod.getY, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - end for - -end regression4TSTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest2` main function tests the `Regression4TS` class on real data: - * Forecasting lake levels. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.regression4TSTest2 - */ -@main def regression4TSTest2 (): Unit = - - import Example_LakeLevels.y - val m = y.dim - val h = 2 // the forecasting horizon - - for p <- 1 to 8 do // autoregressive hyper-parameter p - banner (s"Test: Regression4TS with $p lags") - val mod = Regression4TS (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) // parameter/coefficient statistics - - banner ("Predictions") - val yy = mod.getY // trimmed actual response vector - val xx = mod.getX - val yp = mod.predict (xx) // predicted response vector - new Plot (null, yy, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - println (s"yp = $yp") - - banner ("Forecasts") -// val yf = mod.forecast (yp, h) // forecasted response matrix - val yf = mod.forecastAll (yy, xx, h) // forecasted response matrix - for k <- yf.indices2 do - new Plot (null, yy, yf(?, k), s"yy vs. 
yf_$k for ${mod.modelName} with $p lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0) == yp) // first forecast = predicted values -/* - banner ("Forecast QoF") - println (testForecast (mod, y, yf, p)) // QoF -// println (Fit.fitMap (mod.testf (k, y))) // evaluate k-units ahead forecasts -*/ - end for - -end regression4TSTest2 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest3` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Daily Data. Does In-Sample Testing on Endogenous variable. - * > runMain scalation.modeling.forecasting.regression4TSTest3 - */ -@main def regression4TSTest3 (): Unit = - - val LAGS = 5 // number of lags of y - val h = 2 // forecasting horizon - - val exo_vars = Array.ofDim [String] (0) // no exogenous variables in this case - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample Regression4TS on COVID-19 Weekly Data") - val mod = Regression4TS (y, LAGS) // create model for time series data - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - - banner (s"Multi-horizon forecasting using the recursive method") - val yx = mod.getX - val yf = mod.forecastAll (y, yx, h) // forecasted response matrix - for k <- 0 to h do - new Plot (null, y, yf(?, k), s"y vs. 
yf_$k for ${mod.modelName} with $LAGS lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}, y.dim = ${y.dim}, yp.dim = ${yp.dim}") - val yf0 = yf(?, 0)(0 until y.dim) - val yf1 = yf(?, 1)(1 until y.dim) - Forecaster.differ (yf0, y) - Forecaster.differ (yf1, yp) - assert (yf0 =~ y) // zeroth forecast = actual values - assert (yf1 =~ yp) // first forecast = predicted values - - for k <- 1 to h do - val (yfh, qof) = mod.testF (k, y, yx) // k-steps ahead forecast and its QoF - println (s"Evaluate QoF for horizon $k:") - println (FitM.fitMap (qof, QoF.values.map (_.toString))) // evaluate k-steps ahead forecasts - end for - - banner (s"Feature Selection Technique: stepRegression") - val (cols, rSq) = mod.stepRegressionAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for Regression4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"Stepwise: rSq = $rSq") - val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end regression4TSTest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest4` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Does In-Sample Testing on endogenous and exogenous variables. 
- * > runMain scalation.modeling.forecasting.regression4TSTest4 - */ -@main def regression4TSTest4 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample Regression4TS.exo on COVID-19 Weekly Data") - val mod = Regression4TS.exo (y, 10, ex)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for Regression4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") - val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end regression4TSTest4 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest5` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Does TnT Testing on endogenous and exogenous variables. - * Determine the terms to include in the model using Stepwise on In-Sample. 
- * > runMain scalation.modeling.forecasting.regression4TSTest5 - */ -@main def regression4TSTest5 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample Regression4TS.exo on COVID-19 Weekly Data") - val mod = Regression4TS.exo (y, 10, ex)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for Regression4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") - val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run TnT on Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - val (x_, y_, xtest, ytest) = Regression4TS.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) - new Plot (null, ytest, yptest, s"${mod.modelName}, ytest vs. 
yptest", lines = true) - -end regression4TSTest5 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest6` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Does Rolling Validation on endogenous and exogenous - * variables. Determine the terms to include in the model using Stepwise on In-Sample. - * > runMain scalation.modeling.forecasting.regression4TSTest6 - */ -@main def regression4TSTest6 (): Unit = - - val LAGS = 7 - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample Regression4TS.exo on COVID-19 Weekly Data") - val mod = Regression4TS.exo (y, LAGS, ex)(1, LAGS+1) // create model for time series data with exo - - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for Regression4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") - val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run Rolling Validation on Regression4TS Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - Regression4TS.rollValidate (bmod, 1) - -end regression4TSTest6 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest7` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Preliminary investigation of Symbolic Regression. - * > runMain scalation.modeling.forecasting.regression4TSTest7 - */ -@main def regression4TSTest7 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - - banner ("Plot Variables on COVID-19 Weekly Data") - - for lag <- 0 to 4 do - val xx_ = ex(lag until y.dim) - val yy_ = y(0 until y.dim - lag) -// new Plot (xx_, yy_, null, s"deaths vs. 
exo-vars @ lag = $lag") - - val mod = SymbolicRegression (xx_, yy_, null, collection.mutable.Set (1.0), cross = false) - mod.trainNtest ()() - println (mod.summary ()) - end for - -end regression4TSTest7 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/RollingValidation.scala.bak b/src/main/scala/scalation/modeling/forecasting_old/old/RollingValidation.scala.bak deleted file mode 100644 index 6480ab5d3..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/RollingValidation.scala.bak +++ /dev/null @@ -1,478 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Wed Jun 17 12:08:35 EDT 2020 - * @see LICENSE (MIT style license file). - * - * @title Model Framework: Rolling Validation for Forecasters - */ - -package scalation -package modeling -package forecasting - -import scalation.mathstat._ - -import Fit._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Chop the testing te and training tr datasets out of the full dataset - * for rolling validation where the training set is before the testing set. - * @param x the full data/input matrix - * @param y the full response/output vector - * @param te the start (inclusive) of the testing region - * @param te_size the size of the testing region - * @param tr_size the size of the training region - */ -def chopr (x: MatrixD, y: VectorD, te: Int, te_size: Int, tr_size: Int): - (MatrixD, VectorD, MatrixD, VectorD) = - val DEBUG = false // debug flag - - val te2 = te + te_size // end (exclusive) of testing region - val tr = te - tr_size // start of training region - - val x_e = x(te until te2) // testing data/input matrix - val y_e = y(te until te2) // testing response/output vector - val x_ = x(tr until te) // training data/input matrix - val y_ = y(tr until te) // training response/output vector - - if DEBUG then - println (s"test: x_e($te .. 
${te2 - 1})") - println (s"test: y_e($te .. ${te2 - 1})") - println (s"train: x_($tr .. ${te - 1})") - println (s"train: y_($tr .. ${te - 1})") - end if - - (x_e, y_e, x_, y_) -end chopr - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Chop the testing te and training tr datasets out of the full dataset - * for rolling validation where the training set is before the testing set. - * This version works for models without an x componenet, only y. - * @param y the full response/output vector - * @param te the start (inclusive) of the testing region - * @param te_size the size of the testing region - * @param tr_size the size of the training region - */ -def chopr (y: VectorD, te: Int, te_size: Int, tr_size: Int): - (VectorD, VectorD) = - val DEBUG = false // debug flag - - val te2 = te + te_size // end (exclusive) of testing region - val tr = te - tr_size // start of training region - - val y_e = y(te until te2) // testing response/output vector - val y_ = y(tr until te) // training response/output vector - - if DEBUG then - println (s"test: y_e($te .. ${te2 - 1})") - println (s"train: y_($tr .. ${te - 1})") - end if - - (y_e, y_) -end chopr - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Shift the training dataset right by d2 = xy2._2.dim instances, filling in from - * the testing dataset. Used to update the training dataset before retraining, - * e.g., in rolling validation. 
- * @param xy1 the training dataset (matrix, vector) - * @param xy2 the portion of the testing dataset to be shifted in (matrix, vector) - */ -def shiftr (xy1: (MatrixD, VectorD), xy2: (MatrixD, VectorD)): (MatrixD, VectorD) = - val d1 = xy1._2.dim // number of training instances - val d2 = xy2._2.dim // number of testing instances to shift in - val gap = d1 - d2 // gap from training to be keep - if xy1._1.dim != d1 then println ("shiftr: dimension mismatch between matrix and vector in xy1") - if xy2._1.dim != d2 then println ("shiftr: dimension mismatch between matrix and vector in xy2") - if gap < 1 then println ("shiftr: no gap => nothing needed from training set") - - val x = new MatrixD (d1, xy1._1.dim2) - val y = new VectorD (d1) - for i <- y.indices do - if i < gap then - for j <- x.indices2 do x(i, j) = xy1._1(i+d2, j) - y(i) = xy1._2(i+d2) - else - for j <- x.indices2 do x(i, j) = xy2._1(i-gap, j) - y(i) = xy2._2(i-gap) - end if - end for - (x, y) -end shiftr - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Shift the training dataset right by d2 = y2.dim instances, filling in from - * the testing dataset. Used to update the training dataset before retraining, - * e.g., in rolling validation. - * This version works for models without an x componenet, only y. 
- * @param y1 the training dataset (vector) - * @param y2 the portion of the testing dataset to be shifted in (vector) - */ -def shiftr (y1: VectorD, y2: VectorD): VectorD = - val d1 = y1.dim // number of training instances - val d2 = y2.dim // number of testing instances to shift in - val gap = d1 - d2 // gap from training to be keep - if gap < 1 then println ("shiftr: no gap => nothing needed from training set") - - val y = new VectorD (d1) - for i <- y.indices do y(i) = if i < gap then y1(i+d2) else y2(i-gap) - y -end shiftr - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RollingValidation` object provides 1-fold rolling validations, e.g., - * for m = 1200 and k = 1, kt = 5: - * 1: tr(ain) 0 until 600, te(st) 600 until 1200 - * In rolling validation for this case, each retraining dataset has 600 instances, - * while the testing dataset has 600. Re-training occurs before every kt = 2 - * forecasts are made. - */ -object RollingValidation: - - private val debug = debugf ("RollingValidation", true) // debug function - private val flaw = flawf ("RollingValidation") // debug function - private val DEBUG2 = false // verbose debug flag - private val TR_RATIO = 0.5 // min ratio train to full datasets - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Calculate the size (number of instances) for a training dataset. - * @param m the size of the full dataset - */ - def trSize (m: Int): Int = (m * TR_RATIO).toInt - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling 1-fold cross-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a test dataset and a training dataset. - * The test dataset is defined by a range of indices (test start until start + te_size) - * and tr_size of the data before this is the training dataset. 
- *------------------------------------------------------------------------- - * This version is for models that have an x component and y component, e.g., `Regression4TS`. - * @see analytics.package.scala for chopr and shiftr methods - * @param mod the forecastering model being used (e.g., `QuadRegression4TS`) - * @param kt_ the frequency of re-training (number of forecasts to make before re-training) (defaults to 5) - * @param h the forecasting horizon, number of steps ahead to produce forecasts (defaults to 1) - */ - def crossValidate (mod: Regression4TS, kt_ : Int = 5, h: Int = 1): Array [Statistic] = - val x = mod.getX // get the (opt. expanded) data/input matrix - val y = mod.getY // get the (opt. expanded) response/output vector - val m = y.dim // number of instances in full dataset - val tr_size = trSize (m) // size of each training dataset - val te_size = m - tr_size // size of each testing dataset - val kt = if (kt_ < 0) te_size else kt_ // given size or size of testing dataset - - debug ("crossValidate", s"m = $m, tr_size = $tr_size, te_size = $te_size, kt = $kt, h = $h") - - if kt < h then flaw ("crossValidate", s"kt = $kt must be at least h = $h") - - val stats = qofStatTable // table of statistics for QoF measures - var te = tr_size // start of initial testing region - - banner (s"crossValidate: iteration 0: test start te = $te") - val (x_e, y_e, x_, y_) = chopr (x, y, te, te_size, tr_size) // chop out testing and training regions - - var xy = (x_, y_) // initial training dataset (matrix, vector) - var ym = xy._2.mean // mean of actual training response - val yf = new VectorD (y_e.dim) // vector to hold forecasts - var rt = 0 // re-training counter - - for i <- y_e.indices do // iterate thru testing instances - if i % kt == 0 then // trigger re-training every kt-th iteration - rt += 1 - if i > 0 then - xy = shiftr (xy, (x_e(i-kt until i), y_e(i-kt until i))) // update training dataset by shifting -// ym = xy._2.mean // update training mean - end if - 
mod.train (xy._1, xy._2) // periodically re-train model on updated training dataset - if (DEBUG2) println (s"crossValidate: rt = $rt, parameter = ${mod.parameter}") - end if -// yf(i) = mod.predict (x_e(i)) // save i-th forecasted value for h = 1 - yf(i) = mod.forecast (x_e(i), h)(i, h) // save i-th forecasted value - FIX - end for - -// FIX - what should the mean be: ym (from tr) or ym2 (from te)? -// val ym2 = y_e.mean -// mod.eval (ym, y_e, yf) // evaluate model on testing dataset - - val qof = mod.diagnose (y_e, yf) // get Quality of Fit (QoF) measures - tallyQof (stats, qof) - debug ("crossValidate", s"number of re-trainings rt = $rt \nqof = " + qof) - debug ("crossValidate", mod.report (qof) + "\n" + mod.summary) - new Plot (null, y_e, yf, s"crossValidate (h = $h): ${mod.modelName} fold 0", lines = true) - // plot actual test response against forecasted test response - stats // return the statistics table - end crossValidate - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling 1-fold cross-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a test dataset and a training dataset. - * The test dataset is defined by a range of indices (test start until start + te_size) - * and tr_size of the data before this is the training dataset. - *------------------------------------------------------------------------- - * This version is for models that have no x component, only the y component, e.g., `AR`. - * @see analytics.package.scala for chopr and shiftr methods - * @param mod the forecastering model being used (e.g., `ARIMA`) - * @param kt_ the frequency of re-training (number of forecasts to make before re-training) (defaults to 5) - * @param h the forecasting horizon, number of steps ahead to produce forecasts (defaults to 1) - */ - def crossValidate2 (mod: Forecaster & Fit, kt_ : Int = 5, h: Int = 1): Array [Statistic] = - val y = mod.getY // get the (opt. 
expanded) response/output vector - val m = y.dim // number of instances in full dataset - val tr_size = trSize (m) // size of each training dataset - val te_size = m - tr_size // size of each testing dataset - val kt = if kt_ < 0 then te_size else kt_ // given size or size of testing dataset - - debug ("crossValidate2", s"m = $m, tr_size = $tr_size, te_size = $te_size, kt = $kt, h = $h") - - if kt < h then flaw ("crossValidate2", s"kt = $kt must be at least h = $h") - - val stats = qofStatTable // table of statistics for QoF measures - var te = tr_size // start of initial testing region - - banner (s"crossValidate2: iteration 0: test start te = $te") - val (y_e, y_) = chopr (y, te, te_size, tr_size) // chop out testing and training regions - - var yy = y_ // initial training dataset (vector) - var ym = yy.mean // mean of actual training response - val yf = new VectorD (y_e.dim) // vector to hold forecasts - var rt = 0 // re-training counter - -// for i <- y_e.indices do // iterate thru testing instances - for i <- 0 until yf.dim-h+1 do // iterate thru testing instances - if i % kt == 0 then // trigger re-training every kt-th iteration - rt += 1 - if i > 0 then - yy = shiftr (yy, y_e(i-kt until i)) // update training dataset by shifting -// ym = yy.mean // update training mean - end if - mod.train (null, yy) // periodically re-train model on updated training dataset - if (DEBUG2) println (s"crossValidate2: rt = $rt, parameter = ${mod.parameter}") - end if - // use time t = tr_size + i to adjust the index with respect to the original y - yf(i+h-1) = mod.forecastX (y, tr_size + i, h) // , i % kt) // save i-th forecasted value - end for - - for i <- 0 until h-1 do yf(i) = y_e(i) // when h > 1, fill in initial blanks in yf with actual y values - -// FIX - what should the mean be: ym (from tr) or ym2 (from te)? 
-// val ym2 = y_e.mean -// mod.eval (ym, y_e, yf) // evaluate model on testing dataset -// val e = y_e - yf // must create local e since the original e may be required for MA models -// mod.diagnose (e, y_e, yf) -// mod.evalf (y_e, yf) - - val qof = mod.diagnose (y_e, yf) // get Quality of Fit (QoF) measures - tallyQof (stats, qof) - debug ("crossValidate2", s"number of re-trainings rt = $rt \nqof = " + qof) - debug ("crossValidate2", mod.report (qof)) - new Plot (null, y_e, yf, s"crossValidate2 (h = $h): ${mod.modelName} fold 0", lines = true) - // plot actual test response against forecasted test response - stats // return the statistics table - end crossValidate2 - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling 1-fold cross-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a test dataset and a training dataset. - * The test dataset is defined by a range of indices (test start until start + te_size) - * and tr_size of the data before this is the training dataset. - *------------------------------------------------------------------------- - * This version is for models that have no x component, only the y component, e.g., `AR`. - * @see analytics.package.scala for chopr and shiftr methods - * @param mod the forecastering model being used (e.g., `ARIMA`) - * @param kt_ the frequency of re-training (number of forecasts to make before re-training) (defaults to 5) - * @param h the forecasting horizon, number of steps ahead to produce forecasts (defaults to 1) - * - def crossValidate2S (mod: SARIMA, kt_ : Int = 5, h: Int = 1): Array [Statistic] = - val y = mod.getY // get the (opt. 
expanded) response/output vector - val m = y.dim // number of instances in full dataset - val tr_size = trSize (m) // size of each training dataset - val te_size = m - tr_size // size of each testing dataset - val kt = if kt_ < 0 then te_size else kt_ // given size or size of testing dataset - - debug ("crossValidate2S", s"m = $m, tr_size = $tr_size, te_size = $te_size, kt = $kt, h = $h") - - if kt < h then flaw ("crossValidate2S", s"kt = $kt must be at least h = $h") - - val stats = qofStatTable // table of statistics for QoF measures - val te = tr_size // start of initial testing region - - banner (s"crossValidate2S: iteration 0: test start te = $te") - val (y_e, y_) = chopr (y, te, te_size, tr_size) // chop out testing and training regions - - var yy = y_ // initial training dataset (vector) - var ym = yy.mean // mean of actual training response - val yf = new VectorD (y_e.dim) // vector to hold forecasts - var rt = 0 // re-training counter - -// for i <- y_e.indices do // iterate thru testing instances - for i <- 0 until yf.dim-h+1 do // iterate thru testing instances - yy = y(i until i+tr_size) - mod.setTS (yy) - - if i % kt == 0 then // trigger re-training every kt-th iteration - rt += 1 - mod.train () // periodically re-train model on updated training dataset - if (DEBUG2) println (s"crossValidate2: rt = $rt") //, parameter = ${mod.parameter}") - else mod.updateFittedValues() // update the fitted values without retraining - - - // use time t = tr_size + i to adjust the index with respect to the original y - yf(i+h-1) = mod.forecast (yy.dim, h).last // save i-th forecasted value - end for - - for i <- 0 until h-1 do yf(i) = y_e(i) // when h > 1, fill in initial blanks in yf with actual y values - -// FIX - what should the mean be: ym (from tr) or ym2 (from te)? 
-// val ym2 = y_e.mean -// mod.eval (ym, y_e, yf) // evaluate model on testing dataset -// val e = y_e - yf // must create local e since the original e may be required for MA models -// mod.diagnose (e, y_e, yf) -// mod.eval (y_e, yf) - - val (yp, qof) = mod.test (y_e, yf) // get Quality of Fit (QoF) measures - tallyQof (stats, qof) - debug ("crossValidate2S", s"number of re-trainings rt = $rt \nqof = " + qof) - debug (crossValidate2S", mod.fitMap) - new Plot (null, y_e, yf, s"crossValidate2S (h = $h): ${mod.modelName} fold 0", lines = true) - // plot actual test response against forecasted test response - stats // return the statistics table - end crossValidate2S - */ - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling 1-fold cross-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a test dataset and a training dataset. - * The test dataset is defined by a range of indices (test start until start + te_size) - * and tr_size of the data before this is the training dataset. - *------------------------------------------------------------------------- - * This version is for models that have an x component and y component, e.g., `NeuralNet_3L1_4TS`. - * @see analytics.package.scala for chopr and shiftr methods - * @param mod the forecastering model being used (e.g., `NeuralNet_3L1_4TS`) - * @param kt_ the frequency of re-training (number of forecasts to make before re-training) (defaults to 50) - * @param h the forecasting horizon, number of steps ahead to produce forecasts (defaults to 1) - * - def crossValidate3 (mod: Forecaster, kt_ : Int = 50, h: Int = 1): Array [Statistic] = - val x = mod.getX // get the (opt. expanded) data/input matrix - val y = mod.getY // get the (opt. 
expanded) response/output vector - val m = y.dim // number of instances in full dataset - val tr_size = trSize (m) // size of each training dataset - val te_size = m - tr_size // size of each testing dataset - val kt = if (kt_ < 0) te_size else kt_ // given size or size of testing dataset - - debug("crossValidate3", s"m = $m, tr_size = $tr_size, te_size = $te_size, kt = $kt, h = $h") - - if kt < h then flaw ("crossValidate3", s"kt = $kt must be at least h = $h") - - val stats = qofStatTable // table of statistics for QoF measures - var te = tr_size // start of initial testing region - - banner (s"crossValidate3: iteration 0: test start te = $te") - val (x_e, y_e, x_, y_) = chopr (x, y, te, te_size, tr_size) // chop out testing and training regions - - var xy = (x_, y_) // initial training dataset (matrix, vector) - var ym = xy._2.mean // mean of actual training response - val yf = new VectorD (y_e.dim) // vector to hold forecasts - var rt = 0 // re-training counter - - for i <- y_e.indices do // iterate thru testing instances - if i % kt == 0 then // trigger re-training every kt-th iteration - rt += 1 - if i > 0 then - xy = shiftr (xy, (x_e(i-kt until i), y_e(i-kt until i))) // update training dataset by shifting -// ym = xy._2.mean // update training mean - end if - mod.train (xy._1, xy._2) // periodically re-train model on updated training dataset - if DEBUG2 then println (s"crossValidate3: rt = $rt, parameter = ${mod.parameter}") - end if -// yf(i) = mod.predict (x_e(i)) // save i-th forecasted value for h = 1 - yf(i) = mod.forecast (x_e, i, h) // save i-th forecasted value - end for - -// FIX - what should the mean be: ym (from tr) or ym2 (from te)? 
-// val ym2 = y_e.mean - mod.eval (ym, y_e, yf) // evaluate model on testing dataset - - val qof = mod.fitA(0).fit // get Quality of Fit (QoF) measures - tallyQof (stats, qof) - debug ("crossValidate3", s"number of re-trainings rt = $rt \nqof = " + qof) -// debug ("crossValidate3", mod.report (qof) + "\n" + mod.summary) - new Plot (null, y_e, yf, s"crossValidate3 (h = $h): ${mod.modelName} fold 0", lines = true) - // plot actual test response against forecasted test response - stats // return the statistics table - end crossValidate3 - */ - -end RollingValidation - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rollingValidationTest` object is used to test the crossValidate method - * in the `RollingValidation` object. - * > runMain scalation.analytics.forecaster.rollingValidationTest - */ -@main def rollingValidationTest (): Unit = - - import scalation.random.Normal - - val m = 1200 // number of instances - val x = new MatrixD (m, 2) // data/input matrix - val y = new VectorD (m) // response/output vector - val e = Normal (0, 20000000) // noise - - for i <- y.indices do - val j = i + 1 - x(i, 0) = 0.0000001 * (j - m/2)~^3 * - 5 * j - x(i, 1) = 10 * j - 0.0001 * j~^2 - y(i) = 10.0 + 3 * x(i, 0) + 2 * x(i, 1) + e.gen - end for - - val h = 1 // forecasting horizon, try changing - banner (s"Regression4TS full dataset results at forecasting horizon h = $h") - val mod = new Regression4TS (x, y, 3) - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - banner (s"Regression4TS rolling validation results at forecasting horizon h = $h") - FitM.showQofStatTable (RollingValidation.crossValidate (mod, h = h)) - -end rollingValidationTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rollingValidationTest2` object is used to test the 
crossValidate2 method - * in the `RollingValidation` object. - * > runMain scalation.analytics.forecaster.rollingValidationTest2 - */ -@main def rollingValidationTest2 (): Unit = - - import scalation.random.Normal - - val m = 1200 // number of instances - val y = new VectorD (m) // response/output vector - val e = Normal (0, 100) // noise - - y(0) = 50.0 - for i <- 1 until y.dim do y(i) = 0.8 * y(i-1) + e.gen - - println (s"y.min = ${y.min}, y.max = ${y.max}") - - val h = 2 // forecasting horizon, try changing - banner (s"AR full dataset results at forecasting horizon h = $h") - ARMA.hp("p") = 2 - val mod = new AR (y) // create an AR(p) model - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - banner (s"AR rolling validation validation results at forecasting horizon h = $h") - FitM.showQofStatTable (RollingValidation.crossValidate2 (mod, h = h)) - -end rollingValidationTest2 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/old/RollingValidation.scala.bak2 b/src/main/scala/scalation/modeling/forecasting_old/old/RollingValidation.scala.bak2 deleted file mode 100644 index bd55a1148..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/old/RollingValidation.scala.bak2 +++ /dev/null @@ -1,343 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Wed Jun 17 12:08:35 EDT 2020 - * @see LICENSE (MIT style license file). 
- * - * @title Model Framework: Rolling Validation for Forecasters - */ - -package scalation -package modeling -package forecasting - -import scalation.mathstat._ - -import Fit._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Chop the testing te and training tr datasets out of the full dataset - * for rolling validation where the training set is before the testing set. - * @param x the full data/input matrix - * @param y the full response/output vector - * @param te the start (inclusive) of the testing region - * @param te_size the size of the testing region - * @param tr_size the size of the training region - */ -def chopr (x: MatrixD, y: VectorD, te: Int, te_size: Int, tr_size: Int): - (MatrixD, VectorD, MatrixD, VectorD) = - val DEBUG = true // debug flag - - val te2 = te + te_size // end (exclusive) of testing region - val tr = te - tr_size // start of training region - - val x_e = x(te until te2) // testing data/input matrix - val y_e = y(te until te2) // testing response/output vector - val x_ = x(tr until te) // training data/input matrix - val y_ = y(tr until te) // training response/output vector - - if DEBUG then - println (s"chopr:test: x_e($te .. ${te2 - 1})") - println (s"chopr:test: y_e($te .. ${te2 - 1})") - println (s"chopr:train: x_($tr .. ${te - 1})") - println (s"chopr:train: y_($tr .. ${te - 1})") - end if - - (x_e, y_e, x_, y_) -end chopr - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Chop the testing te and training tr datasets out of the full dataset - * for rolling validation where the training set is before the testing set. - * This version works for models without an x componenet, only y. 
- * @param y the full response/output vector - * @param te the start (inclusive) of the testing region - * @param te_size the size of the testing region - * @param tr_size the size of the training region - */ -def chopr (y: VectorD, te: Int, te_size: Int, tr_size: Int): - (VectorD, VectorD) = - val DEBUG = true // debug flag - - val te2 = te + te_size // end (exclusive) of testing region - val tr = te - tr_size // start of training region - - val y_e = y(te until te2) // testing response/output vector - val y_ = y(tr until te) // training response/output vector - - if DEBUG then - println (s"chopr:test: y_e($te .. ${te2 - 1})") - println (s"chopr:train: y_($tr .. ${te - 1})") - end if - - (y_e, y_) -end chopr - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RollingValidation` object provides k-fold rolling-validation, e.g., - * for TR_RATIO = 0.5, m = 1000 and k = 10 the folds are defined as follows: - * 0: tr(ain) 0 until 500, te(st) 500 until 550 - * 1: tr(ain) 50 until 550, te(st) 550 until 600 - * 2: tr(ain) 100 until 600, te(st) 600 until 650 - * 3: tr(ain) 150 until 650, te(st) 650 until 700 - * 4: tr(ain) 200 until 700, te(st) 700 until 750 - * 5: tr(ain) 250 until 750, te(st) 750 until 800 - * 6: tr(ain) 300 until 800, te(st) 800 until 850 - * 7: tr(ain) 350 until 850, te(st) 850 until 900 - * 8: tr(ain) 400 until 900, te(st) 900 until 950 - * 9: tr(ain) 450 until 950, te(st) 950 until 1000 - * In rolling validation for this case, each re-training set has 500 instances, - * and the testing set has 500 as well, with folds of length 50. - * Re-training occurs for every fold. 
- */ -object RollingValidation: - - private val debug = debugf ("RollingValidation", true) // debug function - private val flaw = flawf ("RollingValidation") // debug function - private val DEBUG2 = false // verbose debug flag - private val TR_RATIO = 0.5 // min ratio train to full datasets - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Calculate the size (number of instances) for a training dataset. - * @param m the size of the full dataset - */ - def trSize (m: Int): Int = (m * TR_RATIO).toInt - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TESTING SET and a TRAINING SET. - * The test dataset is defined by a range of indices (test start until - * start + te_size) * and the data before this is the training dataset. - * This version calls predict for one-step ahead out-of-sample forecasts. - * @param mod the forecastering model being used (e.g., `ARIMA`) - * @param rc the retraining cycle (number of forecasting until retraining occurs) - */ - def rollValidate (mod: Forecaster & Fit, rc: Int): Unit = - val y = mod.getY // get the (opt. 
expanded) response/output vector - val m = y.dim // number of instances in full dataset - val tr_size = trSize (m) // size of each training set - val te_size = m - tr_size // size of each testing set - - debug ("rollValidate", s"m = $m, tr_size = $tr_size, te_size = $te_size, rc = $rc") - - val yp = new VectorD (te_size) // y-predicted over testing set - - for i <- 0 until te_size do // iterate through testing set - val t = tr_size + i // next time point to forecast - if i % rc == 0 then // retrain 0 until t, every rc forecasts - val y_ = y(0 until t) // slice out training set - mod.train (null, y_) // train on training set - end if - yp(i) = mod.predict (t-1, y) // predict the next value - end for - - val t = VectorD.range (tr_size, m) // relevant time range - val yy = y(tr_size until m) // actual response vector sliced - val df = mod.parameter.size - 1 // degrees of freedom for model - mod.resetDF (df, te_size - df) // reset degrees of freedom - new Plot (t, yy, yp, "Plot yy, yp vs. t", lines = true) - println (FitM.fitMap (mod.diagnose (yy, yp), QoF.values.map (_.toString))) - end rollValidate - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TESTING SET and a TRAINING SET. - * The test dataset is defined by a range of indices (test start until - * start + te_size) and the data before this is the training dataset. - * This version calls forecast for h-steps ahead out-of-sample forecasts. - * @param mod the forecastering model being used (e.g., `ARIMA`) - * @param rc the retraining cycle (number of forecasting until retraining occurs) - * @param h the forecasting horizon (h-steps ahead) - */ - def rollValidate (mod: Forecaster & Fit, rc: Int, h: Int): Unit = - val y = mod.getY // get the (opt. 
expanded) response/output vector - val yf = mod.forecastAll (y, h) // get the in-sample forecasting matrix - val m = y.dim // number of instances in full dataset - val tr_size = trSize (m) // size of each training set - val te_size = m - tr_size // size of each testing set - - debug ("rollValidate", s"m = $m, tr_size = $tr_size, te_size = $te_size, rc = $rc, h = $h") - - val yp = new VectorD (te_size) // y-predicted over testing set (only for h=1) - - for i <- 0 until te_size do // iterate through testing set - val t = tr_size + i // next time point to forecast - if i % rc == 0 then // retrain 0 until t, every rc forecasts - val y_ = y(0 until t) // slice out training set - mod.train (null, y_) // train on training set - end if - yp(i) = mod.predict (t-1, y) // predict the next value (only for h=1) - val yd = mod.forecast (t-1, yf, y, h) // forecast the next h-values - // yf is updated down its diagonals - debug ("rollValidate", s"for (i, t) = ($i, $t): yp($i) = ${yp(i)}, yd = $yd") - assert (yp(i) =~ yd(0)) // make sure h=1 forecasts agree with predictions - end for // yf is updated down its diagonals - - val t = VectorD.range (tr_size, m) // relevant time ranmge - val yy = y(tr_size until m) // actual response vector trimed - val df = mod.parameter.size - 1 // degrees of freedom for model - mod.resetDF (df, te_size - df) // reset degrees of freedom - new Plot (t, yy, yp, s"Plot yy, yp vs. t (h = 1)", lines = true) - - for k <- 1 to h do - val yfh = yf(tr_size until m, k) - new Plot (t, yy, yfh, s"Plot yy, yfh vs. 
t (h = $k)", lines = true) - println (FitM.fitMap (mod.diagnose (yy, yfh), QoF.values.map (_.toString))) - end for - - end rollValidate - -/* - val cp = mod.cap // maximum lag (how far into the past) - val st = te_size - cp // size of shift from original y -// val yf = new MatrixD (te_size+cp+h, h+2) // extend before and after - val yf = new MatrixD (y.dim+h, h+2) // extend before and after - for t <- 0 until te_size + cp do yf(t, 0) = y(st+t) // first column is the timestep (e.g., logical day) -// for t <- yf.indices do yf(t, h+1) = te_size + t // last column is time (logical day) - for t <- yf.indices do yf(t, h+1) = t // last column is time (logical day) -*/ - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use kf-fold rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TESTING SET and a TRAINING SET. - * The test dataset is defined by a range of indices (test start until start + te_size) - * and tr_size of the data before this is the training dataset. - * Further, divide the testing set into k FOLDS and let the training set be the - * tr_size elements before each fold. Call train, test and testF for each fold. - * @param mod the forecastering model being used (e.g., `ARIMA`) - * @param kf the number of testing folds - * @param h the forecasting horizon, number of steps ahead to produce forecasts (defaults to 1) - */ - def rollValidatek (mod: Forecaster & Fit, kf: Int = 10, h: Int = 1): Array [Statistic] = - val y = mod.getY // get the (opt. 
expanded) response/output vector - val m = y.dim // number of instances in full dataset - val tr_size = trSize (m) // size of each training set - val te_size = m - tr_size // size of each testing set - val t1_size = te_size / kf // size of each testing set fold - - debug ("rollValidatek", s"m = $m, tr_size = $tr_size, te_size = $te_size, kf = $kf, h = $h") - - if t1_size < h then flaw ("rollValidate", s"t1_size = $t1_size must be at least h = $h") - - val stats = qofStatTable // table of statistics for QoF measures - var te = tr_size // start of initial testing region - var tef = te // test start for fold 0 - - for fold <- 0 until kf do // iterate over each fold - banner (s"rollValidatek: fold $fold test start tef = $tef") - val (y_e, y_) = chopr (y, tef, t1_size, tr_size) // chop out testing and training regions - mod.train (null, y_) // train on training set - val (yp, qof) = mod.test (null, y_) // test predictions on in-sample testing - println (mod.report (qof)) // report prediction Quality of Fit (QoF) - - val (yfh, qofh) = mod.testF (h, y_e) // test forecasts on testing set fold - println (mod.report (qofh)) // report forecast Quality of Fit (QoF) - tallyQof (stats, qofh) - tef += t1_size // start test start for next fold - end for - - stats // return the statistics table - end rollValidatek - -end RollingValidation - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rollingValidationTest` main function is used to test the rollValidate method - * in the `RollingValidation` object. 
- * > runMain scalation.modeling.forecasting.rollingValidationTest - */ -@main def rollingValidationTest (): Unit = - - import scalation.random.Normal - - val m = 1200 // number of instances - val y = new VectorD (m) // response/output vector - val e = Normal (0, 100) // noise - - y(0) = 50.0 - for i <- 1 until y.dim do y(i) = 0.8 * y(i-1) + e.gen - - val p = 3 // order of the model - val h = 2 // forecasting horizon, try changing - println (s"y.min = ${y.min}, y.max = ${y.max}") - - banner (s"AR full dataset results at forecasting horizon h = $h") - - ARMA.hp("p") = p -// val mod = new AR (y) // create an AR(p) model - val mod = new ARMA (y) // create an ARMA(p, 0) model - - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - banner (s"AR rolling validation validation results at forecasting horizon h = $h") - FitM.showQofStatTable (RollingValidation.rollValidatek (mod, h = h)) - -end rollingValidationTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rollingValidationTest2` main function is used to test the rollValidate method - * in the `RollingValidation` object. 
- * > runMain scalation.modeling.forecasting.rollingValidationTest2 - */ -@main def rollingValidationTest2 (): Unit = - - import Example_LakeLevels.y - - val p = 3 // order of the model - val h = 2 // forecasting horizon, try changing - println (s"y.min = ${y.min}, y.max = ${y.max}") - - banner (s"AR full dataset results at forecasting horizon h = $h") - - ARMA.hp("p") = p -// val mod = new AR (y) // create an AR(p) model - val mod = new ARMA (y) // create an ARMA(p, 0) model - - val (yp, qof) = mod.trainNtest ()() // train-test model on full dataset - - val t = VectorD.range (49 until 97) // note original y must be shifted - new Plot (t, y(50 until 98), yp(49 until 97), "y, yp vs t 2nd half", lines = true) - - val rc = 2 // retrain cycle - banner (s"AR($p) one-step ahead rolling validation results") - RollingValidation.rollValidate (mod, rc) - - banner (s"AR($p) $h-steps rolling validation results") - RollingValidation.rollValidate (mod, rc, h) - -end rollingValidationTest2 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rollingValidationTest3` main function is used to test the rollValidate method - * in the `RollingValidation` object. 
- * > runMain scalation.modeling.forecasting.rollingValidationTest3 - */ -@main def rollingValidationTest3 (): Unit = - - val y = VectorD.range (1, 25) - - val h = 2 // forecasting horizon, try changing - banner (s"RW full dataset results at forecasting horizon h = $h") - val mod = new RandomWalk (y) // create an RW model - mod.train (null, y) // train the model on full dataset - - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - println (s"yp = $yp") // print prediction matrix - - val yf = mod.forecastAll (y, h) // produce all foreacts up horizon h - println (s"yf = $yf") // print forecast matrix - - banner (s"RW rolling validation validation results at forecasting horizon h = $h") - FitM.showQofStatTable (RollingValidation.rollValidatek (mod, 3, h)) - -end rollingValidationTest3 - diff --git a/src/main/scala/scalation/modeling/forecasting_old/tensor_forecast_matrix.txt b/src/main/scala/scalation/modeling/forecasting_old/tensor_forecast_matrix.txt deleted file mode 100644 index f08f0c7bf..000000000 --- a/src/main/scala/scalation/modeling/forecasting_old/tensor_forecast_matrix.txt +++ /dev/null @@ -1,45 +0,0 @@ - -/*---------------------------------------------------------------------------- - -The FORECASTING TENSOR yxf: Example Calculation for AR(3) - move back the diagonal -and up after reaching column 0. 
- -yxf | h=0 h=1 h=2 ------------------------ -t=0 | [1.0] 0.0 0.0 - | \ \ -t=1 | [2.0] 1.1 0.0 - | \ \ -t=2 | 3.0 [1.9] 0.9 - | \ \ -t=3 | 4.0 3.1 [2.1] - | \ \ -t=4 | 5.0 3.9 2.9 - | \ \ -t=4 | 6.0 5.1 2.9 - -yf(3, 2, 0) = a + rdot = a + b(0) * yxf(2, 1, 0) + b(1) * yxf(1, 0, 0) + b(2) * yxf(0, 0, 0) - -Each sheet represents a variable (n1 endogenous (y) and n2 exogenous (x)), -e.g., endogenous: new_deaths, new_deaths^2 - exogenous: icu_patients, hosp_patients, new_tests, people_vaccinated, people_vaccinated^2 - -TensorD: time x horizon x variable - 170 4 7 - -Model: max lags per variable; selected lags per variable - -Note: 'a' is the constant term and rdot multiplies the parameter vector 'b' times -elements in a diagonal in reverse. Also, the upper right triangle is unknowable -unless back-casting is used. - -Column h = 0: zeroth horizon forecasts are the actual (e.g., today's known) values in the time series -Column h = 1: horizon one forecasts are the one-step ahead (e.g., tomorrow's) forecasts -Column h = 2: horizon two forecasts are the two-steps ahead (e.g., day after tomorrow's) forecasts - -Row time t = 3: yxf(3, 0, 0) = 4.0 = the actual value for day 3, - yxf(3, 1, 0) = 3.1 = the one-step ahead forecast for day 3, made yesterday - yxf(3, 2, 0) = 2.1 = the two-steps ahead forecast for day 3, made two days ago - -----------------------------------------------------------------------------*/ - diff --git a/src/main/scala/scalation/modeling/neuralnet/CNN_1D.scala b/src/main/scala/scalation/modeling/neuralnet/CNN_1D.scala index 2542cd1e6..a6d12ab04 100644 --- a/src/main/scala/scalation/modeling/neuralnet/CNN_1D.scala +++ b/src/main/scala/scalation/modeling/neuralnet/CNN_1D.scala @@ -1,6 +1,6 @@ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller +/** @author John Miller, Riya Bangia * @version 2.0 * @date Wed Oct 28 20:43:47 EDT 2020 * @see LICENSE (MIT style license file). 
@@ -20,10 +20,10 @@ package modeling package neuralnet import scalation.mathstat._ +import scalation.modeling.forecasting.MakeMatrix4TS import ActivationFun._ import Initializer._ -import Optimizer._ import CoFilter_1D.conv @@ -35,36 +35,59 @@ import CoFilter_1D.conv * @param fname_ the feature/variable names (defaults to null) * @param nf the number of filters for this convolutional layer * @param nc the width of the filters (size of cofilters) + * @param pool the pooling window size (if pool > 1, pooling is applied) + * @param poolFun the pooling function to apply (e.g., CoFilter_1D.pool for max pooling, or + * CoFilter_1D.pool_a for average pooling) * @param hparam the hyper-parameters for the model/network * @param f the activation function family for layers 1->2 (input to hidden) * @param f1 the activation function family for layers 2->3 (hidden to output) * @param itran the inverse transformation function returns responses to original scale */ class CNN_1D (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, - nf: Int = 1, nc: Int = 3, hparam: HyperParameter = Optimizer.hp, + nf: Int = 1, nc: Int = 3, + pool: Int = 1, poolFun: (VectorD, Int) => VectorD = CoFilter_1D.pool_a, + hparam: HyperParameter = Optimizer.hp ++ MakeMatrix4TS.hp, f: AFF = f_reLU, f1: AFF = f_reLU, val itran: FunctionM2M = null) extends PredictorMV (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2): - private val debug = debugf ("CNN_1D", true) // debug function - private val flaw = flawf ("CNN_1D") // flaw function - private val eta = hp("eta").toDouble // learning rate -// private val bSize = hp("bSize").toInt // batch size - private val maxEpochs = hp("maxEpochs").toInt // maximum number of training epochs/iterations + private val debug = debugf ("CNN_1D", true) // debug function + private val flaw = flawf ("CNN_1D") // flaw function + private val eta = hparam("eta").toDouble // learning rate +// private val 
bSize = hparam("bSize").toInt // batch size + private val maxEpochs = hparam("maxEpochs").toInt // maximum number of training epochs/iterations private val (n, ny) = (x.dim2, y.dim2) - private val nz = n - nc + 1 + private val nz = n - nc + 1 // size without padding + private val pooled_nz = if pool > 1 then nz / pool else nz // after pooling + private val fcDim = nf * pooled_nz if nz < 2 then flaw ("init", s"the size of the hidden layer nz = $nz is too small") - private val c = weightVec (nc) // parameters (weights & biases) in to hid - private val b: NetParam = NetParam (weightMat (nz, ny), new VectorD (ny)) // parameters (weights & biases) hid to out + private val filt = Array.fill (nf)(new CoFilter_1D (nc)) // array of filters +// private val c = weightVec (nc) // parameters (weights & biases) in to hid +// private val b = NetParam (weightMat (nz, ny), new VectorD (ny)) + private val b = NetParam (weightMat (fcDim, ny), new VectorD (ny)) // parameters (weights & biases) hid to out - modelName = s"CNN_1D_${f.name}_${f1.name}" + _modelName = s"CNN_1D_${f.name}_${f1.name}" println (s"Create a CNN_1D with $n input, $nf filters and $ny output nodes") - private val filt = Array.fill (nf)(new CoFilter_1D (nc)) // array of filters + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the feature maps using all filters and concatenate them. + * The result is a matrix with dimensions (number of instances) × (nf * nz). 
+ * @parm x_ input matrix + */ + private def computeFeatureMaps (x_ : MatrixD): MatrixD = + val maps = for i <- 0 until nf yield // compute a feature map for each filter + val filterVec = filt(i).coef // vector for filter i + val convResult = CoFilter_1D.conv (filterVec, x_) // valid convolution on x_ + val activated = f.fM (convResult) // apply activation function on convolution result + if pool > 1 then activated.mmap (row => poolFun(row, pool)) else activated + + // Concatenate horizontally all feature maps (assumes same number of rows). + maps.reduce ((a, b) => a ++^ b) + end computeFeatureMaps //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Filter the i-th input vector with the f-th filter. @@ -73,9 +96,10 @@ class CNN_1D (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, */ def filter (i: Int, f: Int): VectorD = val xi = x(i) -// val ft = filt(f) + val ft = filt(f) + debug ("filter", s"ft = $ft") // delete once it works val xf = new VectorD (xi.dim - nc + 1) -// for j <- xf.indices fo xf(j) = ft.dot (xi, j) +// for j <- xf.indices do xf(j) = ft.dot (xi, j) // FIX -- dot xf end filter @@ -86,12 +110,16 @@ class CNN_1D (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, */ def updateFilterParams (f: Int, vec2: VectorD): Unit = filt(f).update (vec2) - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the parameters c and b. */ - override def parameters: NetParams = Array (NetParam (MatrixD.fromVector (c)), b) + override def parameters: NetParams = + val cMatrices = filt.map (filter => MatrixD.fromVector (filter.coef)) // create array of matrices, each representing one filter's coefs. + val cMat = cMatrices.reduce ((a, b) => a ++^ b) // concatenate them horizontally into one matrix. 
+ Array (NetParam (cMat), b) + end parameters - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Given training data x_ and y_, fit the parametera c and b. * This is a simple algorithm that iterates over several epochs using gradient descent. * It does not use batching nor a sufficient stopping rule. @@ -101,27 +129,29 @@ class CNN_1D (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, */ def train (x_ : MatrixD = x, y_ : MatrixD = y): Unit = println (s"train: eta = $eta") - var sse0 = Double.MaxValue // hold prior value of sse + var sse0 = Double.MaxValue // hold prior value of sse var (go, epoch) = (true, 1) cfor (go && epoch <= maxEpochs, epoch += 1) { - val φ = f.fM (conv (c, x_)) // φ = f(conv (c, X)) - val yp = f1.fM (b * φ) // Yp = f1(ZB) - val ε = yp - y // negative error E = Yp - Y - val δ1 = f1.dM (yp) ⊙ ε // delta matrix for y - val δ0 = f.dM (φ) ⊙ (δ1 * b.w.𝐓) // delta matrix for φ (transpose (𝐓)) - CNN_1D.updateParam (x_, φ, δ0, δ1, eta, c, b) - - val sse = (y_ - yp).normFSq // loss = sum of squared errors +// val φ = f.fM (conv (c, x_)) // φ = f(conv (c, X)) + val φ = computeFeatureMaps (x_) // compute concatenated feature maps from all filters + val yp = f1.fM (b * φ) // Yp = f1(ZB) + val ε = yp - y // negative error E = Yp - Y + val δ1 = f1.dM (yp) ⊙ ε // delta matrix for y + val δ0 = f.dM (φ) ⊙ (δ1 * b.w.ᵀ) // delta matrix for φ (transpose (ᵀ)) +// CNN_1D.updateParam (x_, φ, δ0, δ1, eta, c, b) + CNN_1D.updateParam (x_, φ, δ0, δ1, eta, filt, b, nz, pool) + + val sse = (y_ - yp).normFSq // loss = sum of squared errors debug ("train", s"sse for $epoch th epoch: sse = $sse") - if sse >= sse0 then go = false // return early if moving up - sse0 = sse // save prior sse + if sse >= sse0 then go = false // return early if moving up + sse0 = sse // save prior sse } // cfor end train -// val yp_ = f1.fM (f.fM (b * conv (c, x_))) // updated 
predictions +// val yp_ = f1.fM (f.fM (b * conv (c, x_))) // updated predictions - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Given training data x_ and y_, fit the parameters c and b. * Iterate over several epochs, where each epoch divides the training set into * batches. Each batch is used to update the weights. @@ -135,7 +165,7 @@ class CNN_1D (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, // estat.tally (epochs._2) end train2 - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Test a predictive model y_ = f(x_) + e and return its QoF vector. * Testing may be be in-sample (on the training set) or out-of-sample * (on the testing set) as determined by the parameters passed in. @@ -144,31 +174,46 @@ class CNN_1D (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, * @param y_ the testing/full response/output matrix (defaults to full y) */ def test (x_ : MatrixD = x, y_ : MatrixD = y): (MatrixD, MatrixD) = - val yp = predict (x_) // make predictions - val yy = if itran == null then y_ else itran (y_) // undo scaling, if used - e = yy - yp // RECORD the residuals/errors (@see `Predictor`) - val qof = MatrixD (for k <- yy.indices2 yield diagnose (yy(?, k), yp(?, k))).𝐓 // transpose (𝐓) - (yp, qof) // return predictions and QoF vector + val yp = predict (x_) // make predictions + val yy = if itran == null then y_ else itran (y_) // undo scaling, if used + e = yy - yp // RECORD the residuals/errors (@see `Predictor`) + val qof = MatrixD (for k <- yy.indices2 yield + diagnose (yy(?, k), yp(?, k))).ᵀ // transpose (ᵀ) + (yp, qof) // return predictions and QoF vector end test - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** 
Given a new input vector z, predict the output/response vector f(z). + * Formula: f1.f_ (b dot f.f_ (c *+ z)) + * With multiple filters, first convert z to a 1-row matrix, compute its feature map + * (which will have one row and (nf*nz) columns), and then apply the fully connected layer. * @param z the new input vector */ - def predict (z: VectorD): VectorD = f1.f_ (b dot f.f_ (c *+ z)) + def predict (z: VectorD): VectorD = + val zMatrix = MatrixD.fromVector (z) // convert vector to one-row matrix. + val phi = computeFeatureMaps (zMatrix) // matrix with 1 row and nf*nz columns, compute feature maps from all filters + val phiVec = phi(0) // extract first (and only) row as a vector + f1.f_ (b dot phiVec) // compute prediction using the fully-connected layer + end predict - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Given an input matrix z, predict the output/response matrix f(z). + * Formula: f1.fM (b * f.fM (conv (c, z))) * @param z the input matrix */ - override def predict (z: MatrixD = x): MatrixD = f1.fM (b * f.fM (conv (c, z))) + override def predict (z: MatrixD = x): MatrixD = + val phi = computeFeatureMaps (z) // matrix with dimensions (instances x (nf * nz)), + // compute concatenated feature maps from all filters + f1.fM (b * phi) // apply fully-connected layer to produce the predictions + end predict - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. 
* @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - def buildModel (x_cols: MatrixD): CNN_1D = - new CNN_1D (x_cols, y, null, nf, nc, hparam, f, f1, itran) + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): CNN_1D = + new CNN_1D (x_cols, y, null, nf, nc, pool, poolFun, hparam, f, f1, itran) end buildModel end CNN_1D @@ -182,22 +227,26 @@ object CNN_1D extends Scaling: def apply (xy: MatrixD): CNN_1D = ??? - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Create a `CNN_1D` with automatic rescaling from a data matrix and response matrix. - * @param x the input/data matrix with instances stored in rows - * @param y the output/response matrix, where y_i = response for row i of matrix x - * @param fname the feature/variable names (defaults to null) - * @param nf the number of filters for this convolutional layer - * @param nc the width of the filters (size of cofilters) - * @param hparam the hyper-parameters for the model/network - * @param f the activation function family for layers 1->2 (input to hidden) - * @param f1 the activation function family for layers 2->3 (hidden to output) - * @param itran the inverse transformation function returns responses to original scale + * @param x the input/data matrix with instances stored in rows + * @param y the output/response matrix, where y_i = response for row i of matrix x + * @param fname the feature/variable names (defaults to null) + * @param nf the number of filters for this convolutional layer + * @param nc the width of the filters (size of cofilters) + * @param pool the pooling window size (if pool > 1, pooling is applied) + * @param poolFun the pooling function to apply (e.g., CoFilter_1D.pool for max pooling, or + * CoFilter_1D.pool_a for average pooling) + * @param hparam the hyper-parameters 
for the model/network + * @param f the activation function family for layers 1->2 (input to hidden) + * @param f1 the activation function family for layers 2->3 (hidden to output) */ def rescale (x: MatrixD, y: MatrixD, fname: Array [String] = null, - nf: Int = 1, nc: Int = 3, hparam: HyperParameter = Optimizer.hp, + nf: Int = 1, nc: Int = 3, + pool: Int = 1, poolFun: (VectorD, Int) => VectorD = CoFilter_1D.pool, + hparam: HyperParameter = Optimizer.hp ++ MakeMatrix4TS.hp, f: AFF = f_reLU, f1: AFF = f_reLU): CNN_1D = - var itran: FunctionM2M = null // inverse transform -> original scale + var itran: FunctionM2M = null // inverse transform -> original scale val x_s = if scale then rescaleX (x, f) else x @@ -206,26 +255,66 @@ object CNN_1D extends Scaling: // val y_s = { val y_i = rescaleY (y, f_sigmoid); itran = y_i._2; y_i._1 } println (s" scaled: x = $x_s \n scaled y = $y_s") - new CNN_1D (x_s, y_s, fname, nf, nc, hparam, f, f1, itran) + new CNN_1D (x_s, y_s, fname, nf, nc, pool, poolFun, hparam, f, f1, itran) end rescale - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a matrix using `forecasting.ARX.buildMatrix` and call `rescale. 
+ * @param xe the input/data matrix of exogenous varaibles + * @param y the output/response vector, where y_i = response for element i of vector x + * @param fname the feature/variable names (defaults to null) + * @param nf the number of filters for this convolutional layer + * @param nc the width of the filters (size of cofilters) + * @param pool the pooling window size (if pool > 1, pooling is applied) + * @param poolFun the pooling function to apply (e.g., CoFilter_1D.pool for max pooling, or + * CoFilter_1D.pool_a for average pooling) + * @param hparam the hyper-parameters for the model/network + * @param f the activation function family for layers 1->2 (input to hidden) + * @param f1 the activation function family for layers 2->3 (hidden to output) + */ + def buildNrescale (xe: MatrixD, y: VectorD, fname: Array [String] = null, + nf: Int = 1, nc: Int = 3, + pool: Int = 1, poolFun: (VectorD, Int) => VectorD = CoFilter_1D.pool, + hparam: HyperParameter = Optimizer.hp ++ MakeMatrix4TS.hp, + f: AFF = f_reLU, f1: AFF = f_reLU): CNN_1D = + val xy = forecasting.ARX.buildMatrix (xe, y, hparam, false) + println (s" buildNrescale: xy.dims = ${xy.dims}, y.dim = ${y.dim}") + rescale (xy, MatrixD.fromVector (y), fname, nf, nc, pool, poolFun, hparam, f, f1) + end buildNrescale + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Update the parameters: the weights in the convolutional filter c and * the weights biases in the fully-connected layer b. 
- * @param x_ the training/full data/input matrix - * @param z the training/full response/output matrix - * @param δ0 the convolutional layer delta - * @param δ1 the fully-connectd layer delta - * @param c the convolution filter vector - * @param b the fully-connectd layer parameters + * @param x_ the training/full data/input matrix + * @param φ the training/full response/output matrix + * @param δ0 the convolutional layer delta + * @param δ1 the fully-connectd layer delta + * @param η the learning rate + * @param filt the array of convolutional filters + * @param b the fully-connectd layer parameters + * @param nz the width of the feature map per filter (from valid convolution) + * @param pool the pooling window size (if pool > 1, pooling is applied */ - def updateParam (x_ : MatrixD, z: MatrixD, δ0: MatrixD, δ1: MatrixD, η: Double, c: VectorD, b: NetParam) = - for j <- c.indices do - var sum = 0.0 - for i <- x_.indices; h <- z.indices2 do sum += x_(i, h+j) * δ0(i, h) - c(j) -= (sum / x_.dim) * η // update c weights in conv filter + def updateParam (x_ : MatrixD, φ: MatrixD, δ0: MatrixD, δ1: MatrixD, η: Double, + filt: Array [CoFilter_1D], b: NetParam, nz: Int, pool: Int = 1): Unit = + + val pooledWidth = if pool > 1 then nz / pool else nz + for i <- filt.indices do + val startCol = i * pooledWidth // determine column range for filter's feature map in φ and δ0 + val endCol = startCol + pooledWidth - 1 + val δ0_i = δ0(0 until δ0.dim, startCol to endCol) // extract the portion of δ0 corresponding to filter i + val filtVec = filt(i).coef // get current filter coef vector (using `coef`) + val updatVec = filtVec.copy // update each coefficient in the filter + + for j <- filtVec.indices do + var sum = 0.0 + for row <- x_.indices; h <- 0 until pooledWidth do + sum += x_(row, h + j) * δ0_i(row, h) // x_(row, h+j) input value for h-th convolution output of filter + updatVec(j) -= (sum / (x_.dim * pooledWidth)) * η // update rule: gradient descent step + end for + 
filt(i).update (updatVec) // update i-th filter with the new weights end for - b -= (z.𝐓 * δ1 * η, δ1.mean * η) // update b weights & biases (transpose 𝐓) + b -= (φ.ᵀ * δ1 * η, δ1.mean * η) // update fully-connected layer parameters end updateParam end CNN_1D @@ -233,7 +322,7 @@ end CNN_1D //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `cNN_1DTest` main function is used to test the `CNN_1D` class. - * Test using the simple example from section 11.10 of ScalaTion textbook. + * Test using the simple example from section 10.10 of ScalaTion textbook. * Perform four training steps. * > runMain scalation.modeling.neuralnet.cNN_1DTest */ @@ -248,30 +337,35 @@ end CNN_1D 0.3, 0.4, 0.5, 0.6)) - val sst0 = (y(?, 0) - y(?, 0).mean).normSq // sum of squares total for y_:0 - val sst1 = (y(?, 1) - y(?, 1).mean).normSq // sum of squares total for y_:1 + val nc = 3 + val nz = x.dim2 - nc + 1 + val cfilter = new CoFilter_1D (c.dim) + cfilter.update (c) + + val sst0 = (y(?, 0) - y(?, 0).mean).normSq // sum of squares total for y_:0 + val sst1 = (y(?, 1) - y(?, 1).mean).normSq // sum of squares total for y_:1 println (s"sst0 = $sst0") println (s"sst1 = $sst1") - val η = 0.001 // learning rate + val η = 0.001 // learning rate - val f = f_reLU // first activation function - val f1 = f_reLU // second activation function + val f = f_reLU // first activation function + val f1 = f_reLU // second activation function - println (s"input x = $x") // input/data matrix - println (s"input y = $y") // output/response matrix + println (s"input x = $x") // input/data matrix + println (s"input y = $y") // output/response matrix println (s"η = $η") for epoch <- 1 to 4 do banner (s"Start of epoch $epoch") - println (s"filter c = $c") // values for cofilter - println (s"weights b = $b") // values for fully-connected layer + println (s"filter c = $c") // values for cofilter + println (s"weights b = $b") // values for fully-connected layer - val φ = f.fM (conv (c, 
x)) // φ = f(conv (c, X)) - val yp = f1.fM (φ *: b) // Yp = f1(φB) -- use *: as b is NetParam - val ε = yp - y // negative error E = Yp - Y - val δ1 = f1.dM (yp) ⊙ ε // delta matrix for y - val δ0 = f.dM (φ) ⊙ (δ1 * b.w.𝐓) // delta matrix for φ (transpose (𝐓)) + val φ = f.fM (conv (c, x)) // φ = f(conv (c, X)) + val yp = f1.fM (φ *: b) // Yp = f1(φB) -- use *: as b is NetParam + val ε = yp - y // negative error E = Yp - Y + val δ1 = f1.dM (yp) ⊙ ε // delta matrix for y + val δ0 = f.dM (φ) ⊙ (δ1 * b.w.ᵀ) // delta matrix for φ (transpose (ᵀ)) println (s"feature map φ = $φ") println (s"response yp = $yp") @@ -279,12 +373,12 @@ end CNN_1D println (s"delta 1 δ1 = $δ1") println (s"delta 0 V0 = $δ0") - CNN_1D.updateParam (x, φ, δ0, δ1, η, c, b) + CNN_1D.updateParam (x, φ, δ0, δ1, η, Array (cfilter), b, nz, 1) val sse = ε.normFSq println (s"sse for $epoch th epoch: sse = $sse") - val sse0 = ε(?, 0).normSq // sum of squared errors for column 0 - val sse1 = ε(?, 1).normSq // sum of squared errors for column 1 + val sse0 = ε(?, 0).normSq // sum of squared errors for column 0 + val sse1 = ε(?, 1).normSq // sum of squared errors for column 1 banner ("metrics") println (s"sse0 = $sse0") println (s"sse1 = $sse1") @@ -292,7 +386,7 @@ end CNN_1D println (s"R^2_1 = ${1 - sse1/sst1}") end for -// val yp_ = f1.fM (f.fM (conv (c, x)) *: b) // updated predictions +// val yp_ = f1.fM (f.fM (conv (c, x)) *: b) // updated predictions end cNN_1DTest @@ -305,6 +399,8 @@ end cNN_1DTest */ @main def cNN_1DTest2 (): Unit = + val hp = Optimizer.hp ++ MakeMatrix4TS.hp + val x = MatrixD ((2, 5), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) val y = MatrixD ((2, 2), 6, 9, @@ -316,15 +412,15 @@ end cNN_1DTest 0.5, 0.6)) */ - val sst0 = (y(?, 0) - y(?, 0).mean).normSq // sum of squares total for y_:0 - val sst1 = (y(?, 1) - y(?, 1).mean).normSq // sum of squares total for y_:1 + val sst0 = (y(?, 0) - y(?, 0).mean).normSq // sum of squares total for y_:0 + val sst1 = (y(?, 1) - y(?, 1).mean).normSq // sum of squares 
total for y_:1 println (s"sst0 = $sst0") println (s"sst1 = $sst1") - val η = 0.001 // learning rate + val η = 0.001 // learning rate - println (s"input x = $x") // input/data matrix - println (s"input y = $y") // output/response matrix + println (s"input x = $x") // input/data matrix + println (s"input y = $y") // output/response matrix println (s"η = $η") banner ("CNN_1D") @@ -343,16 +439,69 @@ end cNN_1DTest2 @main def cNN_1DTest3 (): Unit = import Example_AutoMPG._ - banner ("CNN_1D vs. Regession - ExampleAutoMPG") + banner ("CNN_1D vs. Regession - Example_AutoMPG") + + val hp = Optimizer.hp ++ MakeMatrix4TS.hp banner ("Regression") val reg = Regression (oxy)() reg.trainNtest ()() banner ("CNN_1D") - hp("eta") = 0.0013 - val cnn = CNN_1D.rescale (ox, MatrixD.fromVector (y), nc = 4) +// hp("eta") = 0.00013 +// hp("maxEpochs") = 1000 + hp("eta") = 0.000134 + hp("maxEpochs") = 1000 + val cnn = CNN_1D.rescale (ox, MatrixD.fromVector (y), + nc = 6, // filter width of 6 + nf = 2, // 2 distinct filters + hparam = hp, + f = ActivationFun.f_lreLU, // ReLU hidden + f1 = ActivationFun.f_id // linear output + ) cnn.trainNtest ()() end cNN_1DTest3 + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `cNN_1DTest4 main function is used to test the `CNN_1D` class + * using the Covid dataset. 
+ * > runMain scalation.modeling.neuralnet.cNN_1DTest4 + */ +@main def cNN_1DTest4 (): Unit = + + import forecasting.Example_Covid._ + banner ("CNN_1D Example_Covid") + + val hp = Optimizer.hp ++ MakeMatrix4TS.hp + +// val exo_vars = NO_EXO + val exo_vars = Array ("icu_patients") +// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val (xxe, yy) = loadData (exo_vars, response) + println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + +// val xe = xxe // full + val xe = xxe(0 until 116) // clip the flat end +// val y = yy // full + val y = yy(0 until 116) // clip the flat end +// val hh = 6 // maximum forecasting horizon + hp("eta") = 0.000005 // learning rate + hp("maxEpochs") = 1000 // max epoch + + new Plot (null, y, null, s"y (new_deaths) vs. t", lines = true) + for j <- exo_vars.indices do + new Plot (null, xe(?, j), null, s"x_$j (${exo_vars(j)}) vs. t", lines = true) + + for p <- 6 to 6; q <- 4 to 4; s <- 1 to 1 do // number of endo lags; exo lags; trend + hp("p") = p // number of endo lags + hp("q") = q // number of exo lags + hp("spec") = s // trend specification: 0, 1, 2, 3, 5 +// val mod = ARX (xe, y, hh) // create model for time series data + val mod = CNN_1D.buildNrescale (xe, y, nc = 3, nf = 10, hparam = hp, f1 = ActivationFun.f_reLU) + mod.trainNtest ()() + end for + +end cNN_1DTest4 + diff --git a/src/main/scala/scalation/modeling/neuralnet/CNN_1D.scala.bak b/src/main/scala/scalation/modeling/neuralnet/CNN_1D.scala.bak new file mode 100644 index 000000000..9248bdd22 --- /dev/null +++ b/src/main/scala/scalation/modeling/neuralnet/CNN_1D.scala.bak @@ -0,0 +1,483 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller, Riya Bangia + * @version 2.0 + * @date Wed Oct 28 20:43:47 EDT 2020 + * @see LICENSE (MIT style license file). 
+ * + * @note Model: 1D Convolutional Neural Network (CNN) + * + * @see Hands-On Fundamentals of 1D Convolutional Neural Networks—A Tutorial for Beginner Users + * https://www.mdpi.com/2076-3417/14/18/8500 + */ + +// U N D E R D E V E L O P M E N T + +// FIX - extend training to handle multiple cofilters, pooling and multiple convolutional layers + +package scalation +package modeling +package neuralnet + +import scalation.mathstat._ +import scalation.modeling.forecasting.MakeMatrix4TS + +import ActivationFun._ +import Initializer._ + +import CoFilter_1D.conv + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `CNN_1D` class implements a Convolutionsl Network model. + * The model is trained using a data matrix x and response matrix y. + * @param x the input/data matrix with instances stored in rows + * @param y the output/response matrix, where y_i = response for row i of matrix x + * @param fname_ the feature/variable names (defaults to null) + * @param nf the number of filters for this convolutional layer + * @param nc the width of the filters (size of cofilters) + * @param pool the pooling window size (if pool > 1, pooling is applied) + * @param poolFun the pooling function to apply (e.g., CoFilter_1D.pool for max pooling, or + * CoFilter_1D.pool_a for average pooling) + * @param hparam the hyper-parameters for the model/network + * @param f the activation function family for layers 1->2 (input to hidden) + * @param f1 the activation function family for layers 2->3 (hidden to output) + * @param itran the inverse transformation function returns responses to original scale + */ +class CNN_1D (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, + nf: Int = 1, nc: Int = 3, + pool: Int = 1, poolFun: (VectorD, Int) => VectorD = CoFilter_1D.pool_a, + hparam: HyperParameter = Optimizer.hp ++ MakeMatrix4TS.hp, + f: AFF = f_reLU, f1: AFF = f_reLU, + val itran: FunctionM2M = null) + extends PredictorMV (x, y, fname_, hparam) + 
with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): + + private val debug = debugf ("CNN_1D", true) // debug function + private val flaw = flawf ("CNN_1D") // flaw function + private val eta = hparam("eta").toDouble // learning rate +// private val bSize = hparam("bSize").toInt // batch size + private val maxEpochs = hparam("maxEpochs").toInt // maximum number of training epochs/iterations + private val (n, ny) = (x.dim2, y.dim2) + private val nz = n - nc + 1 // size without padding + private val pooled_nz = if pool > 1 then nz / pool else nz // after pooling + private val fcDim = nf * pooled_nz + + if nz < 2 then flaw ("init", s"the size of the hidden layer nz = $nz is too small") + + private val filt = Array.fill (nf)(new CoFilter_1D (nc)) // array of filters +// private val c = weightVec (nc) // parameters (weights & biases) in to hid +// private val b = NetParam (weightMat (nz, ny), new VectorD (ny)) + private val b = NetParam (weightMat (fcDim, ny), new VectorD (ny)) // parameters (weights & biases) hid to out + + modelName = s"CNN_1D_${f.name}_${f1.name}" + + println (s"Create a CNN_1D with $n input, $nf filters and $ny output nodes") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compute the feature maps using all filters and concatenate them. + * The result is a matrix with dimensions (number of instances) × (nf * nz). + * @parm x_ input matrix + */ + private def computeFeatureMaps(x_ : MatrixD): MatrixD = + val maps = for (i <- 0 until nf) yield // compute a feature map for each filter + val filterVec = filt(i).coef // vector for filter i + val convResult = CoFilter_1D.conv (filterVec, x_) // valid convolution on x_ + val activated = f.fM (convResult) // apply activation function on convolution result + if pool > 1 then activated.mmap(row => poolFun(row, pool)) else activated + + // Concatenate horizontally all feature maps (assumes same number of rows). 
+ maps.reduce ((a, b) => a ++^ b) + end computeFeatureMaps + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Filter the i-th input vector with the f-th filter. + * @param i the index of the i-th row of the matrix + * @param f the index of the f-th filter + */ + def filter (i: Int, f: Int): VectorD = + val xi = x(i) +// val ft = filt(f) + val xf = new VectorD (xi.dim - nc + 1) +// for j <- xf.indices fo xf(j) = ft.dot (xi, j) + xf + end filter + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Update filter f's parameters. + * @param f the index for the filter + * @param vec2 the new paramters for the filter's vector + */ + def updateFilterParams (f: Int, vec2: VectorD): Unit = filt(f).update (vec2) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the parameters c and b. + */ + override def parameters: NetParams = + val cMatrices = filt.map (filter => MatrixD.fromVector (filter.coef)) // create array of matrices, each representing one filter's coefs. + val cMat = cMatrices.reduce ((a, b) => a ++^ b) // concatenate them horizontally into one matrix. + Array (NetParam (cMat), b) + end parameters + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Given training data x_ and y_, fit the parametera c and b. + * This is a simple algorithm that iterates over several epochs using gradient descent. + * It does not use batching nor a sufficient stopping rule. + * In practice, use the train2 method that uses a better optimizer. 
+ * @param x_ the training/full data/input matrix + * @param y_ the training/full response/output matrix + */ + def train (x_ : MatrixD = x, y_ : MatrixD = y): Unit = + println (s"train: eta = $eta") + var sse0 = Double.MaxValue // hold prior value of sse + + var (go, epoch) = (true, 1) + cfor (go && epoch <= maxEpochs, epoch += 1) { +// val φ = f.fM (conv (c, x_)) // φ = f(conv (c, X)) + val φ = computeFeatureMaps (x_) // compute concatenated feature maps from all filters + val yp = f1.fM (b * φ) // Yp = f1(ZB) + val ε = yp - y // negative error E = Yp - Y + val δ1 = f1.dM (yp) ⊙ ε // delta matrix for y + val δ0 = f.dM (φ) ⊙ (δ1 * b.w.𝐓) // delta matrix for φ (transpose (𝐓)) +// CNN_1D.updateParam (x_, φ, δ0, δ1, eta, c, b) + CNN_1D.updateParam (x_, φ, δ0, δ1, eta, filt, b, nz, pool) + + val sse = (y_ - yp).normFSq // loss = sum of squared errors + debug ("train", s"sse for $epoch th epoch: sse = $sse") + if sse >= sse0 then go = false // return early if moving up + sse0 = sse // save prior sse + } // cfor + end train + +// val yp_ = f1.fM (f.fM (b * conv (c, x_))) // updated predictions + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Given training data x_ and y_, fit the parameters c and b. + * Iterate over several epochs, where each epoch divides the training set into + * batches. Each batch is used to update the weights. + * FIX - to be implemented + * @param x_ the training/full data/input matrix + * @param y_ the training/full response/output matrix + */ + override def train2 (x_ : MatrixD = x, y_ : MatrixD = y): Unit = + val epochs = 0 // optimize3 (x_, y_, c, b, eta, bSize, maxEpochs, f, f1) // FIX: optimize parameters c, b + println (s"ending epoch = $epochs") +// estat.tally (epochs._2) + end train2 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Test a predictive model y_ = f(x_) + e and return its QoF vector. 
+ * Testing may be be in-sample (on the training set) or out-of-sample + * (on the testing set) as determined by the parameters passed in. + * Note: must call train before test. + * @param x_ the testing/full data/input matrix (defaults to full x) + * @param y_ the testing/full response/output matrix (defaults to full y) + */ + def test (x_ : MatrixD = x, y_ : MatrixD = y): (MatrixD, MatrixD) = + val yp = predict (x_) // make predictions + val yy = if itran == null then y_ else itran (y_) // undo scaling, if used + e = yy - yp // RECORD the residuals/errors (@see `Predictor`) + val qof = MatrixD (for k <- yy.indices2 yield diagnose (yy(?, k), yp(?, k))).𝐓 // transpose (𝐓) + (yp, qof) // return predictions and QoF vector + end test + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Given a new input vector z, predict the output/response vector f(z). + * Formula: f1.f_ (b dot f.f_ (c *+ z)) + * With multiple filters, first convert z to a 1-row matrix, compute its feature map + * (which will have one row and (nf*nz) columns), and then apply the fully connected layer. + * @param z the new input vector + */ + def predict (z: VectorD): VectorD = + val zMatrix = MatrixD.fromVector (z) // convert vector to one-row matrix. + val phi = computeFeatureMaps (zMatrix) // matrix with 1 row and nf*nz columns, compute feature maps from all filters + val phiVec = phi(0) // extract first (and only) row as a vector + f1.f_ (b dot phiVec) // compute prediction using the fully-connected layer + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Given an input matrix z, predict the output/response matrix f(z). 
+ * Formula: f1.fM (b * f.fM (conv (c, z))) + * @param z the input matrix + */ + override def predict (z: MatrixD = x): MatrixD = + val phi = computeFeatureMaps (z) // matrix with dimensions (instances x (nf * nz)), + // compute concatenated feature maps from all filters + f1.fM (b * phi) // apply fully-connected layer to produce the predictions + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a sub-model that is restricted to the given columns of the data matrix. + * @param x_cols the columns that the new model is restricted to + */ + def buildModel (x_cols: MatrixD): CNN_1D = + new CNN_1D (x_cols, y, null, nf, nc, pool, poolFun, hparam, f, f1, itran) + end buildModel + +end CNN_1D + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `CNN_1D` companion object provides factory methods for creating 1D + * convolutional neural networks. + */ +object CNN_1D extends Scaling: + + def apply (xy: MatrixD): CNN_1D = ??? + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a `CNN_1D` with automatic rescaling from a data matrix and response matrix. 
+ * @param x the input/data matrix with instances stored in rows + * @param y the output/response matrix, where y_i = response for row i of matrix x + * @param fname the feature/variable names (defaults to null) + * @param nf the number of filters for this convolutional layer + * @param nc the width of the filters (size of cofilters) + * @param pool the pooling window size (if pool > 1, pooling is applied) + * @param poolFun the pooling function to apply (e.g., CoFilter_1D.pool for max pooling, or + * CoFilter_1D.pool_a for average pooling) + * @param hparam the hyper-parameters for the model/network + * @param f the activation function family for layers 1->2 (input to hidden) + * @param f1 the activation function family for layers 2->3 (hidden to output) + * @param itran the inverse transformation function returns responses to original scale + */ + def rescale (x: MatrixD, y: MatrixD, fname: Array [String] = null, + nf: Int = 1, nc: Int = 3, + pool: Int = 1, poolFun: (VectorD, Int) => VectorD = CoFilter_1D.pool, + hparam: HyperParameter = Optimizer.hp ++ MakeMatrix4TS.hp, + f: AFF = f_reLU, f1: AFF = f_reLU): CNN_1D = + var itran: FunctionM2M = null // inverse transform -> original scale + + val x_s = if scale then rescaleX (x, f) + else x + val y_s = if f1.bounds != null then { val y_i = rescaleY (y, f1); itran = y_i._2; y_i._1 } + else y +// val y_s = { val y_i = rescaleY (y, f_sigmoid); itran = y_i._2; y_i._1 } + + println (s" scaled: x = $x_s \n scaled y = $y_s") + new CNN_1D (x_s, y_s, fname, nf, nc, pool, poolFun, hparam, f, f1, itran) + end rescale + + def buildNrescale (xe: MatrixD, y: VectorD, fname: Array [String] = null, + nf: Int = 1, nc: Int = 3, + pool: Int = 1, poolFun: (VectorD, Int) => VectorD = CoFilter_1D.pool, + hparam: HyperParameter = Optimizer.hp ++ MakeMatrix4TS.hp, + f: AFF = f_reLU, f1: AFF = f_reLU): CNN_1D = + val xy = forecasting.ARX.buildMatrix (xe, y, hparam, false) + println (s" buildNrescale: xy.dims = ${xy.dims}, y.dim = 
${y.dim}") + rescale (xy, MatrixD.fromVector (y), fname, nf, nc, pool, poolFun, hparam, f, f1) + end buildNrescale + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Update the parameters: the weights in the convolutional filter c and + * the weights biases in the fully-connected layer b. + * @param x_ the training/full data/input matrix + * @param φ the training/full response/output matrix + * @param δ0 the convolutional layer delta + * @param δ1 the fully-connectd layer delta + * @param η the learning rate + * @param filt the array of convolutional filters + * @param b the fully-connectd layer parameters + * @param nz the width of the feature map per filter (from valid convolution) + * @param pool the pooling window size (if pool > 1, pooling is applied + */ + def updateParam (x_ : MatrixD, φ: MatrixD, δ0: MatrixD, δ1: MatrixD, η: Double, + filt: Array [CoFilter_1D], b: NetParam, nz: Int, pool: Int = 1): Unit = + + val pooledWidth = if pool > 1 then nz / pool else nz + for i <- filt.indices do + val startCol = i * pooledWidth // determine column range for filter's feature map in φ and δ0 + val endCol = startCol + pooledWidth - 1 + val δ0_i = δ0.apply(0 until δ0.dim, startCol to endCol) // extract the portion of δ0 corresponding to filter i + val filtVec = filt(i).coef // get current filter coef vector (using `coef`) + val updatVec = filtVec.copy // update each coefficient in the filter + + for j <- filtVec.indices do + var sum = 0.0 + for row <- x_.indices; h <- 0 until pooledWidth do + sum += x_(row, h + j) * δ0_i(row, h) // x_(row, h+j) input value for h-th convolution output of filter + updatVec(j) -= (sum / (x_.dim * pooledWidth)) * η // update rule: gradient descent step + end for + filt(i).update (updatVec) // update i-th filter with the new weights + end for + b -= (φ.𝐓 * δ1 * η, δ1.mean * η) // update fully-connected layer parameters + end updateParam + +end CNN_1D + + 
+//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `cNN_1DTest` main function is used to test the `CNN_1D` class. + * Test using the simple example from section 11.10 of ScalaTion textbook. + * Perform four training steps. + * > runMain scalation.modeling.neuralnet.cNN_1DTest + */ +@main def cNN_1DTest (): Unit = + + val x = MatrixD ((2, 5), 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10) + val y = MatrixD ((2, 2), 6, 9, + 16, 24) + val c = VectorD (0.5, 1, 0.5) + val b = NetParam (MatrixD ((3, 2), 0.1, 0.2, + 0.3, 0.4, + 0.5, 0.6)) + + val nc = 3 + val nz = x.dim2 - nc + 1 + val cfilter = new CoFilter_1D (c.dim) + cfilter.update(c) + + val sst0 = (y(?, 0) - y(?, 0).mean).normSq // sum of squares total for y_:0 + val sst1 = (y(?, 1) - y(?, 1).mean).normSq // sum of squares total for y_:1 + println (s"sst0 = $sst0") + println (s"sst1 = $sst1") + + val η = 0.001 // learning rate + + val f = f_reLU // first activation function + val f1 = f_reLU // second activation function + + println (s"input x = $x") // input/data matrix + println (s"input y = $y") // output/response matrix + println (s"η = $η") + + for epoch <- 1 to 4 do + banner (s"Start of epoch $epoch") + println (s"filter c = $c") // values for cofilter + println (s"weights b = $b") // values for fully-connected layer + + val φ = f.fM (conv (c, x)) // φ = f(conv (c, X)) + val yp = f1.fM (φ *: b) // Yp = f1(φB) -- use *: as b is NetParam + val ε = yp - y // negative error E = Yp - Y + val δ1 = f1.dM (yp) ⊙ ε // delta matrix for y + val δ0 = f.dM (φ) ⊙ (δ1 * b.w.𝐓) // delta matrix for φ (transpose (𝐓)) + + println (s"feature map φ = $φ") + println (s"response yp = $yp") + println (s"- error ε = $ε") + println (s"delta 1 δ1 = $δ1") + println (s"delta 0 V0 = $δ0") + + CNN_1D.updateParam (x, φ, δ0, δ1, η, Array (cfilter), b, nz, 1) + val sse = ε.normFSq + println (s"sse for $epoch th epoch: sse = $sse") + + val sse0 = ε(?, 0).normSq // sum of squared errors for column 0 + val sse1 = ε(?, 
1).normSq // sum of squared errors for column 1 + banner ("metrics") + println (s"sse0 = $sse0") + println (s"sse1 = $sse1") + println (s"R^2_0 = ${1 - sse0/sst0}") + println (s"R^2_1 = ${1 - sse1/sst1}") + end for + +// val yp_ = f1.fM (f.fM (conv (c, x)) *: b) // updated predictions + +end cNN_1DTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `cNN_1DTest` main function is used to test the `CNN_1D` class. + * Test using the simple example from section 11.10 of ScalaTion textbook. + * Perform four training steps. + * > runMain scalation.modeling.neuralnet.cNN_1DTest2 + */ +@main def cNN_1DTest2 (): Unit = + + val hp = Optimizer.hp ++ MakeMatrix4TS.hp + + val x = MatrixD ((2, 5), 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10) + val y = MatrixD ((2, 2), 6, 9, + 16, 24) +/* + val c = VectorD (0.5, 1, 0.5) + val b = NetParam (MatrixD ((3, 2), 0.1, 0.2, + 0.3, 0.4, + 0.5, 0.6)) +*/ + + val sst0 = (y(?, 0) - y(?, 0).mean).normSq // sum of squares total for y_:0 + val sst1 = (y(?, 1) - y(?, 1).mean).normSq // sum of squares total for y_:1 + println (s"sst0 = $sst0") + println (s"sst1 = $sst1") + + val η = 0.001 // learning rate + + println (s"input x = $x") // input/data matrix + println (s"input y = $y") // output/response matrix + println (s"η = $η") + + banner ("CNN_1D") + hp("eta") = η + val cnn = new CNN_1D (x, y) + cnn.trainNtest ()() + +end cNN_1DTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `cNN_1DTest3` main function is used to test the `CNN_1D` class + * using the AutoMPG dataset. + * > runMain scalation.modeling.neuralnet.cNN_1DTest3 + */ +@main def cNN_1DTest3 (): Unit = + + import Example_AutoMPG._ + banner ("CNN_1D vs. 
Regession - Example_AutoMPG") + + val hp = Optimizer.hp ++ MakeMatrix4TS.hp + + banner ("Regression") + val reg = Regression (oxy)() + reg.trainNtest ()() + + banner ("CNN_1D") + hp("eta") = 0.00013 + hp("maxEpochs") = 1000 + val cnn = CNN_1D.rescale (ox, MatrixD.fromVector (y), nc = 4) + cnn.trainNtest ()() + +end cNN_1DTest3 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `cNN_1DTest4 main function is used to test the `CNN_1D` class + * using the AutoMPG dataset. + * > runMain scalation.modeling.neuralnet.cNN_1DTest4 + */ +@main def cNN_1DTest4 (): Unit = + + import forecasting.Example_Covid._ + banner ("CNN_1D Example_Covid") + + val hp = Optimizer.hp ++ MakeMatrix4TS.hp + +// val exo_vars = NO_EXO + val exo_vars = Array ("icu_patients") +// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") + val (xxe, yy) = loadData (exo_vars, response) + println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") + +// val xe = xxe // full + val xe = xxe(0 until 116) // clip the flat end +// val y = yy // full + val y = yy(0 until 116) // clip the flat end +// val hh = 6 // maximum forecasting horizon + hp("eta") = 0.00013 // learning rate + hp("maxEpochs") = 1000 // max epoch + + new Plot (null, y, null, s"y (new_deaths) vs. t", lines = true) + for j <- exo_vars.indices do + new Plot (null, xe(?, j), null, s"x_$j (${exo_vars(j)}) vs. 
t", lines = true) + + for p <- 6 to 6; q <- 4 to 4; s <- 1 to 1 do // number of endo lags; exo lags; trend + hp("p") = p // number of endo lags + hp("q") = q // number of exo lags + hp("spec") = s // trend specification: 0, 1, 2, 3, 5 +// val mod = ARX (xe, y, hh) // create model for time series data + val mod = CNN_1D.buildNrescale (xe, y, nc = 4, hparam = hp) + mod.trainNtest ()() + end for + +end cNN_1DTest4 + diff --git a/src/main/scala/scalation/modeling/neuralnet/CNN_2D.scala b/src/main/scala/scalation/modeling/neuralnet/CNN_2D.scala index 053c0b592..a1e827c29 100644 --- a/src/main/scala/scalation/modeling/neuralnet/CNN_2D.scala +++ b/src/main/scala/scalation/modeling/neuralnet/CNN_2D.scala @@ -17,6 +17,8 @@ package scalation package modeling package neuralnet +import scala.collection.mutable.IndexedSeq + import scalation.mathstat._ import ActivationFun._ @@ -43,7 +45,7 @@ class CNN_2D (x: TensorD, y: MatrixD, fname_ : Array [String] = null, f: AFF = f_reLU, f1: AFF = f_reLU, val itran: FunctionM2M = null) extends Model // FIX: need a trait like `PredictorMV` at the tensor level - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2): private val debug = debugf ("CNN_2D", true) // debug function private val flaw = flawf ("CNN_2D") // flaw function @@ -62,7 +64,7 @@ class CNN_2D (x: TensorD, y: MatrixD, fname_ : Array [String] = null, private val fT = tensorize (f.f_) // activation function at tensor level private val dT = tensorize (f.d) // activation derivative at tensor level - modelName = s"CNN_2D_${f.name}_${f1.name}" + _modelName = s"CNN_2D_${f.name}_${f1.name}" println (s"Create a CNN_2D with $n input, $nf filters and $ny output nodes") @@ -74,29 +76,35 @@ class CNN_2D (x: TensorD, y: MatrixD, fname_ : Array [String] = null, * For convenience, these are usable as stub implementations. * FIX - put in new trait */ - def crossValidate(k: Int, rando: Boolean): Array[scalation.mathstat.Statistic] = ??? 
- def getFname: Array[String] = ??? - def getX: scalation.mathstat.MatrixD = ??? - def getY: scalation.mathstat.VectorD = ??? - def hparameter: scalation.HyperParameter = ??? - def parameter: scalation.mathstat.VectorD | scalation.mathstat.MatrixD = ??? - def predict(z: scalation.mathstat.VectorD): Double | scalation.mathstat.VectorD = ??? - def test (x_ : scalation.mathstat.MatrixD, y_ : scalation.mathstat.VectorD): ( - scalation.mathstat.VectorD, scalation.mathstat.VectorD) = ??? - def train(x_ : scalation.mathstat.MatrixD, y_ : scalation.mathstat.VectorD): Unit = ??? + def crossValidate (k: Int, rando: Boolean): Array [Statistic] = ??? + def getBest: BestStep = ??? + def getFname: Array [String] = ??? + def getX: MatrixD = ??? + def getY: VectorD = ??? + def hparameter: HyperParameter = ??? + def parameter: VectorD | MatrixD = ??? + def predict (z: VectorD): Double | VectorD = ??? + def test (x_ : MatrixD, y_ : VectorD): (VectorD, VectorD) = ??? + def train (x_ : MatrixD, y_ : VectorD): Unit = ??? + def inSample_Test(skip: Int, showYp: Boolean): Unit = ??? + def validate (rando: Boolean, ratio: Double) (idx: IndexedSeq [Int]): + (VectorD | MatrixD, VectorD | MatrixD) = ??? + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Filter the i-th input vector with the f-th filter. * @param i the index of the i-th row of the matrix * @param f the index of the f-th filter */ - def filter (i: Int, f: Int): MatrixD = + def filter (i: Int, f: Int): MatrixD = ??? +/* val xi = x(i) // val ft = filt(f) val xf = new MatrixD (xi.dim - nc + 1, xi.dim - nc + 1) // for j <- xf.indices fo xf(j) = ft.dot (xi, j) xf end filter +*/ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Update filter f's parameters. 
@@ -129,7 +137,7 @@ class CNN_2D (x: TensorD, y: MatrixD, fname_ : Array [String] = null, val yp = f1.fM (b * z) // Yp = f1(ZB) val ε = yp - y // negative error E = Yp - Y val δ1 = f1.dM (yp) ⊙ ε // delta matrix for y - val δ0 = dT (φ).flatten ⊙ (δ1 * b.w.𝐓) // delta matrix for φ (transpose (𝐓)) + val δ0 = dT (φ).flatten ⊙ (δ1 * b.w.ᵀ) // delta matrix for φ (transpose (ᵀ)) CNN_2D.updateParam (x_, z, δ0, δ1, eta, c, b) val sse = (y_ - yp).normFSq // loss = sum of squared errors @@ -149,11 +157,13 @@ class CNN_2D (x: TensorD, y: MatrixD, fname_ : Array [String] = null, * @param x_ the training/full data/input matrix * @param y_ the training/full response/output matrix */ - def train2 (x_ : TensorD = x, y_ : MatrixD = y): Unit = + def train2 (x_ : TensorD = x, y_ : MatrixD = y): Unit = ??? +/* val epochs = 0 // optimize3 (x_, y_, c, b, eta, bSize, maxEpochs, f, f1) // FIX: optimize parameters c, b println (s"ending epoch = $epochs") // estat.tally (epochs._2) end train2 +*/ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Test a predictive model y_ = f(x_) + e and return its QoF vector. @@ -167,7 +177,8 @@ class CNN_2D (x: TensorD, y: MatrixD, fname_ : Array [String] = null, val yp = predict (x_) // make predictions val yy = if itran == null then y_ else itran (y_) // undo scaling, if used e = yy - yp // RECORD the residuals/errors (@see `Predictor`) - val qof = MatrixD (for k <- yy.indices2 yield diagnose (yy(?, k), yp(?, k))).𝐓 // transpose (𝐓) + debug ("test", s"e = $e") + val qof = MatrixD (for k <- yy.indices2 yield diagnose (yy(?, k), yp(?, k))).ᵀ // transpose (ᵀ) (yp, qof) // return predictions and QoF vector end test @@ -186,9 +197,10 @@ class CNN_2D (x: TensorD, y: MatrixD, fname_ : Array [String] = null, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. 
* @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - def buildModel (x_cols: TensorD): CNN_2D = - new CNN_2D (x_cols, y, null, nf, nc, hparam, f, f1, itran) + def buildModel (x_cols: TensorD, fname2: Array [String] = null): CNN_2D = + new CNN_2D (x_cols, y, fname2, nf, nc, hparam, f, f1, itran) end buildModel end CNN_2D @@ -237,15 +249,17 @@ object CNN_2D extends Scaling: * @param c the convolution filter matrix * @param b the fully-connectd layer parameters */ - def updateParam (x_ : TensorD, z: MatrixD, δ0: MatrixD, δ1: MatrixD, η: Double, c: MatrixD, b: NetParam) = + def updateParam (x_ : TensorD, z: MatrixD, δ0: MatrixD, δ1: MatrixD, η: Double, c: MatrixD, b: NetParam) = ??? +/* for j <- c.indices do var sum = 0.0 sum += 0 // remove after FIX // for i <- x_.indices; h <- z.indices2 do sum += x_(i, h+j) * δ0(i, h) // FIX: c now a matrix, x_ a tensor c(j) -= (sum / x_.dim) * η // update c weights in conv filter end for - b -= (z.𝐓 * δ1 * η, δ1.mean * η) // update b weights & biases (transpose 𝐓) + b -= (z.ᵀ * δ1 * η, δ1.mean * η) // update b weights & biases (transpose ᵀ) end updateParam +*/ end CNN_2D @@ -290,7 +304,7 @@ end CNN_2D val yp = f1.fM (φ *: b) // Yp = f1(φB) -- use *: as b is NetParam val ε = yp - y // negative error E = Yp - Y val δ1 = f1.dM (yp) ⊙ ε // delta matrix for y - val δ0 = f.dM (φ) ⊙ (δ1 * b.w.𝐓) // delta matrix for φ (transpose (𝐓)) + val δ0 = f.dM (φ) ⊙ (δ1 * b.w.ᵀ) // delta matrix for φ (transpose (ᵀ)) println (s"feature map φ = $φ") println (s"response yp = $yp") @@ -380,3 +394,81 @@ end cNN_2DTest2 end cNN_2DTest3 + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `cNN_2DTest4` main function is used to test the `CNN_2D` class. + * It counts the number of learnable parameters in a CCN. 
+ * > runMain scalation.modeling.neuralnet.cNN_2DTest4 + */ +@main def cNN_2DTest4 (): Unit = + + var isize = 0 + + def clayer (nfilter: Int, sfilter: Int, nchan: Int): Int = + val np = nfilter * (sfilter~^2 * nchan + 1) + println (s"clayer ($nfilter, $sfilter, $nchan) = $np") + np + + def fclayer (isize: Int, osize: Int): Int = + val np = (isize + 1) * osize + println (s"fclayer ($isize, $osize) = $np") + np + + banner ("1. clayer1 -> fclayer1: 1476") + isize = 12~^2 + println (clayer (1, 5, 1) + fclayer (isize, 10)) + + banner ("2. clayer1 -> clayer2 -> fclayer1 -> fclayer2: 1199882") + isize = 64 * 12~^2 + println (clayer (32, 3, 1) + clayer (64, 3, 32) + fclayer (isize, 128) + fclayer (128, 10)) + + banner ("3. clayer1 -> clayer2 -> fclayer1 -> fclayer2: 421642") + isize = 64 * 7~^2 + println (clayer (32, 3, 1) + clayer (64, 3, 32) + fclayer (isize, 128) + fclayer (128, 10)) + + banner ("4. clayer1 -> clayer2 -> fclayer1 -> fclayer2: 503562") + isize = 64 * 7~^2 + println (clayer (32, 3, 1) + clayer (64, 7, 32) + fclayer (isize, 128) + fclayer (128, 10)) + + banner ("5. clayer1 -> clayer2 -> fclayer1 -> fclayer2: 52138") + isize = 16 * 7~^2 + println (clayer (8, 3, 1) + clayer (16, 3, 8) + fclayer (isize, 64) + fclayer (64, 10)) + + banner ("6. clayer1 -> clayer2 -> fclayer1 -> fclayer2: 105194") + isize = 16 * 7~^2 + println (clayer (8, 5, 1) + clayer (16, 5, 8) + fclayer (isize, 128) + fclayer (128, 10)) + + banner ("7. clayer1 -> clayer2 -> fclayer1 -> fclayer2: 68714") + isize = 20 * 7~^2 + println (clayer (10, 5, 1) + clayer (20, 5, 10) + fclayer (isize, 64) + fclayer (64, 10)) + + banner ("8. clayer1 -> clayer2 -> fclayer1 -> fclayer2: 454922") + isize = 64 * 7~^2 + println (clayer (32, 5, 1) + clayer (64, 5, 32) + fclayer (isize, 128) + fclayer (128, 10)) + + banner ("9. clayer1 -> clayer2 -> fclayer1: 83466") + isize = 64 * 7~^2 + println (clayer (32, 5, 1) + clayer (64, 5, 32) + fclayer (isize, 10)) + + banner ("10. 
clayer1 -> fclayer1: 23466") + isize = 16 * 12~^2 + println (clayer (16, 5, 1) + fclayer (isize, 10)) + + banner ("11. clayer1 -> fclayer1: 15770") + isize = 8 * 14~^2 + println (clayer (8, 3, 1) + fclayer (isize, 10)) + + banner ("12. clayer1 -> clayer2 -> fclayer1: 34794") + isize = 32 * 7~^2 + println (clayer (64, 3, 1) + clayer (32, 3, 64) + fclayer (isize, 10)) + + banner ("13. clayer1 -> clayer2 -> fclayer1 -> fclayer2: 225034") + isize = 64 * 5~^2 + println (clayer (32, 3, 1) + clayer (64, 3, 32) + fclayer (isize, 128) + fclayer (128, 10)) + + banner ("14. clayer1 -> clayer2 -> fclayer1 -> fclayer2: 52138") + isize = 16 * 7~^2 + println (clayer (8, 3, 1) + clayer (16, 3, 8) + fclayer (isize, 128) + fclayer (128, 10)) + +end cNN_2DTest4 + diff --git a/src/main/scala/scalation/modeling/neuralnet/CoFilter_1D.scala b/src/main/scala/scalation/modeling/neuralnet/CoFilter_1D.scala index c3608e406..3c0cf5d5f 100644 --- a/src/main/scala/scalation/modeling/neuralnet/CoFilter_1D.scala +++ b/src/main/scala/scalation/modeling/neuralnet/CoFilter_1D.scala @@ -34,6 +34,11 @@ class CoFilter_1D (width: Int = 5): */ def update (vec_ : VectorD): Unit = vec = vec_ + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return/get the filter coefficients (needed for forward and gradient computation) + */ + def coef: VectorD = vec + end CoFilter_1D diff --git a/src/main/scala/scalation/modeling/neuralnet/CoFilter_2D.scala b/src/main/scala/scalation/modeling/neuralnet/CoFilter_2D.scala index 8baceca1b..166e0b7cf 100644 --- a/src/main/scala/scalation/modeling/neuralnet/CoFilter_2D.scala +++ b/src/main/scala/scalation/modeling/neuralnet/CoFilter_2D.scala @@ -34,6 +34,11 @@ class CoFilter_2D (width: Int = 5): */ def update (mat_ : MatrixD): Unit = mat = mat_ + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return/get the filter coefficients (needed for forward and gradient computation) + */ + def coef: MatrixD = mat + end 
CoFilter_2D diff --git a/src/main/scala/scalation/modeling/neuralnet/ELM_3L1.scala b/src/main/scala/scalation/modeling/neuralnet/ELM_3L1.scala index fcbc3df44..9fb8daa2f 100644 --- a/src/main/scala/scalation/modeling/neuralnet/ELM_3L1.scala +++ b/src/main/scala/scalation/modeling/neuralnet/ELM_3L1.scala @@ -43,28 +43,29 @@ class ELM_3L1 (x: MatrixD, y: VectorD, fname_ : Array [String] = null, private var nz: Int = -1, hparam: HyperParameter = null, f: AFF = f_tanh, val itran: FunctionV2V = null) extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2): + private val debug = debugf ("ELM_3L1", false) // debug function private val n = x.dim2 // nodes in input layer private val s = 8 // random number stream to use (0 - 999) if nz < 1 then nz = 2 * n + 1 // default number of nodes for hidden layer - val df_m = compute_df_m (nz) // degrees of freedom for model (first output only) - resetDF (df_m, x.dim - df_m) // degrees of freedom for (model, error) + val df_r = compute_dfr (nz) // degrees of freedom for regression/model (first output only) + resetDF (df_r, x.dim - df_r) // degrees of freedom for (regression/model, error) private val a = new NetParam (weightMat3 (n, nz, s), weightVec3 (nz, s)) // parameters (weights & biases) in to hid (fixed) - modelName = "ELM_3L1_" + f.name + _modelName = s"ELM_3L1_${f.name}" - println (s"Create an ELM_3L1 with $n input, $nz hidden and 1 output nodes: df_m = $df_m") + println (s"Create an ELM_3L1 with $n input, $nz hidden and 1 output nodes: df_r = $df_r") //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the degrees of freedom for the model (based on n, nz_, ny = 1). * Rough extimate based on total number of parameters - 1. 
* @param nz_ the number of nodes in the hidden layer */ - def compute_df_m (nz_ : Int): Int = nz_ + def compute_dfr (nz_ : Int): Int = nz_ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the parameters b. Since the a weights are fixed, only return b. @@ -115,9 +116,11 @@ class ELM_3L1 (x: MatrixD, y: VectorD, fname_ : Array [String] = null, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - def buildModel (x_cols: MatrixD): ELM_3L1 = - new ELM_3L1 (x_cols, y, null, -1, hparam, f, itran) + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): ELM_3L1 = + debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") + new ELM_3L1 (x_cols, y, fname2, -1, hparam, f, itran) end buildModel end ELM_3L1 @@ -336,7 +339,7 @@ end eLM_3L1Test4 elm.trainNtest ()() banner ("Forward Selection Test") - val (cols, rSq) = elm.forwardSelAll () // R^2, R^2 Bar, smape, R^2 cv + elm.forwardSelAll () // showing R^2, R^2 Bar, smape, R^2 cv end eLM_3L1Test5 @@ -368,8 +371,7 @@ end eLM_3L1Test5 val k = cols.size println (s"k = $k, n = $n") val t = VectorD.range (1, k) // instance index - new PlotM (t, rSq.transpose, Array ("R^2", "R^2 bar", "smape", "R^2 cv"), - "R^2 vs n for ELM_3L1", lines = true) + new PlotM (t, rSq.transpose, Regression.metrics, "R^2 vs n for ELM_3L1", lines = true) end eLM_3L1Test6 diff --git a/src/main/scala/scalation/modeling/neuralnet/Example_Concrete.scala b/src/main/scala/scalation/modeling/neuralnet/Example_Concrete.scala index 3e330a1f0..8b079ba1c 100644 --- a/src/main/scala/scalation/modeling/neuralnet/Example_Concrete.scala +++ b/src/main/scala/scalation/modeling/neuralnet/Example_Concrete.scala @@ -186,7 +186,7 @@ import Example_Concrete._ banner 
("Concrete - RegressionMV") val mod = new RegressionMV (ox, y, ox_fname) // create model with intercept (else pass x) - val (yp, qof) = mod.trainNtest ()() // train and test the model + val qof = mod.trainNtest ()()._2 // train and test the model println (mod.summary ()) // parameter/coefficient statistics val sse = qof (QoF.sse.ordinal) @@ -264,7 +264,7 @@ end example_ConcreteTest2 val pt = NeuralNet_2L.rescale (ox, y) banner (s"Neural_2L as 3 perceptrons: trainNtest2") - val (yp, qof) = pt.trainNtest2 ()() // interval search on eta + val yp = pt.trainNtest2 ()()._1 // interval search on eta for j <- y.indices2 do val yj = y(?, j) val ypj = yp(?, j) diff --git a/src/main/scala/scalation/modeling/neuralnet/NeuralNet_2L.scala b/src/main/scala/scalation/modeling/neuralnet/NeuralNet_2L.scala index 8fd7694e8..c4f0c9c19 100644 --- a/src/main/scala/scalation/modeling/neuralnet/NeuralNet_2L.scala +++ b/src/main/scala/scalation/modeling/neuralnet/NeuralNet_2L.scala @@ -47,7 +47,7 @@ class NeuralNet_2L (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, hparam: HyperParameter = Optimizer.hp, f: AFF = f_sigmoid, val itran: FunctionM2M = null) extends PredictorMV (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2): private val eta = hp("eta").toDouble // learning rate val opti = new OPTIMIZER () // parameter optimizer @@ -56,7 +56,7 @@ class NeuralNet_2L (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, // bb = Array (b.asInstanceOf [NetParam]) // inside array bb = Array (new NetParam (weightMat (x.dim2, y.dim2))) // initialize parameters bb - modelName = "NeuralNet_2L_" + f.name + _modelName = s"NeuralNet_2L_${f.name}" //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Given training data x_ and y_, fit the parameters bb. 
@@ -136,8 +136,9 @@ class NeuralNet_2L (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - def buildModel (x_cols: MatrixD): NeuralNet_2L = + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): NeuralNet_2L = new NeuralNet_2L (x_cols, y, null, hparam, f, itran) end buildModel @@ -318,7 +319,7 @@ end neuralNet_2LTest println (mod.summary2 ()) // parameter/coefficient statistics banner ("Concrete - NeuralNet_2L: validate") - println (FitM.showFitMap (mod.validate ()(), QoF.values.map (_.toString))) + println (Fit.showFitMap (mod.validate ()()._2)) banner ("Concrete - NeuralNet_2L: crossValidate") val stats = mod.crossValidate () @@ -357,7 +358,7 @@ import Example_AutoMPG._ println (mod.summary2 ()) // parameter/coefficient statistics banner ("AutoMPG - NeuralNet_2L: TnT validate") - println (FitM.showFitMap (mod.validate ()(), QoF.values.map (_.toString))) + println (Fit.showFitMap (mod.validate ()()._2)) banner ("AutoMPG - NeuralNet_2L: crossValidate") val stats = mod.crossValidate () @@ -389,8 +390,7 @@ end neuralNet_2LTest3 // val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, smape, R^2 cv val k = cols.size println (s"k = $k, n = ${ox.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "smape", "R^2 cv"), - s"R^2 vs n for ${mod.modelName}", lines = true) + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for ${mod.modelName}", lines = true) println (s"rSq = $rSq") end neuralNet_2LTest4 @@ -423,8 +423,7 @@ end neuralNet_2LTest4 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, smape, R^2 cv val k = cols.size println (s"k = $k, n = ${ox.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 
bar", "smape", "R^2 cv"), - s"R^2 vs n for ${mod.modelName} with $tech", lines = true) + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for ${mod.modelName} with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -448,7 +447,7 @@ end neuralNet_2LTest5 mod.trainNtest2 ()() // train and test the model - with auto-tuning banner ("AutoMPG Validation Test") - println (FitM.showFitMap (mod.validate ()(), QoF.values.map (_.toString))) + println (Fit.showFitMap (mod.validate ()()._2)) end for end neuralNet_2LTest6 @@ -530,7 +529,7 @@ end neuralNet_2LTest7 val yp = f.fM (u) // predicted response from calculation for sigmoid val ε = y - yp // error matrix val δ = f.dM(yp) ⊙ ε // delta matrix for y - b += x.𝐓 * δ * η // parameter update (transpose (𝐓)) + b += x.ᵀ * δ * η // parameter update (transpose (ᵀ)) val sse0 = ε(?, 0).normSq // sum of squared errors for column 0 val sse1 = ε(?, 1).normSq // sum of squared errors for column 1 @@ -554,7 +553,7 @@ end neuralNet_2LTest8 /** The `neuralNet_2LTest9` main function is used to test the `NeuralNet_2L` class. * It compares `NeuralNet_2L.perceptron` using sigmoid with `TransRegression` using logit. 
* > runMain scalation.modeling.neuralnet.neuralNet_2LTest9 - */ + * @main def neuralNet_2LTest9 (): Unit = // val x = VectorD (1, 2, 3, 4, 5, 6) @@ -575,4 +574,5 @@ end neuralNet_2LTest8 tr.trainNtest ()() // train and test the model end neuralNet_2LTest9 + */ diff --git a/src/main/scala/scalation/modeling/neuralnet/NeuralNet_3L.scala b/src/main/scala/scalation/modeling/neuralnet/NeuralNet_3L.scala index edde3c705..30a79ea11 100644 --- a/src/main/scala/scalation/modeling/neuralnet/NeuralNet_3L.scala +++ b/src/main/scala/scalation/modeling/neuralnet/NeuralNet_3L.scala @@ -52,7 +52,7 @@ class NeuralNet_3L (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, f: AFF = f_sigmoid, f1: AFF = f_id, val itran: FunctionM2M = null) extends PredictorMV (x, y, fname_, hparam) - with Fit (dfm = x.dim2, df = x.dim - x.dim2): // under-estimate of degrees of freedom + with Fit (dfr = x.dim2, df = x.dim - x.dim2): // under-estimate of degrees of freedom private val eta = hp("eta").toDouble // learning rate val opti = new OPTIMIZER () // parameter optimizer @@ -65,7 +65,7 @@ class NeuralNet_3L (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, bb = Array (new NetParam (weightMat (n, nz), new VectorD (nz)), // parameters (weights & biases) in to hid new NetParam (weightMat (nz, ny), new VectorD (ny))) // parameters (weights & biases) hid to out - modelName = s"NeuralNet_3L_${f.name}_${f1.name}" + _modelName = s"NeuralNet_3L_${f.name}_${f1.name}" //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Given training data x_ and y_, fit the parameters bb. @@ -147,8 +147,9 @@ class NeuralNet_3L (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. 
* @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - def buildModel (x_cols: MatrixD): NeuralNet_3L = + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): NeuralNet_3L = new NeuralNet_3L (x_cols, y, null, -1, hparam, f, f1, itran) end buildModel @@ -313,7 +314,7 @@ end neuralNet_3LTest println (mod.summary2 ()) // parameter/coefficient statistics banner ("Concrete - NeuralNet_3L: validate") - println (FitM.showFitMap (mod.validate ()(), QoF.values.map (_.toString))) + println (Fit.showFitMap (mod.validate ()()._2)) banner ("Concrete - NeuralNet_3L: crossValidate") val stats = mod.crossValidate () @@ -351,7 +352,7 @@ end neuralNet_3LTest2 mod.opti.plotLoss ("NeuralNet_3L") // loss function vs epochs banner ("AutoMPG - NeuralNet_3L: TNT validate") - println (FitM.showFitMap (mod.validate ()(), QoF.values.map (_.toString))) + println (Fit.showFitMap (mod.validate ()()._2)) /* banner ("AutoMPG - NeuralNet_3L: crossValidate") @@ -387,8 +388,7 @@ end neuralNet_3LTest3 // val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, smape, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "smape", "R^2 cv"), - s"R^2 vs n for ${mod.modelName}", lines = true) + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for ${mod.modelName}", lines = true) println (s"rSq = $rSq") end neuralNet_3LTest4 @@ -423,8 +423,7 @@ end neuralNet_3LTest4 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, smape, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "smape", "R^2 cv"), - s"R^2 vs n for ${mod.modelName} with $tech", lines = true) + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for ${mod.modelName} with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -452,7 +451,7 @@ end neuralNet_3LTest5 
mod.trainNtest2 ()() // train and test the model - with auto-tuning banner ("AutoMPG Validation Test") - println (FitM.showFitMap (mod.validate ()(), QoF.values.map (_.toString))) + println (Fit.showFitMap (mod.validate ()()._2)) end for end neuralNet_3LTest6 @@ -487,10 +486,10 @@ end neuralNet_3LTest6 val nz = 2 // number of hidden nodes banner (s"AutoMPG NeuralNet_3L") val mod = new NeuralNet_3L (xs, yy, x46_fname, nz) // create model without intercept - val (yp, qof) = mod.trainNtest2 ()() // train and test the model - with auto-tuning + val yp = mod.trainNtest2 ()()._1 // train and test the model - with auto-tuning banner ("AutoMPG Validation Test") - println (FitM.showFitMap (mod.validate ()(), QoF.values.map (_.toString))) + println (Fit.showFitMap (mod.validate ()()._2)) banner ("Compare Model with Formula f_nn") val a = MatrixD ((2, 2), -2.12262, -0.743867, // weights: input -> hidden layer @@ -509,10 +508,9 @@ end neuralNet_3LTest6 println (s"(yp_ - yp2).norm = ${(yp_ - yp2).norm}") // norm of difference new Plot (null, yp_, yp2, "yp_ (black/model) vs. 
yp2 (red/formula)") - def ff (x: VectorD, i: Int): Double = + def ff (x: VectorD): Double = val xx = (x(0) - 1.61300) * (4/3.537) - 2 val yy = (x(1) - 70) * (4.0/12) - 2 -// println (s"ff: [$xx, $yy], xs($i) = ${xs(i)}") val u = -2.12262 * xx - 0.200314 * yy - 2.15785 val v = -0.743867 * xx + 1.62988 * yy - 1.65227 val uu = 1.0 / (1.0 + exp(-u)) @@ -520,14 +518,9 @@ end neuralNet_3LTest6 15.7250 * uu + 13.1971 * vv + 13.4702 end ff - val yp3 = VectorD (for i <- xs.indices yield ff (x46(i), i)) // compute the response - - def ff2 (x: VectorD, i: Int): Double = -// val xx = (x(0) - 1.61300) * (4/3.537) - 2 -// val yy = (x(1) - 70) * (4.0/12) - 2 -// val u = -2.12262 * xx - 0.200314 * yy - 2.15785 -// val v = -0.743867 * xx + 1.62988 * yy - 1.65227 + val yp3 = VectorD (for i <- xs.indices yield ff (x46(i))) // compute the response + def ff2 (x: VectorD): Double = 15.7250 / (1.0 + exp (2.12262 * ((x(0) - 1.61300) * (4/3.537) - 2) + 0.200314 * ((x(1) - 70) * (4.0/12) - 2) + 2.15785)) + 13.1971 / (1.0 + exp (0.743867 * ((x(0) - 1.61300) * (4/3.537) - 2) - @@ -535,7 +528,7 @@ end neuralNet_3LTest6 13.4702 end ff2 - val yp4 = VectorD (for i <- xs.indices yield ff2 (x46(i), i)) // compute the response + val yp4 = VectorD (for i <- xs.indices yield ff2 (x46(i))) // compute the response println (s"(y - yp_).norm = ${(y - yp_).norm}") // norm of difference println (s"(y - yp2).norm = ${(y - yp2).norm}") // norm of difference @@ -790,9 +783,9 @@ end neuralNet_3LTest10 // backward val ε = y - yp // error matrix val δ1 = ε *~ f1.dM (yp) // delta1 @ output layer - val δ0 = δ1 * b.𝐓 *~ f0.dM (z) // delta0 @ hidden layer (transpose (𝐓)) - b += z.𝐓 * δ1 * η // parameter update Z -> Y - a += x.𝐓 * δ0 * η // parameter update X -> Z + val δ0 = δ1 * b.ᵀ *~ f0.dM (z) // delta0 @ hidden layer (transpose (ᵀ)) + b += z.ᵀ * δ1 * η // parameter update Z -> Y + a += x.ᵀ * δ0 * η // parameter update X -> Z val sse0 = ε(?, 0).normSq // sum of squared errors for column 0 val sse1 = ε(?, 1).normSq 
// sum of squared errors for column 1 diff --git a/src/main/scala/scalation/modeling/neuralnet/NeuralNet_3L_C2.scala b/src/main/scala/scalation/modeling/neuralnet/NeuralNet_3L_C2.scala index 1158cfd88..03af2e335 100644 --- a/src/main/scala/scalation/modeling/neuralnet/NeuralNet_3L_C2.scala +++ b/src/main/scala/scalation/modeling/neuralnet/NeuralNet_3L_C2.scala @@ -46,13 +46,13 @@ class NeuralNet_3L_C2 (x: MatrixD, y: VectorI, fname_ : Array [String] = null, with FitC (): private val debug = debugf ("NeuralNet_3L_C2", true) // debug function - private val cThresh = hparam ("cThresh").toDouble // classification/decision threshold + private val cThresh = hparam("cThresh").toDouble // classification/decision threshold private val ym = fromVector (y.toDouble) // y as a matrix private val nn3 = new NeuralNet_3L (x, ym, fname_, nz, hparam, f, f_sigmoid) - modelName = s"NeuralNet_3L_C2_${f.name}_sigmoid" // name of the model + _modelName = s"NeuralNet_3L_C2_${f.name}_sigmoid" // name of the model //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Train the classifier, i.e., calculate statistics and create conditional diff --git a/src/main/scala/scalation/modeling/neuralnet/NeuralNet_3L_Ck.scala b/src/main/scala/scalation/modeling/neuralnet/NeuralNet_3L_Ck.scala index db6382eaf..ebec37016 100644 --- a/src/main/scala/scalation/modeling/neuralnet/NeuralNet_3L_Ck.scala +++ b/src/main/scala/scalation/modeling/neuralnet/NeuralNet_3L_Ck.scala @@ -48,9 +48,9 @@ class NeuralNet_2L_Ck (x: MatrixD, y: MatrixI, fname_ : Array [String] = null, extends Classifier (x, y(0).toInt, fname_, y.dim2, cname_, hparam) // FIX y(0) - may need a new trait with FitC (): - private val debug = debugf ("NeuralNet_2L_Ck", true) // debug function + private val debug = debugf ("NeuralNet_2L_Ck", true) // debug function - modelName = s"NeuralNet_2L_Ck_${nz}_${f.name}_softmax" // name of the model + _modelName = s"NeuralNet_2L_Ck_${nz}_${f.name}_softmax" // name of the model 
def predictI (z: VectorD): Int = ??? def test (x_ : MatrixD, y_ : VectorI): (VectorI, VectorD) = ??? diff --git a/src/main/scala/scalation/modeling/neuralnet/NeuralNet_XL.scala b/src/main/scala/scalation/modeling/neuralnet/NeuralNet_XL.scala index 7cbdd3d3b..ff1054bef 100644 --- a/src/main/scala/scalation/modeling/neuralnet/NeuralNet_XL.scala +++ b/src/main/scala/scalation/modeling/neuralnet/NeuralNet_XL.scala @@ -57,7 +57,7 @@ class NeuralNet_XL (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, f: Array [AFF] = Array (f_sigmoid, f_sigmoid, f_id), val itran: FunctionM2M = null) extends PredictorMV (x, y, fname_, hparam) - with Fit (dfm = x.dim2, df = x.dim - x.dim2): // under-estimate of degrees of freedom + with Fit (dfr = x.dim2, df = x.dim - x.dim2): // under-estimate of degrees of freedom private val flaw = flawf ("NeuralNet_XL") // flaw function private val eta = hp("eta").toDouble // learning rate @@ -72,7 +72,6 @@ class NeuralNet_XL (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, if nz.length + 1 != nl then flaw ("init", "count mismatch among number of layers and activation functions") - end if if nl < 2 then flaw ("init", s"must have at least two ACTIVE layers, but nl = $nl") @@ -84,7 +83,7 @@ class NeuralNet_XL (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, weightVec (sizes(l+1))) // biases per active layer end for - modelName = s"NeuralNet_XL_${stringOf (f.map (_.name))}" + _modelName = s"NeuralNet_XL_${stringOf (f.map (_.name))}" //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the network parameters (weights and biases) for the given layer. @@ -190,8 +189,9 @@ class NeuralNet_XL (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. 
* @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - def buildModel (x_cols: MatrixD): NeuralNet_XL = + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): NeuralNet_XL = new NeuralNet_XL (x_cols, y, null, null, hparam, f, itran) end buildModel @@ -351,7 +351,7 @@ end neuralNet_XLTest println (mod.summary2 ()) // parameter/coefficient statistics banner ("Concrete - NeuralNet_XL: validate") - println (FitM.showFitMap (mod.validate ()(), QoF.values.map (_.toString))) + println (Fit.showFitMap (mod.validate ()()._2)) banner ("Concrete - NeuralNet_XL: crossValidate") val stats = mod.crossValidate () @@ -390,7 +390,7 @@ end neuralNet_XLTest2 mod.opti.plotLoss ("NeuralNet_XL") // loss function vs epochs banner ("AutoMPG - NeuralNet_XL: TnT validate") - println (FitM.showFitMap (mod.validate ()(), QoF.values.map (_.toString))) + println (Fit.showFitMap (mod.validate ()()._2)) /* banner ("AutoMPG - NeuralNet_XL: crossValidate") @@ -426,8 +426,7 @@ end neuralNet_XLTest3 // val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, smape, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "smape", "R^2 cv"), - s"R^2 vs n for ${mod.modelName}", lines = true) + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for ${mod.modelName}", lines = true) println (s"rSq = $rSq") end neuralNet_XLTest4 @@ -462,8 +461,7 @@ end neuralNet_XLTest4 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, smape, R^2 cv val k = cols.size println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "smape", "R^2 cv"), - s"R^2 vs n for ${mod.modelName} with $tech", lines = true) + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for ${mod.modelName} with $tech", lines = true) println (s"$tech: rSq = $rSq") end for @@ -492,7 +490,7 @@ end neuralNet_XLTest5 
mod.trainNtest2 ()() // train and test the model - with auto-tuning banner ("AutoMPG Validation Test") - println (FitM.showFitMap (mod.validate ()(), QoF.values.map (_.toString))) + println (Fit.showFitMap (mod.validate ()()._2)) end for end neuralNet_XLTest6 diff --git a/src/main/scala/scalation/modeling/neuralnet/NeuralNet_XLT.scala b/src/main/scala/scalation/modeling/neuralnet/NeuralNet_XLT.scala index 6364e46ac..ce177aee6 100644 --- a/src/main/scala/scalation/modeling/neuralnet/NeuralNet_XLT.scala +++ b/src/main/scala/scalation/modeling/neuralnet/NeuralNet_XLT.scala @@ -51,7 +51,7 @@ class NeuralNet_XLT (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, else new NetParam (weightMat (sizes(l), sizes(l+1)), // parameters weights & weightVec (sizes(l+1)))).toArray // biases per active layer - modelName = s"NeuralNet_XLT_${stringOf (f.map (_.name))}" + _modelName = s"NeuralNet_XLT_${stringOf (f.map (_.name))}" println (s"Create a NeuralNet_XLT with ${x.dim2} input, ${stringOf (nz)} hidden and ${y.dim2} output nodes") diff --git a/src/main/scala/scalation/modeling/neuralnet/Optimizer.scala b/src/main/scala/scalation/modeling/neuralnet/Optimizer.scala index 4d60294ba..ae451d58b 100644 --- a/src/main/scala/scalation/modeling/neuralnet/Optimizer.scala +++ b/src/main/scala/scalation/modeling/neuralnet/Optimizer.scala @@ -32,7 +32,7 @@ object Optimizer: */ val hp = new HyperParameter hp += ("eta", 0.1, 0.1) // learning/convergence rate (smaller for Adam) - hp += ("bSize", 20, 20) // mini-batch size, common range 10 to 40 + hp += ("bSize", 20, 20) // mini-batch size, common range 16 to 64 hp += ("maxEpochs", 400, 400) // maximum number of epochs/iterations hp += ("lambda", 0.01, 0.01) // regularization/shrinkage hyper-parameter hp += ("upLimit", 4, 4) // up-limit hyper-parameter for stopping rule @@ -122,7 +122,6 @@ trait Optimizer extends MonitorLoss with StoppingRule: best = result // save it, if better b_best = (for l <- b.indices yield b(l).copy).toArray // save 
best parameters println (s"auto_optimize: b = ${stringOf (b)}") - end if end if end for diff --git a/src/main/scala/scalation/modeling/neuralnet/Optimizer_Adam.scala b/src/main/scala/scalation/modeling/neuralnet/Optimizer_Adam.scala index 1cd451f31..1cd3260bf 100644 --- a/src/main/scala/scalation/modeling/neuralnet/Optimizer_Adam.scala +++ b/src/main/scala/scalation/modeling/neuralnet/Optimizer_Adam.scala @@ -74,7 +74,6 @@ class Optimizer_Adam extends Optimizer: go = false else if epoch % ADJUST_PERIOD == 0 then eta *= ADJUST_FACTOR // adjust the learning rate - end if } // cfor //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -89,7 +88,7 @@ class Optimizer_Adam extends Optimizer: val yp = f.fM(b * x) // prediction: Yp = f(XB) val ε = yp - y // negative of error matrix val δ = f.dM(yp) ⊙ ε // delta matrix for y - val g = x.𝐓 * δ // + b.w * l -- gradient matrix (transpose (𝐓)) + val g = x.ᵀ * δ // + b.w * l -- gradient matrix (transpose (ᵀ)) p = g * (1 - β1) + p * β1 // update biased first moment estimate v = v * β2 + g ~^ 2 * (1 - β2) // update biased second raw moment estimate @@ -162,7 +161,6 @@ class Optimizer_Adam extends Optimizer: go = false else if epoch % ADJUST_PERIOD == 0 then eta *= ADJUST_FACTOR // adjust the learning rate - end if } // cfor //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -179,8 +177,8 @@ class Optimizer_Adam extends Optimizer: val δ1 = f1.dM(yp) ⊙ ε // delta matrix for y val δ0 = f.dM(z) ⊙ (δ1 * b.w.transpose) // delta matrix for z - val gA = x.𝐓 * δ0 // gradient for a (transpose (𝐓)) - val gB = z.𝐓 * δ1 // gradient for b + val gA = x.ᵀ * δ0 // gradient for a (transpose (ᵀ)) + val gB = z.ᵀ * δ1 // gradient for b // Update biased first moment estimates for a and b pA = gA * (1 - β1) + pA * β1 @@ -284,7 +282,6 @@ class Optimizer_Adam extends Optimizer: go = false else if epoch % ADJUST_PERIOD == 0 then eta *= ADJUST_FACTOR // adjust the learning rate - end if } // 
cfor //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -306,7 +303,7 @@ class Optimizer_Adam extends Optimizer: val α = eta / x.dim // learning rate scaled by batch size for l <- layers do - val g = z(l).𝐓 * δ(l) // compute the gradient for each layer (transpose (𝐓)) + val g = z(l).ᵀ * δ(l) // compute the gradient for each layer (transpose (ᵀ)) val g_bias = δ(l).mean // compute the gradient for the biases p(l) = g * (1 - β1) + p(l) * β1 // update biased first moment estimates for weights diff --git a/src/main/scala/scalation/modeling/neuralnet/Optimizer_SGD.scala b/src/main/scala/scalation/modeling/neuralnet/Optimizer_SGD.scala index 3ebaa83ba..032872558 100644 --- a/src/main/scala/scalation/modeling/neuralnet/Optimizer_SGD.scala +++ b/src/main/scala/scalation/modeling/neuralnet/Optimizer_SGD.scala @@ -67,7 +67,6 @@ class Optimizer_SGD extends Optimizer: go = false else if epoch % ADJUST_PERIOD == 0 then η *= ADJUST_FACTOR // adjust the learning rate - end if } // cfor //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -82,7 +81,7 @@ class Optimizer_SGD extends Optimizer: val ε = yp - y // negative of error matrix val δ = f.dM (yp) ⊙ ε // delta matrix for y - x.𝐓 * δ * α // return change in parameters (transpose (𝐓)) + x.ᵀ * δ * α // return change in parameters (transpose (ᵀ)) end updateWeight debug ("optimize2", s"parameters b = $b") @@ -133,7 +132,6 @@ class Optimizer_SGD extends Optimizer: go = false else if epoch % ADJUST_PERIOD == 0 then η *= ADJUST_FACTOR // adjust the learning rate - end if } // cfor //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -148,10 +146,10 @@ class Optimizer_SGD extends Optimizer: val yp = f1.fM (b * z) // prediction: Yp = f(ZB) val ε = yp - y // negative of the error matrix val δ1 = f1.dM (yp) ⊙ ε // delta matrix for y - val δ0 = f.dM (z) ⊙ (δ1 * b.w.𝐓) // delta matrix for z (transpose (𝐓)) + val δ0 = f.dM (z) ⊙ (δ1 * b.w.ᵀ) // 
delta matrix for z (transpose (ᵀ)) - (NetParam (x.𝐓 * δ0 * α, δ0.mean * η), // change to a parameters (weights and biases) - NetParam (z.𝐓 * δ1 * α, δ1.mean * η)) // change to b parameters (weights and biases) + (NetParam (x.ᵀ * δ0 * α, δ0.mean * η), // change to a parameters (weights and biases) + NetParam (z.ᵀ * δ1 * α, δ1.mean * η)) // change to b parameters (weights and biases) end updateWeight debug ("optimize3", s"parameters a = $a \n b = $b") @@ -203,7 +201,6 @@ class Optimizer_SGD extends Optimizer: go = false else if epoch % ADJUST_PERIOD == 0 then η *= ADJUST_FACTOR // adjust the learning rate - end if } // cfor //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -221,11 +218,11 @@ class Optimizer_SGD extends Optimizer: val ε = yp - y // -E where E is the error matrix δ(nl-1) = f.last.dM (yp) ⊙ ε // delta for the last layer before output for l <- nl-2 to 0 by -1 do - δ(l) = f(l).dM (z(l+1)) ⊙ (δ(l+1) * b(l+1).w.𝐓) // deltas for all previous hidden layers (transpose (𝐓)) + δ(l) = f(l).dM (z(l+1)) ⊙ (δ(l+1) * b(l+1).w.ᵀ) // deltas for all previous hidden layers (transpose (ᵀ)) end for for l <- layers do - b(l) -= (z(l).𝐓 * δ(l) * α, // update parameters (weights + b(l) -= (z(l).ᵀ * δ(l) * α, // update parameters (weights δ(l).mean * η) // and biases end for diff --git a/src/main/scala/scalation/modeling/neuralnet/Optimizer_SGDM.scala b/src/main/scala/scalation/modeling/neuralnet/Optimizer_SGDM.scala index 9e5ca7334..e82afb767 100644 --- a/src/main/scala/scalation/modeling/neuralnet/Optimizer_SGDM.scala +++ b/src/main/scala/scalation/modeling/neuralnet/Optimizer_SGDM.scala @@ -70,7 +70,6 @@ class Optimizer_SGDM extends Optimizer: go = false else if epoch % ADJUST_PERIOD == 0 then η *= ADJUST_FACTOR // adjust the learning rate - end if } // cfor //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -85,7 +84,7 @@ class Optimizer_SGDM extends Optimizer: val yp = f.fM (b * x) // prediction: Yp = 
f(XB) val ε = yp - y // negative of error matrix val δ = f.dM (yp) ⊙ ε // delta matrix for y - val g = x.𝐓 * δ // gradient matrix (transpose (𝐓)) + val g = x.ᵀ * δ // gradient matrix (transpose (ᵀ)) p = g * (1 - β) + p * β // update momentum-based aggregated gradient (g * (1 - ν) + p * ν) * α // parameter update amount (to be subtracted) @@ -144,7 +143,6 @@ class Optimizer_SGDM extends Optimizer: go = false else if epoch % ADJUST_PERIOD == 0 then η *= ADJUST_FACTOR // adjust the learning rate - end if } // cfor //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -159,9 +157,9 @@ class Optimizer_SGDM extends Optimizer: val yp = f1.fM (b * z) // prediction: Yp = f(ZB) val ε = yp - y // negative of the error matrix val δ1 = f1.dM (yp) ⊙ ε // delta matrix for y - val δ0 = f.dM (z) ⊙ (δ1 * b.w.𝐓) // delta matrix for z (transpose (𝐓)) - val g1 = z.𝐓 * δ1 // gradient matrix for y to z - val g0 = x.𝐓 * δ0 // gradient matrix for z to x + val δ0 = f.dM (z) ⊙ (δ1 * b.w.ᵀ) // delta matrix for z (transpose (ᵀ)) + val g1 = z.ᵀ * δ1 // gradient matrix for y to z + val g0 = x.ᵀ * δ0 // gradient matrix for z to x pa = g0 * (1 - β) + pa * β // update momentum-based aggregated gradient pb = g1 * (1 - β) + pb * β // update momentum-based aggregated gradient @@ -224,7 +222,6 @@ class Optimizer_SGDM extends Optimizer: go = false else if epoch % ADJUST_PERIOD == 0 then η *= ADJUST_FACTOR // adjust the learning rate - end if } // cfor //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -241,10 +238,10 @@ class Optimizer_SGDM extends Optimizer: val yp = z.last // predicted value of y val ε = yp - y // negative of the error matrix δ(nl-1) = f.last.dM (yp) ⊙ ε // delta for the last layer before output - g(nl-1) = z(nl-1).𝐓 * δ(nl-1) // gradient for the last layer before output (transpose (𝐓)) + g(nl-1) = z(nl-1).ᵀ * δ(nl-1) // gradient for the last layer before output (transpose (ᵀ)) for l <- nl-2 to 0 by -1 do - δ(l) = f(l).dM 
(z(l+1)) ⊙ (δ(l+1) * b(l+1).w.𝐓) // deltas for all previous hidden layers - g(l) = z(l).𝐓 * δ(l) // corresponding gradient matrices + δ(l) = f(l).dM (z(l+1)) ⊙ (δ(l+1) * b(l+1).w.ᵀ) // deltas for all previous hidden layers + g(l) = z(l).ᵀ * δ(l) // corresponding gradient matrices end for for l <- layers do diff --git a/src/main/scala/scalation/modeling/neuralnet/PredictorMV.scala b/src/main/scala/scalation/modeling/neuralnet/PredictorMV.scala index 99b5728db..78e069676 100644 --- a/src/main/scala/scalation/modeling/neuralnet/PredictorMV.scala +++ b/src/main/scala/scalation/modeling/neuralnet/PredictorMV.scala @@ -28,8 +28,7 @@ import scalation.mathstat._ * a bias vector. * @see `NetParam` * @param x the input/data m-by-n matrix - * (augment with a first column of ones to include intercept in model - * or use bias) + * (augment with a first column of ones to include intercept in model or use bias) * @param y the response/output m-by-ny matrix * @param fname the feature/variable names (if null, use x_j's) * @param hparam the hyper-parameters for the model/network @@ -45,9 +44,9 @@ trait PredictorMV (x: MatrixD, y: MatrixD, protected var fname: Array [String], if x != null then if x.dim != y.dim then flaw ("init", "row dimensions of x and y are incompatible") + if x.dim2 < 1 then flaw ("init", s"dim2 = ${x.dim2} of the x matrix must be at least 1") if x.dim <= x.dim2 then flaw ("init", s"PredictorMV requires more rows ${x.dim} than columns ${x.dim2}") - end if private val MIN_FOLDS = 3 // minimum number of folds for cross validation private val stream = 0 // random number stream to use @@ -58,6 +57,11 @@ trait PredictorMV (x: MatrixD, y: MatrixD, protected var fname: Array [String], if x != null && fname == null then fname = x.indices2.map ("x" + _).toArray // default feature/variable names + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the set of columns (numbers) for the features in this model. 
+ */ + def mcols: LSET [Int] = LSET.range (0, getX.dim2) + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the used data matrix x. Mainly for derived classes where x is expanded * from the given columns in x_. @@ -172,8 +176,7 @@ trait PredictorMV (x: MatrixD, y: MatrixD, protected var fname: Array [String], def makePlots (yy: MatrixD, yp: MatrixD): Unit = val (ryy, ryp) = orderByYY (yy, yp) // order by yy for k <- ryy.indices2 do - new Plot (null, ryy(?, k), ryp(?, k), s"$modelName: y$k black/actual vs. red/predicted") - end for + new Plot (null, ryy(?, k), ryp(?, k), s"$modelName: y$k black/actual vs. red/predicted", lines = true) end makePlots //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -192,7 +195,7 @@ REPORT ---------------------------------------------------------------------------- parameter bb = ${stringOf (parameters)} ---------------------------------------------------------------------------- - fitMap qof = ${FitM.showFitMap (ftMat, QoF.values.map (_.toString))} + fitMap qof = ${Fit.showFitMap (ftMat)} ---------------------------------------------------------------------------- """ end report @@ -245,14 +248,20 @@ REPORT // F E A T U R E S E L E C T I O N + // @see givens in `modeling.FeatureSelection` + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. * Override for models that support feature section. * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - def buildModel (x_cols: MatrixD): PredictorMV & Fit + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): PredictorMV & Fit - private var theBest = BestStep ()() // record the best model from feature selection + protected val USE_MEAN = true // use mean vs. 
first of qof for feature selection + private var theBest = BestStep ()() // record the best model from feature selection + private val t_rng = if fullset_FS then 0 until y.dim // use full dataset for Feature Selection (FS) + else 0 until Model.trSize (y.dim) // use training set for Feature Selection (FS) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Reset the best-step to default @@ -283,15 +292,14 @@ REPORT * @param fit_l the fit vector for the l-th iteration * @param mod_l the predictive model for the l-th iteration * // FIX - wrong param & can remove? - */ private def updateQoF (rSq: MatrixD, l: Int, cross: Boolean, best: BestStep): Unit = rSq(l) = if cross then Fit.qofVector (best.qof, best.mod.crossValidate ()) // results for model mod_l, with cross-validation else Fit.qofVector (best.qof, null) // results for model mod_l, no cross-validation - end if end updateQoF + */ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Perform forward selection to find the most predictive variable to add the @@ -307,30 +315,35 @@ REPORT for j <- x.indices2 if ! 
(cols contains j) do val cols_j = cols union LSET (j) // try adding variable/column x_j val x_cols = x(?, cols_j) // x projected onto cols_j columns - val mod_j = buildModel (x_cols) // regress with x_j added - mod_j.train () // train model - best = best.better (j, mod_j.test ()._2(?, 0), mod_j) // which is better + val mod_j = buildModel (x_cols, newFname (fname, cols_j)) // regress with x_j added + + val (x_tr, y_tr) = (x_cols(t_rng), y(t_rng)) // get full/training data + mod_j.train (x_tr, y_tr) // train model + val qof = mod_j.test (x_tr, y_tr)._2 // get test qof for mod_j + if USE_MEAN then + best = best.better (j, qof.meanRow, mod_j, cols_j) // which is better based on mean of all targets + else + best = best.better (j, qof(?, 0), mod_j, cols_j) // which is better based of first target end for if best.col == -1 then flaw ("forwardSel", "could not find a variable x_j to add: best.col = -1") - end if best end forwardSel //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform forward selection to find the most predictive variables to have + /** Perform FORWARD SELECTION to find the MOST predictive variables to have * in the model, returning the variables added and the new Quality of Fit (QoF) * measures for all steps. * @see `Fit` for index of QoF measures. - * @param cross whether to include the cross-validation QoF measure + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") * @param qk index of Quality of Fit (QoF) to use for comparing quality */ - def forwardSelAll (cross: Boolean = true)(using qk: Int): (LSET [Int], MatrixD) = + def forwardSelAll (cross: String = "many")(using qk: Int): (LSET [Int], MatrixD) = resetBest () - val rSq = new MatrixD (x.dim2 - 1, Fit.qofVectorSize) // QoF: R^2, R^2 Bar, smape, R^2 cv - val cols = LSET (0) // start with x_0 in model -// updateQoF (rSq, 0, cross, select0 (qk)) // update Qof results for 0-th variable FIX? 
+ val rSq = new MatrixD (x.dim2 - 1, Fit.qofVectorSize) // QoF: R^2, R^2 Bar, smape, R^2 cv + val cols = LSET (0) // start with x_0 in model +// updateQoF (rSq, 0, cross, select0 (qk)) // update Qof results for 0-th variable FIX? banner (s"forwardSelAll: (l = 0) INITIAL variable (0, ${fname(0)}) => cols = $cols") @@ -365,14 +378,19 @@ REPORT for j <- first until x.dim2 if cols contains j do val cols_j = cols diff LSET (j) // try removing variable/column x_j val x_cols = x(?, cols_j) // x projected onto cols_j columns - val mod_j = buildModel (x_cols) // regress with x_j added - mod_j.train () // train model - best = best.better (j, mod_j.test ()._2(?, 0), mod_j) // which is better + val mod_j = buildModel (x_cols, newFname (fname, cols_j)) // regress with x_j added + + val (x_tr, y_tr) = (x_cols(t_rng), y(t_rng)) // get full/training data + mod_j.train (x_tr, y_tr) // train model + val qof = mod_j.test (x_tr, y_tr)._2 // get test qof for mod_j + if USE_MEAN then + best = best.better (j, qof.meanRow, mod_j, cols_j) // which is better based on mean of all targets + else + best = best.better (j, qof(?, 0), mod_j, cols_j) // which is better based of first target end for if best.col == -1 then flaw ("backwardElim", "could not find a variable x_j to eliminate: best.col = -1") - end if best end backwardElim @@ -381,22 +399,28 @@ REPORT * backward elimination. 
*/ private def fullModel (qk: Int): BestStep = - val mod_a = buildModel (x) // regress with all variables x_j - mod_a.train () // train model - val qof_a = mod_a.test ()._2(?, 0) - BestStep (-1, qof_a, mod_a)(qof_a(qk)) // results for full model + val mod_a = buildModel (x, fname) // regress with all variables x_j + + val (x_tr, y_tr) = (x(t_rng), y(t_rng)) // get full/training data + mod_a.train (x_tr, y_tr) // train model + val qof_a = mod_a.test (x_tr, y_tr)._2 // get test qof for mod_a + val qof_ = if USE_MEAN then + qof_a.meanRow // results for full model based on mean of all targets + else + qof_a(?, 0) // results for full model based of first target + BestStep (-1, qof_, mod_a)(qof_(qk)) // return best step end fullModel //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform backward elimination to find the least predictive variables to remove + /** Perform BACKWARD ELIMINATION to find the LEAST predictive variables to remove * from the full model, returning the variables left and the new Quality of Fit (QoF) * measures for all steps. * @see `Fit` for index of QoF measures. 
* @param first first variable to consider for elimination - * @param cross whether to include the cross-validation QoF measure + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") * @param qk index of Quality of Fit (QoF) to use for comparing quality */ - def backwardElimAll (first: Int = 1, cross: Boolean = true)(using qk: Int): (LSET [Int], MatrixD) = + def backwardElimAll (first: Int = 1, cross: String = "many")(using qk: Int): (LSET [Int], MatrixD) = resetBest () val rSq = new MatrixD (x.dim2 - 1, Fit.qofVectorSize) // R^2, R^2 Bar, smape, R^2 cv val cols = LSET.range (0, x.dim2) // start with all x_j in model @@ -422,15 +446,15 @@ REPORT end backwardElimAll //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform stepwise regression to find the most predictive variables to have - * in the model, returning the variables left and the new Quality of Fit (QoF) + /** Perform STEPWISE SELECTION to find a GOOD COMBINATION of predictive variables to have + * in the model, returning the variables selected and the new Quality of Fit (QoF) * measures for all steps. At each step it calls 'forwardSel' and 'backwardElim' * and takes the best of the two actions. Stops when neither action yields improvement. * @see `Fit` for index of QoF measures. 
- * @param cross whether to include the cross-validation QoF measure + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") * @param qk index of Quality of Fit (QoF) to use for comparing quality */ - def stepwiseSelAll (cross: Boolean = true, swap: Boolean = true)(using qk: Int): + def stepwiseSelAll (cross: String = "many", swap: Boolean = true)(using qk: Int): (LSET [Int], MatrixD) = resetBest () val rSq = new MatrixD (x.dim2 - 1, Fit.qofVectorSize) // QoF: R^2, R^2 Bar, smape, R^2 cv @@ -482,7 +506,6 @@ REPORT println (s"\nstepwiseSelAll: (l = $l) SWAP variable $bestb with $bestf") else break () // can't find a better model -> quit - end if end if end for } // breakable @@ -501,14 +524,33 @@ REPORT * @param in the variable to swap in */ private def swapVars (cols: LSET [Int], out: Int, in: Int, qk: Int): BestStep = - val cols_ = cols diff LSET (out) union LSET (in) // swap out var with in var + val cols_ = cols diff LSET (out) union LSET (in) // swap out var with in var val x_cols = x(?, cols_) // x projected onto cols_j columns - val mod_j = buildModel (x_cols) // regress with x_out removed and x_in added + val mod_j = buildModel (x_cols, newFname (fname, cols_)) // regress with x_out removed and x_in added mod_j.train () // train model val qof_in = mod_j.test ()._2(?, 0) BestStep (in, qof_in, mod_j)(qof_in(qk)) // candidate step end swapVars + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform BEAM SEARCH SELECTION to find a GOOD COMBINATION of predictive features/variables to + * have in the model, returning the top k sets of features/variables selected and the new Quality of + * Fit (QoF) measures/metrics for all steps. At each step, iterate over the models in the beam + * (top k) and create candidates by adding features (phase 1) and then removing (phase 2). + * From all the candidates, keep the best k and start a new iteration. 
Stops when there is + * no improvement in any of top k (or the maximum number of features is reached. + * @see `Fit` for index of QoF measures/metrics. + * @param cross indicator to include the cross-validation/validation QoF measure (defaults to "many") + * @param bk the beam width holding the top k models (defaults to 3) + * @param qk index of Quality of Fit (QoF) to use for comparing quality + */ + def beamSelAll (cross: String = "many", bk: Int = 3)(using qk: Int): (LSET [Int], MatrixD) = + + // FIX -- to be implemented + + null + end beamSelAll + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the Variance Inflation Factor (VIF) for each variable to test * for multi-collinearity by regressing x_j against the rest of the variables. @@ -532,6 +574,20 @@ REPORT vifV end vif +// T E S T I N G S C E N A R I O S + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform In-Sample Testing, i.e., train and test on the full data set. + * @param skip the number of initial data points to skip (due to insufficient information) + * @param showYp whether to show the prediction vector + */ + def inSample_Test (skip: Int = 0, showYp: Boolean = false): Unit = + val (x_, y_) = (x.drop (skip), y.drop (skip)) + val yp = trainNtest (x_, y_)(x_, y_)._1 + if showYp then + println (s"Final In-Sample Prediction Vector yp = $yp") + end inSample_Test + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the indices for the test-set. 
* @see `scalation.mathstat.TnT_Split` @@ -542,26 +598,54 @@ REPORT TnT_Split.testIndices (permGen, n_test, rando) end testIndices + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the indices for the test-set for (1) RANDONLY or (3) LAST + * @see `scalation.mathstat.TnT_Split` + * @param n_total the size of full dataset + * @param n_test the size of test-set + * @param rando whether to select indices randomly or in blocks + */ + inline def testIndices (n_total: Int, n_test: Int, rando: Boolean): IndexedSeq [Int] = + TnT_Split.testIndices (permGen, n_total, n_test, rando) + end testIndices + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /* Use validation to compute test Quality of Fit (QoF) measures by dividing - * the full dataset into a TESTING set and a TRAINING set. + * the full dataset into a TESTING-set and a TRAINING-set. * The test set is defined by idx and the rest of the data is the training set. + * @see `modeling.Predictor.validate` about the RANDOM, FIRST, and LAST options + * for selecting the testing-set. 
* @param rando flag indicating whether to use randomized or simple validation - * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) - * @param idx the prescribed TESTING set indices - */ - def validate (rando: Boolean = true, ratio: Double = 0.2) - (idx : IndexedSeq [Int] = testIndices ((ratio * y.dim).toInt, rando)): MatrixD = + * @param ratio the ratio of the TESTING-set to the full dataset (most common 70-30, 80-20) + * @param idx the prescribed TESTING-set indices (default => generate) + */ + def validate (rando: Boolean = true, ratio: Double = Model.TE_RATIO) +// (idx: IndexedSeq [Int] = testIndices ((ratio * y.dim).toInt, rando)): + (idx: IndexedSeq [Int] = testIndices (y.dim, (ratio * y.dim).toInt, rando)): + (MatrixD, MatrixD) = + debug ("validate", s"n_test = ${(ratio * y.dim).toInt}, rando = $rando") val (x_e, x_, y_e, y_) = TnT_Split (x, y, idx) // Test-n-Train Split train (x_, y_) // train model on the training set - val qof = test (x_e, y_e)._2 // test on test-set and get QoF measures + val (yp, qof) = test (x_e, y_e) // test on test-set and get QoF measures if qof(QoF.sst.ordinal)(0) <= 0.0 then // requires variation in test-set flaw ("validate", "chosen testing set has no variability") - end if - qof +// println (FitM.fitMap (qof, QoF.values.map (_.toString))) + (yp, qof) end validate + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert QoF results into an array (of size 1) of `Statistic` for compatibility + * with the `crossValidate` method. 
+ * @param qof the Quality of Fit (QoF) results + def qof2Stat (qof: MatrixD): Array [Statistic] = + val stats = Fit.qofStatTable // create table for QoF measures + if qof(QoF.sst.ordinal)(0) > 0.0 then // requires variation in test-set + for q <- qof.indices do stats(q).tally (qof(q)(0)) // tally these QoF measures + stats + end qof2Stat + */ + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /* Use k-fold cross-validation to compute test Quality of Fit (QoF) measures * by iteratively dividing the full dataset into a TESTING set and a TRAINING set. @@ -581,11 +665,10 @@ REPORT for fold <- 0 until k do val idx = fullIdx (fold * sz until (fold+1) * sz).toMuIndexedSeq // instance indices for this fold debug ("crossValidate", s"fold $fold: test set size = $sz") - val qof = validate (rando, ratio)(idx) + val qof = validate (rando, ratio)(idx)._2 debug ("crossValidate", s"fold $fold: qof = $qof") if qof(QoF.sst.ordinal)(0) > 0.0 then // requires variation in test-set for q <- qof.indices do stats(q).tally (qof(q)(0)) // tally these QoF measures - end if end for stats @@ -611,12 +694,12 @@ object PredictorMV: def test (mod: PredictorMV, ext: String = "", check: Boolean = true): Unit = val iq = QoF.rSq.ordinal banner (s"Test ${mod.modelName} $ext") - val (yp, qof) = mod.trainNtest ()() // train and test the model on full dataset (in-sample) + val qof = mod.trainNtest ()()._2 // train and test the model on full dataset (in-sample) println ("Validate: Out-of-Sample Testing") - val qof2 = mod.validate ()() // train on training set, test on testing set + val qof2 = mod.validate ()()._2 // train on training set, test on testing set if check then assert (rel_diff (qof(iq)(0), qof2(iq)(0)) < 0.2) // check agreement of in-sample and out-of-sample results - println (FitM.showFitMap (mod.validate ()(), QoF.values.map (_.toString))) + println (Fit.showFitMap (qof2)) end test end PredictorMV diff --git 
a/src/main/scala/scalation/modeling/neuralnet/RegressionMV.scala b/src/main/scala/scalation/modeling/neuralnet/RegressionMV.scala index 733344640..7a804b580 100644 --- a/src/main/scala/scalation/modeling/neuralnet/RegressionMV.scala +++ b/src/main/scala/scalation/modeling/neuralnet/RegressionMV.scala @@ -23,7 +23,7 @@ import scalation.mathstat._ * Fit the parameter vector b in for each regression equation * y = b dot x + e = b_0 + b_1 * x_1 + ... b_k * x_k + e * where e represents the residuals (the part not explained by the model). - * Use Least-Squares (minimizing the residuals) to solve the parameter vector b + * Use Least-Squares (minimizing the residuals) to solve the parameter matrix bb(0).w * using the Normal Equations: * x.t * x * b = x.t * y * b = fac.solve (.) @@ -46,7 +46,7 @@ import scalation.mathstat._ class RegressionMV (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, hparam: HyperParameter = Regression.hp) extends PredictorMV (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): + with Fit (dfr = x.dim2 - 1, df = x.dim - x.dim2): // if not using an intercept df = (x.dim2, x.dim-x.dim2), correct by calling 'resetDF' method from `Fit` private val debug = debugf ("RegressionMV", false) // debug function @@ -54,7 +54,7 @@ class RegressionMV (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, private val algorithm = hparam("factorization") // factorization algorithm private val n = x.dim2 // number of columns - modelName = "RegressionMV" + _modelName = s"RegressionMV_$n" if n < 1 then flaw ("init", s"dim2 = $n of the 'x' matrix must be at least 1") @@ -84,7 +84,7 @@ class RegressionMV (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, val fac = solver (x_) fac.factor () // factor the matrix, either X or X.t * X - bb = Array (new NetParam (new MatrixD (x.dim2, y.dim2))) // allocate parameters bb (only uses 'bb(0).w') + bb = Array (new NetParam (new MatrixD (x_.dim2, y_.dim2))) // allocate parameters bb (only uses 
'bb(0).w') for k <- y_.indices2 do val yk = y_(?, k) bb(0).w(?, k) = fac match // RECORD the parameters/coefficients (@see `PredictorMV`) @@ -143,10 +143,11 @@ class RegressionMV (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Build a sub-model that is restricted to the given columns of the data matrix. * @param x_cols the columns that the new model is restricted to + * @param fname2 the variable/feature names for the new model (defaults to null) */ - def buildModel (x_cols: MatrixD): RegressionMV = + def buildModel (x_cols: MatrixD, fname2: Array [String] = null): RegressionMV = debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") - new RegressionMV (x_cols, y, null, hparam) + new RegressionMV (x_cols, y, fname2, hparam) end buildModel end RegressionMV @@ -251,7 +252,7 @@ end regressionMVTest println (mod.summary ()) // parameter/coefficient statistics banner ("Concrete Validation Test") - println (FitM.showFitMap (mod.validate ()(), QoF.values.map (_.toString))) + println (Fit.showFitMap (mod.validate ()()._2)) banner ("Concrete Cross-Validation Test") val stats = mod.crossValidate () @@ -279,7 +280,7 @@ end regressionMVTest2 println (mod.summary ()) // parameter/coefficient statistics banner ("AutoMPG Validation Test") - println (FitM.showFitMap (mod.validate ()(), QoF.values.map (_.toString))) + println (Fit.showFitMap (mod.validate ()()._2)) banner ("AutoMPG Cross-Validation Test") val stats = mod.crossValidate () @@ -311,8 +312,7 @@ end regressionMVTest3 // val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, smape, R^2 cv val k = cols.size println (s"k = $k, n = ${ox.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "smape", "R^2 cv"), - s"R^2 vs n for ${mod.modelName}", lines = true) + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for ${mod.modelName}", lines = true) println (s"rSq = $rSq") end regressionMVTest4 @@ -345,8 
+345,7 @@ end regressionMVTest4 val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, smape, R^2 cv val k = cols.size println (s"k = $k, n = ${ox.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "smape", "R^2 cv"), - s"R^2 vs n for ${mod.modelName} with $tech", lines = true) + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for ${mod.modelName} with $tech", lines = true) println (s"$tech: rSq = $rSq") end for diff --git a/src/main/scala/scalation/modeling/neuralnet/RegularizedMV.scala b/src/main/scala/scalation/modeling/neuralnet/RegularizedMV.scala new file mode 100644 index 000000000..8c112d9c0 --- /dev/null +++ b/src/main/scala/scalation/modeling/neuralnet/RegularizedMV.scala @@ -0,0 +1,35 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Thu Mar 5 16:10:39 EST 2026 + * @see LICENSE (MIT style license file). + * + * @note Model Support: Regularization Method for MV + */ + +package scalation +package modeling +package neuralnet + +import scalation.mathstat._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RegularizedMV` trait describes the `center` method that is to be supported + * by all companion objects supporting regularized MV regression. + */ +trait RegularizedMV: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a Regularized MV Regression from a data matrix and response matrix. + * This function centers the data. Implementations should return specific return types. 
+ * @param x the un-centered data/input m-by-n matrix, NOT augmented with a first column of ones + * @param y the un-centered response/output matrix + * @param fname the feature/variable names (defaults to null) + * @param hparam the shrinkage hyper-parameter (0 => OLS) in the penalty term 'lambda * norm of b' + */ + def center (x: MatrixD, y: MatrixD, fname: Array [String] = null, + hparam: HyperParameter = RidgeRegression.hp): PredictorMV + +end RegularizedMV + diff --git a/src/main/scala/scalation/modeling/neuralnet/RidgeRegressionMV.scala b/src/main/scala/scalation/modeling/neuralnet/RidgeRegressionMV.scala new file mode 100644 index 000000000..60a0658b7 --- /dev/null +++ b/src/main/scala/scalation/modeling/neuralnet/RidgeRegressionMV.scala @@ -0,0 +1,481 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Wed Feb 20 17:39:57 EST 2013 + * @see LICENSE (MIT style license file). + * + * @note Model: Multiple Linear Regression with Multiple Response Variables + * Multi-variate Multiple Linear Regression + */ + +// FIX: use cholesky. QR does not work + +package scalation +package modeling +package neuralnet + +import scala.math.sqrt + +import scala.runtime.ScalaRunTime.stringOf + +import scalation.mathstat._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RidgeRegressionMV` class supports multi-variate multiple linear regression. + * In this case, x is multi-dimensional [1, x_1, ... x_k] and y is multi-dimensional + * [y_0, ... y_l]. + * Fit the parameter vector b in for each regression equation + * y = b dot x + e = b_0 + b_1 * x_1 + ... b_k * x_k + e + * where e represents the residuals (the part not explained by the model). + * Use Least-Squares (minimizing the residuals) to solve the parameter vector b + * using the Normal Equations: + * x.t * x * b = x.t * y + * b = fac.solve (.) + * with L_2 Regularization. 
+ * Five factorization algorithms are provided: + * `Fac_QR` QR Factorization: slower, more stable (default) + * `Fac_SVD` Singular Value Decomposition: slowest, most robust + * `Fac_Cholesky` Cholesky Factorization: faster, less stable (reasonable choice) + * `Fac_LU' LU Factorization: better than Inverse + * `Fac_Inverse` Inverse Factorization: textbook approach + * @see see.stanford.edu/materials/lsoeldsee263/05-ls.pdf + * Note, not intended for use when the number of degrees of freedom 'df' is negative. + * @see en.wikipedia.org/wiki/Degrees_of_freedom_(statistics) + *------------------------------------------------------------------------------ + * @param x the data/input m-by-n matrix + * (augment with a first column of ones to include intercept in model) + * @param y the response/output m-by-ny matrix + * @param fname_ the feature/variable names (defaults to null) + * @param hparam the hyper-parameters (defaults to Regression.hp) + * @param xℱ the transformation applied to x (e.g., Center or Norm) + * @param yℱ the transformation applied to y (e.g., Center) + */ +class RidgeRegressionMV (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, + hparam: HyperParameter = RidgeRegression.hp, + xℱ: Transform = null, yℱ: Transform = null) + extends PredictorMV (x, y, fname_, hparam) + with Fit (dfr = x.dim2, df = x.dim - x.dim2 - 1): + // degrees of freedom: dfr = n, df = m - n - 1 as centered x matrix has 1 less column + // if not using an intercept df = (x.dim2, x.dim-x.dim2), correct by calling 'resetDF' method from `Fit` + // no intercept => correct Degrees of Freedom (DoF); as lambda get larger, need effective DoF + + private val debug = debugf ("RidgeRegressionMV", false) // debug function + private val flaw = flawf ("RidgeRegressionMV") // flaw function + private val algorithm = hparam("factorization") // factorization algorithm + private val lambda = hparam ("lambda").toDouble +// if hparam("lambda") <= 0.0 then findLambda._1 +// else hparam 
("lambda").toDouble + + _modelName = "RidgeRegressionMV_${lambda}" + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a solver for the Normal Equations using the selected factorization algorithm. + * @param x_ the matrix to be used by the solver + */ + private def solver (x_ : MatrixD): Factorization = + + val xtx = x_.transpose * x_ // pre-compute X.t * X + val ey = MatrixD.eye (x_.dim, x_.dim2) // identity matrix + val xtx_ = xtx.copy // copy xtx (X.t * X) + for i <- xtx_.indices do xtx_(i, i) += lambda // add lambda to the diagonal + + algorithm match // select the factorization technique + case "Fac_QR" => val xx = x_ ++ (ey * sqrt(lambda)) + println (s"xx.dim ${xx.dim}") + Fac_QR(xx) // QR/LQ Factorization +// case "Fac_SVD" => new Fac_SVD (x_) // Singular Value Decomposition - FIX + case "Fac_Cholesky" => new Fac_Cholesky(xtx_) // Cholesky Factorization + case "Fac_LU" => new Fac_LU(xtx_) // LU Factorization + case _ => new Fac_Inverse(xtx_) // Inverse Factorization + end match + end solver + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Train the predictor by fitting the parameter vector (b-vector) in the + * multiple regression equation + * y = b dot x + e = [b_0, ... b_k] dot [1, x_1 , ... x_k] + e + * using the ordinary least squares 'OLS' method. 
+ * @param x_ the training/full data/input matrix + * @param y_ the training/full response/output matrix + */ + def train (x_ : MatrixD = x, y_ : MatrixD = y): Unit = + val fac = solver (x_) + fac.factor () // factor the matrix, either X or X.t * X + + bb = Array (new NetParam (new MatrixD (x_.dim2, y_.dim2))) // allocate parameters bb (only uses 'bb(0).w') + for k <- y_.indices2 do + val yk = y_(?, k) +// println (s"yk = ${yk.dim}") + bb(0).w(?, k) = fac match // RECORD the parameters/coefficients (@see `PredictorMV`) + case fac: Fac_QR => fac.solve (yk) + case fac: Fac_SVD => fac.solve (yk) + case _ => fac.solve (x_.transpose * yk) + + if bb(0).w(0, k).isNaN then flaw ("train", s"parameters bb(0).w = ${bb(0).w}") + end for + + debug ("train", s"$fac estimates parameters bb(0).w = ${bb(0).w}") + end train + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Test a predictive model y_ = f(x_) + e and return its QoF vector. + * Testing may be be in-sample (on the training set) or out-of-sample + * (on the testing set) as determined by the parameters passed in. + * Note: must call train before test. + * @param x_ the testing/full data/input matrix (defaults to full x) + * @param y_ the testing/full response/output matrix (defaults to full y) + */ + def test (x_ : MatrixD = x, y_ : MatrixD = y): (MatrixD, MatrixD) = + val yp = predict_ (x_) // make predictions + e = y_ - yp // RECORD the residuals/errors (@see `Predictor`) + val qof = MatrixD (for k <- y_.indices2 yield diagnose (y_(?, k), yp(?, k))).transpose + (yp, qof) // return predictions and QoF vector + end test + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the vector of y = f(z) by evaluating the formula y = b dot z. + * It works on transformed values. 
+ * @param z the new vector to predict + */ + def predict_ (z: VectorD): VectorD = bb(0).w dot z + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the value of matrix y = f(x_, b). It works on transformed values. + * @param x_ the matrix to use for making predictions, one for each row + */ + def predict_ (x_ : MatrixD): MatrixD = x_ * bb(0).w + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the vector of y = f(z) by evaluating the formula y = b dot z. + * It is overridden to handle transformations. + * @param z the new vector to predict + */ + override def predict (z: VectorD): VectorD = + val zz = if xℱ == null then z else xℱ.f(MatrixD (z))(0) + if yℱ == null then bb(0).w dot zz else yℱ.fi(bb(0).w dot zz) + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Predict the matrix of vector y = f(x_, b). It is overridden to handle transformations. + * @param x_ the matrix to use for making predictions, one for each row + */ + override def predict (x_ : MatrixD): MatrixD = + val xx = if xℱ == null then x_ else xℱ.f(x_) + if yℱ == null then xx * bb(0).w else yℱ.fi(xx * bb(0).w) + end predict + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Produce a QoF summary for a model with diagnostics for each predictor 'x_j' + * and the overall Quality of Fit (QoF). 
+ * @param x_ the testing/full data/input matrix + * @param fname_ the array of feature/variable names + * @param b_ the parameters/coefficients for the model + * @param vifs the Variance Inflation Factors (VIFs) + */ + override def summary (x_ : MatrixD = getX, fname_ : Array [String] = fname, + b_ : VectorD = bb(0).w(?, 0), // FIX + vifs: VectorD = vif ()): String = + super.summary (x_, fname_, b_, vifs) // summary from `Fit` + end summary + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Build a sub-model that is restricted to the given columns of the data matrix. + * @param x_cols the columns that the new model is restricted to + */ + def buildModel (x_cols: MatrixD, fname: Array [String] = null): RidgeRegressionMV = + debug ("buildModel", s"${x_cols.dim} by ${x_cols.dim2}") + new RidgeRegressionMV (x_cols, y, fname, hparam) + end buildModel + +end RidgeRegressionMV + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RidgeRegressionMV` companion object provides factory methods for creating + * Multi-Variate (MV) Regression models. + */ +object RidgeRegressionMV extends RegularizedMV: + + val hp = RidgeRegression.hp + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a Ridge RegressionMV object from a combined data-response matrix. 
+ * @param xy the combined data-response matrix (predictors and response) + * @param fname the feature/variable names (defaults to null) + * @param hparam the hyper-parameters (defaults to Regression.hp) + * @param col the first designated response column (defaults to next to last column) + */ + def apply (xy: MatrixD, fname: Array [String] = null, + hparam: HyperParameter = RidgeRegression.hp) + (col: Int = xy.dim2 - 2): RidgeRegressionMV = + val (x, y) = (xy(?, 0 until col), xy(?, col until xy.dim)) + val xℱ = CenterForm (x) + val yℱ = CenterForm (y) + new RidgeRegressionMV (xℱ.f(x), yℱ.f(y), fname, hparam, xℱ, yℱ) + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a Ridge RegressionMV from a data matrix and response vector. + * This function centers the data. + * @param x the un-centered data/input m-by-n matrix, NOT augmented with a first column of ones + * @param y the un-centered response/output matrix + * @param fname the feature/variable names (defaults to null) + * @param hparam the shrinkage hyper-parameter (0 => OLS) in the penalty term 'lambda * b dot b' + */ + def center (x: MatrixD, y: MatrixD, fname: Array [String] = null, + hparam: HyperParameter = RidgeRegression.hp): RidgeRegressionMV = + val xℱ = CenterForm (x) + val yℱ = CenterForm (y) + new RidgeRegressionMV (xℱ.f(x), yℱ.f(y), fname, hparam, xℱ, yℱ) + end center + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a Ridge RegressionMV object from a data matrix and a response matrix. + * This method provides data rescaling. 
+ * @param x the data/input m-by-n matrix + * (augment with a first column of ones to include intercept in model) + * @param y the response/output matrix + * @param fname the feature/variable names (use null for default) + * @param hparam the hyper-parameters (defaults to Regression.hp) + */ + def rescale (x: MatrixD, y: MatrixD, fname: Array [String] = null, + hparam: HyperParameter = RidgeRegression.hp): RidgeRegressionMV = + val xℱ = NormForm (x) + val yℱ = CenterForm (y) + new RidgeRegressionMV (xℱ.f(x), yℱ.f(y), fname, hparam, xℱ, yℱ) + end rescale + +end RidgeRegressionMV + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ridgeRegressionMVTest` main function tests the `RidgeRegressionMV` class using + * the following regression equation. + * y = b dot x = b_1*x_1 + b_2*x_2. + * It compares `RidgeRegressionMV` with `RegressionMV` + * @see statmaster.sdu.dk/courses/st111/module03/index.html + * > runMain scalation.modeling.neuralnet.ridgeRegressionMVTest + */ +@main def ridgeRegressionMVTest (): Unit = + + // 5 data points: x_0 x_1 + val x = MatrixD ((5, 2), 36.0, 66.0, // 5-by-2 data matrix + 37.0, 68.0, + 47.0, 64.0, + 32.0, 53.0, + 1.0, 101.0) + + val y = MatrixD ((5, 2), 745.0, 700.0, // 5-by-2 response matrix + 895.0, 900.0, + 442.0, 500.0, + 440.0, 400.0, + 1598.0, 1500.0) + +// println ("model: y = b_0 + b_1*x_1 + b_2*x_2") + println ("model: y = b₀ + b₁*x₁ + b₂*x₂") // for RegressionMV, remove b₀ for Ridge + println (s"x = $x") + println (s"y = $y") + + banner ("RegressionMV") + val ox = VectorD.one (y.dim) +^: x // prepend a column of all 1's + val reg = new RegressionMV (ox, y) // create a RegressionMV model + reg.trainNtest ()() // train and test the model + + banner ("RidgeRegressionMV with manual centering") + val mu_x = x.mean // column-wise mean of x + val mu_y = y.mean // mean of y + val x_c = x - mu_x // centered x (column-wise) + val y_c = y - mu_y // centered y + val mod = new RidgeRegressionMV 
(x_c, y_c) // create a Ridge RegressionMV model + mod.trainNtest ()() // train and test the model + + banner ("RidgeRegressionMV with Auto-centering") + val amod = RidgeRegressionMV.center (x, y) // create an auto-centered Ridge RegressionMV model + amod.trainNtest ()() // train and test the model + + banner ("RidgeRegressionMV with Rescaling") + val rmod = RidgeRegressionMV.rescale (x, y) // create a rescaled Ridge RegressionMV model + rmod.trainNtest ()() // train and test the model + + banner ("Make one OOS Predictions") + val z = VectorD (20.0, 80.0) // new instance to predict + val _1z = 1.0 +: z // prepend 1 to z + val z_c = z - mu_x // center z + println (s"reg.predict ($z) = ${reg.predict (_1z)}") // predict using _1z + println (s"mod.predict ($z) = ${mod.predict (z_c) + mu_y}") // predict using z_c and add y's mean + println (s"amod.predict ($z) = ${amod.predict (z)}") // predict using z with auto-centering + println (s"rmod.predict ($z) = ${rmod.predict (z)}") // predict using z with rescaling + + banner ("Compare Summaries") + println (reg.summary ()) + println (mod.summary ()) + println (amod.summary ()) + println (rmod.summary ()) + +end ridgeRegressionMVTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ridgeRegressionMVTest2` main function is used to test the `RidgeRegressionMV` class. 
+ * > runMain scalation.modeling.neuralnet.ridgeRegressionMVTest2 + */ +@main def ridgeRegressionMVTest2 (): Unit = + + val x = MatrixD ((5, 3), 1.0, 0.35, 0.9, // training data - input matrix (m=5 vectors) + 1.0, 0.20, 0.7, + 1.0, 0.30, 0.8, + 1.0, 0.25, 0.75, + 1.0, 0.40, 0.95) + val y = MatrixD ((5, 2), 0.5, 0.4, // training data - output matrix (m=5 vectors) + 0.3, 0.3, + 0.2, 0.35, + 0.3, 0.32, + 0.6, 0.5) + + println (s"input matrix x = $x") + println (s"output matrix y = $y") + + val mod = new RidgeRegressionMV (x, y) // create RegreesionMV model + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + banner ("ridgeRegressionMVTest: Compare with Linear Regression - first column of y") + val y0 = y(?, 0) // use first column of response matrix y + val rg0 = new Regression (x, y0) // create a Regression model + rg0.trainNtest ()() // train and test the model + println (rg0.summary ()) // parameter/coefficient statistics + + banner ("ridgeRegressionMVTest: Compare with Linear Regression - second column of y") + val y1 = y(?, 1) // use second column of response matrix y + val rg1 = new Regression (x, y1) // create a Regression model + rg1.trainNtest ()() // train and test the model + println (rg1.summary ()) // parameter/coefficient statistics + + val b_ = mod.parameters(0).w // check for parameter agreements with `Regression` + assert (b_(?, 0) == rg0.parameter) + assert (b_(?, 1) == rg1.parameter) + +end ridgeRegressionMVTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ridgeRegressionMVTest3` main function tests the `RidgeRegressionMV` class using + * the Concrete dataset. 
+ * > runMain scalation.modeling.neuralnet.ridgeRegressionMVTest3 + */ +@main def ridgeRegressionMVTest3 (): Unit = + + import Example_Concrete._ + +// println (s"ox = $ox") +// println (s"y = $y") + println (s"ox_fname = ${stringOf (ox_fname)}") + + banner ("Concrete RidgeRegressionMV") + val mod = new RidgeRegressionMV (ox, y, ox_fname) // create model with intercept (else pass x) + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + banner ("Concrete Validation Test") + println (Fit.showFitMap (mod.validate ()()._2)) + + banner ("Concrete Cross-Validation Test") + val stats = mod.crossValidate () + FitM.showQofStatTable (stats) + +end ridgeRegressionMVTest3 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ridgeRegressionMVTest4` main function tests the `RidgeRegressionMV` class using + * the AutoMPG dataset. + * > runMain scalation.modeling.neuralnet.ridgeRegressionMVTest4 + */ +@main def ridgeRegressionMVTest4 (): Unit = + + import Example_AutoMPG.{ox, yy, ox_fname} + +// println (s"ox = $ox") +// println (s"yy = $yy") + println (s"ox_fname = ${stringOf (ox_fname)}") + + banner ("AutoMPG RidgeRegressionMV") + val mod = new RidgeRegressionMV (ox, yy, ox_fname) // create model with intercept (else pass x) + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + banner ("AutoMPG Validation Test") + println (Fit.showFitMap (mod.validate ()()._2)) + + banner ("AutoMPG Cross-Validation Test") + val stats = mod.crossValidate () + FitM.showQofStatTable (stats) + +end ridgeRegressionMVTest4 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ridgeRegressionMVTest5` main function tests the `RidgeRegressionMV` class using + * the AutoMPG dataset. It tests forward selection. 
+ * > runMain scalation.modeling.neuralnet.ridgeRegressionMVTest5 + */ +@main def ridgeRegressionMVTest5 (): Unit = + + import Example_AutoMPG.{ox, yy, ox_fname} + +// println (s"ox = $ox") +// println (s"y = $y") + println (s"ox_fname = ${stringOf (ox_fname)}") + + banner ("AutoMPG RidgeRegressionMV") + val mod = new RidgeRegressionMV (ox, yy, ox_fname) // create model with intercept (else pass x) + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + banner ("Feature Selection Technique: Forward") + val (cols, rSq) = mod.forwardSelAll () // R^2, R^2 bar, smape, R^2 cv +// val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, smape, R^2 cv + val k = cols.size + println (s"k = $k, n = ${ox.dim2}") + new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "smape", "R^2 cv"), + s"R^2 vs n for ${mod.modelName}", lines = true) + println (s"rSq = $rSq") + +end ridgeRegressionMVTest5 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ridgeRegressionMVTest6` main function tests the `RidgeRegressionMV` class using + * the AutoMPG dataset. It tests forward, backward and stepwise selection. 
+ * > runMain scalation.modeling.neuralnet.ridgeRegressionMVTest6 + */ +@main def ridgeRegressionMVTest6 (): Unit = + + import Example_AutoMPG.{ox, yy, ox_fname} + +// println (s"ox = $ox") +// println (s"y = $y") + + banner ("AutoMPG RidgeRegressionMV") + val mod = new RidgeRegressionMV (ox, yy, ox_fname) // create model with intercept (else pass x) + mod.trainNtest ()() // train and test the model + println (mod.summary ()) // parameter/coefficient statistics + + banner ("Cross-Validation") + FitM.showQofStatTable (mod.crossValidate ()) + + println (s"ox_fname = ${stringOf (ox_fname)}") + + for tech <- SelectionTech.values do + banner (s"Feature Selection Technique: $tech") + val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, smape, R^2 cv + val k = cols.size + println (s"k = $k, n = ${ox.dim2}") + new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "smape", "R^2 cv"), + s"R^2 vs n for ${mod.modelName} with $tech", lines = true) + println (s"$tech: rSq = $rSq") + end for + +end ridgeRegressionMVTest6 + diff --git a/src/main/scala/scalation/modeling/neuralnet/SimpleCNN.scala b/src/main/scala/scalation/modeling/neuralnet/SimpleCNN.scala new file mode 100644 index 000000000..c803ee3b8 --- /dev/null +++ b/src/main/scala/scalation/modeling/neuralnet/SimpleCNN.scala @@ -0,0 +1,388 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Tue Nov 4 13:44:23 EST 2025 + * @see LICENSE (MIT style license file). + * + * @note Simple Convolutional Neural Networks (CNNs) Using Gradient Descent Optimization + * Tests both Gradient Descent (GD) and Incremental Gradient Descent (IGD) + * Simplified version of `CNN_1D` for illustration/learning, not production + * + * @note the symbol ƒ indicates the derivative of function f, i.e., ƒ = f' + */ + +package scalation +package modeling +package neuralnet + +import scalation.? 
// wildcard: xy(?, 3) gives column 3 +import scalation.mathstat.{VectorD, MatrixD, Plot} +//import scalation.mathstat.VectorDOps._ +import scalation.modeling.ActivationFun.{sigmoid_} // sigmoid activation functions +import scalation.modeling.ActivationFun.{f_reLU, reLU_} // reLU activation functions +import scalation.modeling.forecasting.{ARY, MakeMatrix4TS} + +import CoFilter_1D.{conv, convs} // convolutional operators (s => same/padding) + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SimpleCNN` object contains a simple dataset for testing Gradient Descent (GD) + * and Incremental Gradient Descent (IGD) optimization algorithms. + * @see https://nowak.ece.wisc.edu/MFML.pdf + */ +object SimpleCNN: + + /** Sequential Data, e.g., time series or acoustic signal + */ + val yy = VectorD (1, 3, 4, 2, 5, 7, 9, 8, 6, 3, 4, 2, 7, 6, 3, 1, 5, 7, 9, 8) + + /** Build an input/predictor matrix (default number of lags p = 3) + */ +// MakeMatrix4TS.hp("p") = 3 + val xx = ARY.buildMatrix (yy, MakeMatrix4TS.hp) + +end SimpleCNN + +import SimpleCNN.{yy, xx} + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleCNN1` main function illustrates the use of Gradient Descent (GD) to optimize + * the weights/parameters of a simple neural network (3-layer (1 hidden) Neural Network). + * Computations done at the matrix level: X -> Z -> Y. 
R^2 = .299, .432 (requires more epochs) + * FIX - finish implementation + * > runMain scalation.modeling.neuralnet.simpleCNN1 + */ +@main def simpleCNN1 (): Unit = + + import scalation.mathstat.MatrixDOps._ // may clash with VectorDOps + + val (x, y) = (xx, MatrixD.fromVector (yy)) // input matrix, output/response matrix + val sst = (y - y.mean).normSq // sum of squares total, per column + val a = MatrixD ((2, 3), 0.1, 0.1, 0.1, // parameter/weight matrix: input -> hidden + 0.1, 0.1, 0.1) + val α = VectorD (0.1, 0.1, 0.1) // hidden layer bias vector + val b = MatrixD ((3, 2), 0.1, 0.1, // parameter/weight matrix: hidden -> output + 0.2, 0.1, + 0.1, 0.1) // initial weights/parameters (random in practice) + val β = VectorD (0.1, 0.1) // output layer bias vector + + val η = 10.0 // learning rate (to be tuned) + var u, z, v, ŷ, ε, ƒ1, δ1, g1, ƒ0, δ0, g0: MatrixD = null + +// try f0 = reLU, f1 = id + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + + // forward prop: input -> hidden + u = x * a + α // hidden pre-activation matrix + z = f_reLU.fM (u) // hidden matrix from f0 activation + + // forward prop: hidden -> output + v = z * b + β // output pre-activation matrix + ŷ = f_reLU.fM (v) // output/prediction matrix from f1 activation + ε = y - ŷ // error matrix + + // backward prop: hidden <- output + ƒ1 = ŷ ⊙ (1.0 - ŷ) // derivative (f1') for sigmoid + δ1 = -ε ⊙ ƒ1 // delta correction matrix via Hadamard product + g1 = z.ᵀ * δ1 // transposed Jacobian matrix (gradients) + + // backward prop: input <- hidden + ƒ0 = z ⊙ (1.0 - z) // derivative (f0') for sigmoid + δ0 = (δ1 * b.ᵀ) ⊙ ƒ0 // delta correction matrix + g0 = x.ᵀ * δ0 // transposed Jacobian matrix (gradients) + + // parameter updates + b -= g1 * η // update output parameter/weight matrix + β -= δ1.mean * η // update output bias vector + a -= g0 * η // update hidden parameter/weight matrix + α -= δ0.mean * η // update hidden bias vector + val sse = ε.normSq // sum of squared errors + val r2 = 
-(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println (s""" + u = $u + z = $z + v = $v + ŷ = $ŷ + ε = $ε + ƒ1 = $ƒ1 + δ1 = $δ1 + g1 = $g1 + ƒ0 = $ƒ0 + δ0 = $δ0 + g0 = $g0 + b = $b + β = $β + a = $a + α = $α + r2 = $r2 + """) + end for + + new Plot (null, y(?, 0), ŷ(?, 0), "GD for Three-layer Neural Net y_0", lines = true) + new Plot (null, y(?, 1), ŷ(?, 1), "GD for Three-layer Neural Net y_1", lines = true) + +end simpleCNN1 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleCNN2` main function illustrates the use of Incremental Gradient Descent (IGD) + * to optimize the weights/parameters of a simple (3-layer (1 hidden) CNN. + * + * Prediction Equation: z = f0(c *_c x + α) + * ŷ = f1(B^T z + β) + * + * where x is an input vector, z is the hidden layer vector, ŷ is a predicted output vector, f0, f1 + * are the activation functions, c is the shared weight vector and B is the parameter matrix, + * and α and β are the bias vectors. + * @note: Stochastic Gradient Descent (SGD) adds stochastic selection to IGD. In practice, + * mini-batches of size 32, 64, or 128 are commonly used. + * Computations done at the vector level, x -> z -> y. R^2 = .389 vs. 
.409 for Regression + * > runMain scalation.modeling.neuralnet.simpleCNN2 + */ +@main def simpleCNN2 (): Unit = + + import scalation.mathstat.MatrixDOps.⊗ // outer product of two vectors: v1 ⊗ v2 = v1 v1.ᵀ + // matrix where m_ij = v1_i * v2_j + import scalation.mathstat.VectorDOps._ + + val nx = xx.dim2 // size of input data row + val sst = (yy - yy.mean).normSq // sum of squares total, per column + val c = VectorD (0.2, 0.2, 0.2) // convolutional parameter/weight vector: input -> hidden + val α = VectorD (0.1, 0.1, 0.1, 0.1) // hidden layer bias vector + val b = MatrixD ((4, 1), 0.1, 0.1, 0.1, 0.1) // parameter/weight matrix: hidden -> output + // initial weights/parameters (random in practice) + val β = VectorD (0.1) // output layer bias vector + val nc = c.dim // size of convolutional filter + val nv = nx - nc + 1 // size of delta 0 for a 'valid' convolution @see `simpleCNN3` + + println (s"nx = $nx, nc = $nc, sst = $sst, xx = $xx, yy = $yy") + + val η = 0.7 // sigmoid learning rate (to be tuned) +// val η = 0.012 // reLU learning rate (to be tuned) + var u, z, v, ŷ, ε, ƒ1, δ1, ƒ0, δ0: VectorD = null + var g1: MatrixD = null + val yp = new MatrixD (yy.dim, 1) // save each prediction in yp + +// try f0 = reLU or sigmoid, f1 = id + + for epoch <- 1 to 100 do + println (s"Improvement step $epoch") + val sse = VectorD (0.0) + for i <- xx.indices do + val (x, y) = (xx(i), VectorD (yy(i))) // randomize i for Stochastic Gradient Descent (SGD) + + // forward prop: input -> hidden + u = convs (c, x) + α // hidden pre-activation vector via 'same' convolution + z = sigmoid_ (u) // hidden vector from f0 = sigmoid activation +// z = reLU_ (u) // hidden vector from f0 = reLU activation + + // forward prop: hidden -> output + v = b.ᵀ * z + β // output pre-activation vector + ŷ = v // output/prediction vector from f1 = id activation + ε = y - ŷ // error vector + + // backward prop: hidden <- output +// ƒ1 = ŷ.map (t => if t > 0.0 then 1.0 else 0.0) // derivative (f1') for reLU 
+ ƒ1 = VectorD.one (ŷ.dim) // derivative (f1') for id + δ1 = -ε * ƒ1 // delta correction vector via elementwise product + g1 = z ⊗ δ1 // transposed Jacobian matrix (gradients) + + // backward prop: input <- hidden + ƒ0 = z * (1.0 - z) // derivative (f0') for sigmoid +// ƒ0 = z.map (t => if t > 0.0 then 1.0 else 0.0) // derivative (f0') for reLU +// ƒ0 = VectorD.one (z.dim) // derivative (f0') for id + δ0 = b * δ1 * ƒ0 // delta correction vector + + // parameter updates + b -= g1 * η // update output parameter/weight matrix + β -= δ1 * η // update output bias vector +// c -= g0 * η // update convolutional hidden parameter/weight vector + + for j <- c.indices do c(j) -= x(j until j+nv) ∙ δ0(j until j+nv) * η // FIX - c_j based on what x's and δ0's + + α -= δ0 * η // update hidden bias vector + yp(i) = ŷ // save i-th prediction + sse += ε * ε // sum of squared errors + val r2 = -(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println (s""" + u = $u + z = $z + v = $v + ŷ = $ŷ + ε = $ε + ƒ1 = $ƒ1 + δ1 = $δ1 + g1 = $g1 + ƒ0 = $ƒ0 + δ0 = $δ0 + b = $b + β = $β + c = $c + α = $α + sse= $sse + r2 = $r2 + smape = ${FitM.smapeF (yy.drop(1), yp(?, 0).drop(1))} + """) + end for + end for + + new Plot (null, yy, yp(?, 0), "IGD for CNN y", lines = true) + + println ("Compare smape scores with Regression") + import scalation.modeling.Regression + val mod = new Regression (xx, yy) + val ypr = mod.trainNtest ()()._1 + println (s"smape = ${FitM.smapeF (yy.drop(1), ypr.drop(1))}") // can't forecast first value + +end simpleCNN2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleCNN3` main function illustrates the use of Incremental Gradient Descent (IGD) + * to optimize the weights/parameters of a simple (3-layer (1 hidden) CNN. 
+ * + * Prediction Equation: z = f0(c *_c x + α) + * ŷ = f1(B^T z + β) + * + * where x is an input vector, z is the hidden layer vector, ŷ is a predicted output vector, f0, f1 + * are the activation functions, c is the shared weight vector and B is the parameter matrix, + * and α and β are the bias vectors. + * @note: Stochastic Gradient Descent (SGD) adds stochastic selection to IGD. In practice, + * mini-batches of size 32, 64, or 128 are commonly used. + * Computations done at the vector level, x -> z -> y. R^2 = .389 vs. .409 for Regression + * > runMain scalation.modeling.neuralnet.simpleCNN3 + */ +@main def simpleCNN3 (): Unit = + + import scalation.mathstat.MatrixDOps.⊗ // outer product of two vectors: v1 ⊗ v2 = v1 v1.ᵀ + // matrix where m_ij = v1_i * v2_j +// import scalation.mathstat.VectorDOps._ + + val nx = xx.dim2 // size of input data row + val sst = (yy - yy.mean).normSq // sum of squares total, per column + val c = VectorD (0.2, 0.2, 0.2) // convolutional parameter/weight vector: input -> hidden + val α = VectorD (0.1, 0.1) // hidden layer bias vector + val b = MatrixD ((2, 1), 0.1, 0.1) // parameter/weight matrix: hidden -> output + // initial weights/parameters (random in practice) + val β = VectorD (0.1) // output layer bias vector + val nc = c.dim // size of convolutional filter + val nδ0 = nx - nc + 1 // size of delta 0 + + println (s"nx = $nx, nc = $nc, sst = $sst, xx = $xx, yy = $yy") + + val η = 0.7 // sigmoid learning rate (to be tuned) +// val η = 0.012 // reLU learning rate (to be tuned) + var u, z, v, ŷ, ε, ƒ1, δ1, ƒ0, δ0: VectorD = null + var g1: MatrixD = null + val yp = new MatrixD (yy.dim, 1) // save each prediction in yp + +// try f0 = reLU or sigmoid, f1 = id + + for epoch <- 1 to 100 do + println (s"Improvement step $epoch") + val sse = VectorD (0.0) + for i <- xx.indices do + val (x, y) = (xx(i), VectorD (yy(i))) // randomize i for Stochastic Gradient Descent (SGD) + + // forward prop: input -> hidden + u = conv (c, x) + α // 
hidden pre-activation vector via 'valid' convolution +// z = sigmoid_ (u) // hidden vector from f0 = sigmoid activation + z = reLU_ (u) // hidden vector from f0 = reLU activation + + // forward prop: hidden -> output + v = b.ᵀ * z + β // output pre-activation vector + ŷ = v // output/prediction vector from f1 = id activation + ε = y - ŷ // error vector + + // backward prop: hidden <- output +// ƒ1 = ŷ.map (t => if t > 0.0 then 1.0 else 0.0) // derivative (f1') for reLU + ƒ1 = VectorD.one (ŷ.dim) // derivative (f1') for id + δ1 = -ε * ƒ1 // delta correction vector via elementwise product + g1 = z ⊗ δ1 // transposed Jacobian matrix (gradients) + + import scalation.is_ + + // backward prop: input <- hidden +// ƒ0 = z * (1.0 - z) // derivative (f0') for sigmoid +// ƒ0 = z.map (t => if t > 0.0 then 1.0 else 0.0) // derivative (f0') for reLU + ƒ0 = z.map (t => is_ (t > 0.0)) // derivative (f0') for reLU +// ƒ0 = VectorD.one (z.dim) // derivative (f0') for id + δ0 = b * δ1 * ƒ0 // delta correction vector + + // parameter updates + b -= g1 * η // update output parameter/weight matrix + β -= δ1 * η // update output bias vector +// c -= g0 * η // update convolutional hidden parameter/weight vector + + for j <- c.indices do c(j) -= x(j until j+nδ0) ∙ δ0 * η // CHECK - c_j based on what x's and δ0's + + α -= δ0 * η // update hidden bias vector + yp(i) = ŷ // save i-th prediction + sse += ε * ε // sum of squared errors + val r2 = -(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println (s""" + u = $u + z = $z + v = $v + ŷ = $ŷ + ε = $ε + ƒ1 = $ƒ1 + δ1 = $δ1 + g1 = $g1 + ƒ0 = $ƒ0 + δ0 = $δ0 + b = $b + β = $β + c = $c + α = $α + sse= $sse + r2 = $r2 + smape= ${FitM.smapeF (yy.drop(1), yp(?, 0).drop(1))} // can't forecast first value + """) + end for + end for + + new Plot (null, yy, yp(?, 0), "IGD for CNN y", lines = true) + + println ("Compare smape scores with Regression") + import scalation.modeling.Regression + val mod = new Regression (xx, yy) + val ypr = 
mod.trainNtest ()()._1 + println (s"smape = ${FitM.smapeF (yy.drop(1), ypr.drop(1))}") + + println ("Compare smape scores with ARY Forecaster") + val mod2 = ARY (yy, 1) + mod2.setSkip (1) + mod2.trainNtest_x ()() + +end simpleCNN3 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleCNN4` main function tests the conv ('valid' convolution) and convs + * ('same' convolution) updated methods in `VectorD`. + */ +@main def simpleCNN4 (): Unit = + + val c = VectorD (0.5, 1.0, 0.5) // convolution filter + val x = VectorD (-3, -2, -1, 0, 1, 2, 3, 4) // sample input (one row) + val α = 0.0 // for simplicity assume no bias + var u = conv (c, x) + α // hidden pre-activation vector φ via 'valid' convolution + var z = reLU_ (u) // hidden vector (no pooling) from f0 = reLU activation + println ("Test the 'valid' convolution operator") + println (s"u = $u") + println (s"z = $z") + + u = convs (c, x) + α // hidden pre-activation vector φ via 'same' convolution + z = reLU_ (u) // hidden vector (no pooling) from f0 = reLU activation + println ("Test the 'same' convolution operator") + println (s"u = $u") + println (s"z = $z") + +end simpleCNN4 + diff --git a/src/main/scala/scalation/modeling/neuralnet/SimpleCNN.scala.txt b/src/main/scala/scalation/modeling/neuralnet/SimpleCNN.scala.txt new file mode 100644 index 000000000..6ce5fb14e --- /dev/null +++ b/src/main/scala/scalation/modeling/neuralnet/SimpleCNN.scala.txt @@ -0,0 +1,390 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Tue Nov 4 13:44:23 EST 2025 + * @see LICENSE (MIT style license file). 
+ * + * @note Simple Convolutional Neural Networks (CNNs) Using Gradient Descent Optimization + * Tests both Gradient Descent (GD) and Incremental Gradient Descent (IGD) + * Simplified version of `CNN_1D` for illustration/learning, not production + * + * @note the symbol ƒ indicates the derivative of function f, i.e., ƒ = f' + */ + +package scalation.modeling.neuralnet + +import scalation.? // wildcard: xy(?, 3) gives column 3 +import scalation.mathstat.{VectorD, MatrixD, Plot} +//import scalation.mathstat.VectorDOps._ +import scalation.modeling.ActivationFun.{sigmoid_} // sigmoid activation functions +import scalation.modeling.ActivationFun.{f_reLU, reLU_} // reLU activation functions +import scalation.modeling.forecasting.{ARY, MakeMatrix4TS} + +import CoFilter_1D.{conv, convs} // convolutional operators (s => same/padding) + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SimpleCNN` object contains a simple dataset for testing Gradient Descent (GD) + * and Incremental Gradient Descent (IGD) optimization algorithms. + * @see https://nowak.ece.wisc.edu/MFML.pdf + */ +object SimpleCNN: + + /** Sequential Data, e.g., time series or acoustic signal + */ + val yy = VectorD (1, 3, 4, 2, 5, 7, 9, 8, 6, 3, 4, 2, 7, 6, 3, 1, 5, 7, 9, 8) + + /** Build an input/predictor matrix (default number of lags p = 3) + */ +// MakeMatrix4TS.hp("p") = 3 + val xx = ARY.buildMatrix (yy, MakeMatrix4TS.hp) + +end SimpleCNN + +import SimpleCNN.{yy, xx} + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleCNN1` main function illustrates the use of Gradient Descent (GD) to optimize + * the weights/parameters of a simple neural network (3-layer (1 hidden) Neural Network). + * Computations done at the matrix level: X -> Z -> Y. 
R^2 = .299, .432 (requires more epochs) + * FIX - finish implementation + * > runMain scalation.modeling.neuralnet.simpleCNN1 + */ +@main def simpleCNN1 (): Unit = + + import scalation.mathstat.MatrixDOps._ // may clash with VectorDOps + + val (x, y) = (xx, MatrixD.fromVector (yy)) // input matrix, output/response matrix + val sst = (y - y.mean).normSq // sum of squares total, per column + val a = MatrixD ((2, 3), 0.1, 0.1, 0.1, // parameter/weight matrix: input -> hidden + 0.1, 0.1, 0.1) + val α = VectorD (0.1, 0.1, 0.1) // hidden layer bias vector + val b = MatrixD ((3, 2), 0.1, 0.1, // parameter/weight matrix: hidden -> output + 0.2, 0.1, + 0.1, 0.1) // initial weights/parameters (random in practice) + val β = VectorD (0.1, 0.1) // output layer bias vector + + val η = 10.0 // learning rate (to be tuned) + var u, z, v, ŷ, ε, ƒ1, δ1, g1, ƒ0, δ0, g0: MatrixD = null + +// try f0 = reLU, f1 = id + + for epoch <- 1 to 10 do + println (s"Improvement step $epoch") + + // forward prop: input -> hidden + u = x * a + α // hidden pre-activation matrix + z = f_reLU.fM (u) // hidden matrix from f0 activation + + // forward prop: hidden -> output + v = z * b + β // output pre-activation matrix + ŷ = f_reLU.fM (v) // output/prediction matrix from f1 activation + ε = y - ŷ // error matrix + + // backward prop: hidden <- output + ƒ1 = ŷ ⊙ (1.0 - ŷ) // derivative (f1') for sigmoid + δ1 = -ε ⊙ ƒ1 // delta correction matrix via Hadamard product + g1 = z.ᵀ * δ1 // transposed Jacobian matrix (gradients) + + // backward prop: input <- hidden + ƒ0 = z ⊙ (1.0 - z) // derivative (f0') for sigmoid + δ0 = (δ1 * b.ᵀ) ⊙ ƒ0 // delta correction matrix + g0 = x.ᵀ * δ0 // transposed Jacobian matrix (gradients) + + // parameter updates + b -= g1 * η // update output parameter/weight matrix + β -= δ1.mean * η // update output bias vector + a -= g0 * η // update hidden parameter/weight matrix + α -= δ0.mean * η // update hidden bias vector + val sse = ε.normSq // sum of squared errors + val r2 = 
-(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println (s""" + u = $u + z = $z + v = $v + ŷ = $ŷ + ε = $ε + ƒ1 = $ƒ1 + δ1 = $δ1 + g1 = $g1 + ƒ0 = $ƒ0 + δ0 = $δ0 + g0 = $g0 + b = $b + β = $β + a = $a + α = $α + r2 = $r2 + """) + end for + + new Plot (null, y(?, 0), ŷ(?, 0), "GD for Three-layer Neural Net y_0", lines = true) + new Plot (null, y(?, 1), ŷ(?, 1), "GD for Three-layer Neural Net y_1", lines = true) + +end simpleCNN1 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleCNN2` main function illustrates the use of Incremental Gradient Descent (IGD) + * to optimize the weights/parameters of a simple (3-layer (1 hidden) CNN. + * + * Prediction Equation: z = f0(c *_c x + α) + * ŷ = f1(B^T z + β) + * + * where x is an input vector, z is the hidden layer vector, ŷ is a predicted output vector, f0, f1 + * are the activation functions, c is the shared weight vector and B is the parameter matrix, + * and α and β are the bias vectors. + * @note: Stochastic Gradient Descent (SGD) adds stochastic selection to IGD. In practice, + * mini-batches of size 32, 64, or 128 are commonly used. + * Computations done at the vector level, x -> z -> y. R^2 = .389 vs. 
.409 for Regression + * > runMain scalation.modeling.neuralnet.simpleCNN2 + */ +@main def simpleCNN2 (): Unit = + + import scalation.mathstat.MatrixDOps.⊗ // outer product of two vectors: v1 ⊗ v2 = v1 v1.ᵀ + // matrix where m_ij = v1_i * v2_j + import scalation.mathstat.VectorDOps._ + + object MyFit extends scalation.modeling.FitM + + val nx = xx.dim2 // size of input data row + val sst = (yy - yy.mean).normSq // sum of squares total, per column + val c = VectorD (0.2, 0.2, 0.2) // convolutional parameter/weight vector: input -> hidden + val α = VectorD (0.1, 0.1, 0.1, 0.1) // hidden layer bias vector + val b = MatrixD ((4, 1), 0.1, 0.1, 0.1, 0.1) // parameter/weight matrix: hidden -> output + // initial weights/parameters (random in practice) + val β = VectorD (0.1) // output layer bias vector + val nc = c.dim // size of convolutional filter + val nv = nx - nc + 1 // size of delta 0 for a 'valid' convolution @see `simpleCNN3` + + println (s"nx = $nx, nc = $nc, sst = $sst, xx = $xx, yy = $yy") + + val η = 0.7 // sigmoid learning rate (to be tuned) +// val η = 0.012 // reLU learning rate (to be tuned) + var u, z, v, ŷ, ε, ƒ1, δ1, ƒ0, δ0: VectorD = null + var g1: MatrixD = null + val yp = new MatrixD (yy.dim, 1) // save each prediction in yp + +// try f0 = reLU or sigmoid, f1 = id + + for epoch <- 1 to 100 do + println (s"Improvement step $epoch") + val sse = VectorD (0.0) + for i <- xx.indices do + val (x, y) = (xx(i), VectorD (yy(i))) // randomize i for Stochastic Gradient Descent (SGD) + + // forward prop: input -> hidden + u = convs (c, x) + α // hidden pre-activation vector via 'same' convolution + z = sigmoid_ (u) // hidden vector from f0 = sigmoid activation +// z = reLU_ (u) // hidden vector from f0 = reLU activation + + // forward prop: hidden -> output + v = b.ᵀ * z + β // output pre-activation vector + ŷ = v // output/prediction vector from f1 = id activation + ε = y - ŷ // error vector + + // backward prop: hidden <- output +// ƒ1 = ŷ.map (t => if t > 0.0 
then 1.0 else 0.0) // derivative (f1') for reLU + ƒ1 = VectorD.one (ŷ.dim) // derivative (f1') for id + δ1 = -ε * ƒ1 // delta correction vector via elementwise product + g1 = z ⊗ δ1 // transposed Jacobian matrix (gradients) + + // backward prop: input <- hidden + ƒ0 = z * (1.0 - z) // derivative (f0') for sigmoid +// ƒ0 = z.map (t => if t > 0.0 then 1.0 else 0.0) // derivative (f0') for reLU +// ƒ0 = VectorD.one (z.dim) // derivative (f0') for id + δ0 = b * δ1 * ƒ0 // delta correction vector + + // parameter updates + b -= g1 * η // update output parameter/weight matrix + β -= δ1 * η // update output bias vector +// c -= g0 * η // update convolutional hidden parameter/weight vector + + for j <- c.indices do c(j) -= x(j until j+nv) ∙ δ0(j until j+nv) * η // FIX - c_j based on what x's and δ0's + + α -= δ0 * η // update hidden bias vector + yp(i) = ŷ // save i-th prediction + sse += ε * ε // sum of squared errors + val r2 = -(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println (s""" + u = $u + z = $z + v = $v + ŷ = $ŷ + ε = $ε + ƒ1 = $ƒ1 + δ1 = $δ1 + g1 = $g1 + ƒ0 = $ƒ0 + δ0 = $δ0 + b = $b + β = $β + c = $c + α = $α + sse= $sse + r2 = $r2 + smape = ${MyFit.smapeF (yy.drop(1), yp(?, 0).drop(1))} + """) + end for + end for + + new Plot (null, yy, yp(?, 0), "IGD for CNN y", lines = true) + + println ("Compare smape scores with Regression") + import scalation.modeling.Regression + val mod = new Regression (xx, yy) + val ypr = mod.trainNtest ()()._1 + println (s"smape = ${MyFit.smapeF (yy.drop(1), ypr.drop(1))}") // can't forecast first value + +end simpleCNN2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleCNN3` main function illustrates the use of Incremental Gradient Descent (IGD) + * to optimize the weights/parameters of a simple (3-layer (1 hidden) CNN. 
+ * + * Prediction Equation: z = f0(c *_c x + α) + * ŷ = f1(B^T z + β) + * + * where x is an input vector, z is the hidden layer vector, ŷ is a predicted output vector, f0, f1 + * are the activation functions, c is the shared weight vector and B is the parameter matrix, + * and α and β are the bias vectors. + * @note: Stochastic Gradient Descent (SGD) adds stochastic selection to IGD. In practice, + * mini-batches of size 32, 64, or 128 are commonly used. + * Computations done at the vector level, x -> z -> y. R^2 = .389 vs. .409 for Regression + * > runMain scalation.modeling.neuralnet.simpleCNN3 + */ +@main def simpleCNN3 (): Unit = + + import scalation.mathstat.MatrixDOps.⊗ // outer product of two vectors: v1 ⊗ v2 = v1 v1.ᵀ + // matrix where m_ij = v1_i * v2_j +// import scalation.mathstat.VectorDOps._ + + object MyFit extends scalation.modeling.FitM + + val nx = xx.dim2 // size of input data row + val sst = (yy - yy.mean).normSq // sum of squares total, per column + val c = VectorD (0.2, 0.2, 0.2) // convolutional parameter/weight vector: input -> hidden + val α = VectorD (0.1, 0.1) // hidden layer bias vector + val b = MatrixD ((2, 1), 0.1, 0.1) // parameter/weight matrix: hidden -> output + // initial weights/parameters (random in practice) + val β = VectorD (0.1) // output layer bias vector + val nc = c.dim // size of convolutional filter + val nδ0 = nx - nc + 1 // size of delta 0 + + println (s"nx = $nx, nc = $nc, sst = $sst, xx = $xx, yy = $yy") + + val η = 0.7 // sigmoid learning rate (to be tuned) +// val η = 0.012 // reLU learning rate (to be tuned) + var u, z, v, ŷ, ε, ƒ1, δ1, ƒ0, δ0: VectorD = null + var g1: MatrixD = null + val yp = new MatrixD (yy.dim, 1) // save each prediction in yp + +// try f0 = reLU or sigmoid, f1 = id + + for epoch <- 1 to 100 do + println (s"Improvement step $epoch") + val sse = VectorD (0.0) + for i <- xx.indices do + val (x, y) = (xx(i), VectorD (yy(i))) // randomize i for Stochastic Gradient Descent (SGD) + + // forward 
prop: input -> hidden + u = conv (c, x) + α // hidden pre-activation vector via 'valid' convolution +// z = sigmoid_ (u) // hidden vector from f0 = sigmoid activation + z = reLU_ (u) // hidden vector from f0 = reLU activation + + // forward prop: hidden -> output + v = b.ᵀ * z + β // output pre-activation vector + ŷ = v // output/prediction vector from f1 = id activation + ε = y - ŷ // error vector + + // backward prop: hidden <- output +// ƒ1 = ŷ.map (t => if t > 0.0 then 1.0 else 0.0) // derivative (f1') for reLU + ƒ1 = VectorD.one (ŷ.dim) // derivative (f1') for id + δ1 = -ε * ƒ1 // delta correction vector via elementwise product + g1 = z ⊗ δ1 // transposed Jacobian matrix (gradients) + + import scalation.is_ + + // backward prop: input <- hidden +// ƒ0 = z * (1.0 - z) // derivative (f0') for sigmoid +// ƒ0 = z.map (t => if t > 0.0 then 1.0 else 0.0) // derivative (f0') for reLU + ƒ0 = z.map (t => is_ (t > 0.0)) // derivative (f0') for reLU +// ƒ0 = VectorD.one (z.dim) // derivative (f0') for id + δ0 = b * δ1 * ƒ0 // delta correction vector + + // parameter updates + b -= g1 * η // update output parameter/weight matrix + β -= δ1 * η // update output bias vector +// c -= g0 * η // update convolutional hidden parameter/weight vector + + for j <- c.indices do c(j) -= x(j until j+nδ0) ∙ δ0 * η // CHECK - c_j based on what x's and δ0's + + α -= δ0 * η // update hidden bias vector + yp(i) = ŷ // save i-th prediction + sse += ε * ε // sum of squared errors + val r2 = -(sse / sst - 1.0) // R^2 (recoded to avoid Ops clash) + + println (s""" + u = $u + z = $z + v = $v + ŷ = $ŷ + ε = $ε + ƒ1 = $ƒ1 + δ1 = $δ1 + g1 = $g1 + ƒ0 = $ƒ0 + δ0 = $δ0 + b = $b + β = $β + c = $c + α = $α + sse= $sse + r2 = $r2 + smape= ${MyFit.smapeF (yy.drop(1), yp(?, 0).drop(1))} // can't forecast first value + """) + end for + end for + + new Plot (null, yy, yp(?, 0), "IGD for CNN y", lines = true) + + println ("Compare smape scores with Regression") + import scalation.modeling.Regression + val mod 
= new Regression (xx, yy) + val ypr = mod.trainNtest ()()._1 + println (s"smape = ${MyFit.smapeF (yy.drop(1), ypr.drop(1))}") + + println ("Compare smape scores with ARY Forecaster") + val mod2 = ARY (yy, 1) + mod2.setSkip (1) + mod2.trainNtest_x ()() + +end simpleCNN3 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simpleCNN4` main function tests the conv ('valid' convolution) and convs + * ('same' convolution) updated methods in `VectorD`. + */ +@main def simpleCNN4 (): Unit = + + val c = VectorD (0.5, 1.0, 0.5) // convolution filter + val x = VectorD (-3, -2, -1, 0, 1, 2, 3, 4) // sample input (one row) + val α = 0.0 // for simplicity assume no bias + var u = conv (c, x) + α // hidden pre-activation vector φ via 'valid' convolution + var z = reLU_ (u) // hidden vector (no pooling) from f0 = reLU activation + println ("Test the 'valid' convolution operator") + println (s"u = $u") + println (s"z = $z") + + u = convs (c, x) + α // hidden pre-activation vector φ via 'same' convolution + z = reLU_ (u) // hidden vector (no pooling) from f0 = reLU activation + println ("Test the 'same' convolution operator") + println (s"u = $u") + println (s"z = $z") + +end simpleCNN4 + diff --git a/src/main/scala/scalation/modeling/neuralnet/StoppingRule.scala b/src/main/scala/scalation/modeling/neuralnet/StoppingRule.scala index dc09e8297..8d01d7a60 100644 --- a/src/main/scala/scalation/modeling/neuralnet/StoppingRule.scala +++ b/src/main/scala/scalation/modeling/neuralnet/StoppingRule.scala @@ -39,7 +39,6 @@ trait StoppingRule: else // getting better up = 0 if sse < sse_best then { b_best = b.copy; sse_best = sse } // lower see => save as best - end if sse0 = sse // make current the previous if up > upLimit then (b_best, sse_best) else (null, sse_best) // if at limit, return best end stopWhen @@ -57,12 +56,64 @@ trait StoppingRule: if sse < sse_best then // lower see => save as best bb_best = (for l <- b.indices yield 
b(l).copy).toArray // copy for each layer l sse_best = sse - end if end if sse0 = sse // make current the previous if up > upLimit then (bb_best, sse_best) // if at limit, return best else (null, sse_best) // return null => continue end stopWhen + // Added for AutoGrad (`scalation.modeling.autograd`) + + private var prev_loss = Double.MaxValue + private var best_loss = Double.MaxValue + private var best_params = IndexedSeq [autograd.Variabl] () + private var waitLimit = 0 + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Stop when too many steps have the cost measure (e.g., loss) increasing. + * Signal a stopping condition by returning the best list of parameters, else null. + * @param params the current list of `Variabl` parameters (e.g., weights, biases). + * @param loss the current loss value. + * @param upLimit the maximum number of consecutive steps allowed without improvement. + * @return A tuple containing (best_params, best_loss) if patience is exceeded, else (null, best_loss). + */ + def stopWhenContinuous (params: IndexedSeq [autograd.Variabl], loss: Double, upLimit: Int): + (IndexedSeq [autograd.Variabl], Double) = + if loss > prev_loss + EPSILON then up += 1 + else + up = 0 + if loss < best_loss then + best_params = params + best_loss = loss + end if + + prev_loss = loss // update previous loss + + if up > upLimit then (best_params, best_loss) + else (null, best_loss) + end stopWhenContinuous + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Early stopping with patience. + * If the loss does not improve (by more than EPSILON) for `patience` consecutive steps, + * signal a stopping condition by returning the best parameters and loss. + * @param params the current list of `Variabl` parameters (e.g., weights, biases). + * @param loss the current loss value. + * @param patience the number of epochs to waitLimit without improvement. 
+ * @return A tuple containing (best_params, best_loss) if patience is exceeded, else (null, best_loss). + */ + def stopWhenPatience (params: IndexedSeq [autograd.Variabl], loss: Double, patience: Int): + (IndexedSeq [autograd.Variabl], Double) = + if loss < best_loss - EPSILON then + best_params = params + best_loss = loss + waitLimit = 0 + else + waitLimit += 1 + + if waitLimit >= patience then (best_params, best_loss) + else (null, best_loss) + end stopWhenPatience + end StoppingRule diff --git a/src/main/scala/scalation/optimization/BoundsConstraint.scala b/src/main/scala/scalation/optimization/BoundsConstraint.scala index 6c02b3729..7ba6ede8a 100644 --- a/src/main/scala/scalation/optimization/BoundsConstraint.scala +++ b/src/main/scala/scalation/optimization/BoundsConstraint.scala @@ -30,11 +30,9 @@ trait BoundsConstraint (lower: VectorD = null, upper: VectorD = null): if lower != null then for i <- lower.indices if x(i) < lower(i) do x(i) = if lower(i) < 0 then lower(i) * 0.95 else lower(i) * 1.05 - end if if upper != null then for i <- upper.indices if x(i) > upper(i) do x(i) = if upper(i) > 0 then upper(i) * 0.95 else upper(i) * 1.05 - end for end if end constrain diff --git a/src/main/scala/scalation/optimization/ConjugateGradient.scala b/src/main/scala/scalation/optimization/ConjugateGradient.scala index f9407935e..11d275eb1 100644 --- a/src/main/scala/scalation/optimization/ConjugateGradient.scala +++ b/src/main/scala/scalation/optimization/ConjugateGradient.scala @@ -41,10 +41,10 @@ class ConjugateGradient (f: FunctionV2S, g: FunctionV2S = null, extends Minimizer: private val debug = debugf ("ConjugateGradient", true) // debug function - private val flaw = flawf ("ConjugateGradient") // flaw function +// private val flaw = flawf ("ConjugateGradient") // flaw function private val WEIGHT = 1000.0 // weight on penalty for constraint violation - private var gr: FunctionV2V = null // gradient (vector of partial derivatives) +// private var gr: FunctionV2V = 
null // gradient (vector of partial derivatives) - FIX - should be used //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Compute the beta function using the Polak-Ribiere (PR) technique. The @@ -61,11 +61,13 @@ class ConjugateGradient (f: FunctionV2S, g: FunctionV2S = null, * they are more efficient and more accurate than estimating the values * using difference quotients (the default approach). * @param partials the array of partial derivative functions - */ + * FIX - should be used + * def setDerivatives (partials: FunctionV2V): Unit = if g != null then flaw ("setDerivatives", "only works for unconstrained problems") gr = partials // use given functions for partial derivatives end setDerivatives + */ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The objective function f plus a weighted penalty based on the constraint @@ -79,7 +81,6 @@ class ConjugateGradient (f: FunctionV2S, g: FunctionV2S = null, else // constrained, g(x) <= 0 val penalty = if ineq then max (g(x), 0.0) else abs (g(x)) f_x + abs (f_x) * WEIGHT * penalty * penalty - end if end fg //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/optimization/CoordinateDescent.scala b/src/main/scala/scalation/optimization/CoordinateDescent.scala index 2b95d8825..7558dc0b1 100644 --- a/src/main/scala/scalation/optimization/CoordinateDescent.scala +++ b/src/main/scala/scalation/optimization/CoordinateDescent.scala @@ -100,13 +100,13 @@ end CoordinateDescent def f(x: VectorD): Double = (x(0) - 3)~^2 + (x(1) - 4)~^2 + 1 var optimizer = new CoordinateDescent (f) var opt = optimizer.solve (x0) - println ("optimal solution (f(x), x) = $opt") + println (s"optimal solution (f(x), x) = $opt") banner ("Minimize: x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1") def g(x: VectorD): Double = x(0)~^4 + (x(0) - 3)~^2 + (x(1) - 4)~^2 + 1 optimizer = new CoordinateDescent (g) opt = optimizer.solve (x0) - println 
("optimal solution (g(x), x) = $opt") + println (s"optimal solution (g(x), x) = $opt") banner ("Minimizer: x_0/4 + 5x_0^2 + x_0^4 - 9x_0^2 x_1 + 3x_1^2 + 2x_1^4") // @see math.fullerton.edu/mathews/n2003/gradientsearch/GradientSearchMod/Links/GradientSearchMod_lnk_5.html @@ -114,7 +114,7 @@ end CoordinateDescent def f3 (x: VectorD): Double = x(0)/4 + 5*x(0)~^2 + x(0)~^4 - 9*x(0)~^2*x(1) + 3*x(1)~^2 + 2*x(1)~^4 optimizer = new CoordinateDescent (f3) opt = optimizer.solve (x0) - println ("optimal solution (f3(x), x) = $opt") + println (s"optimal solution (f3(x), x) = $opt") end coordinateDescentTest diff --git a/src/main/scala/scalation/optimization/DifferentialEvolution.scala b/src/main/scala/scalation/optimization/DifferentialEvolution.scala new file mode 100644 index 000000000..e6613c812 --- /dev/null +++ b/src/main/scala/scalation/optimization/DifferentialEvolution.scala @@ -0,0 +1,123 @@ + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Korede Bishi, John Miller + * @version 2.0 + * @date Mon Jun 2 15:00:25 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Differential Evolution (DE) Derivative-Free Optimization Algorithm + * Initial draft of code written by ChatGPT + * + * @see en.wikipedia.org/wiki/Differential_evolution + * @see `GeneticAlgorithm` for a more general metaheuristic optimizer + */ + +package scalation +package optimization + +import scala.runtime.ScalaRunTime.stringOf +import scala.util.Random.{nextInt, nextDouble, shuffle} +import scala.util.boundary, boundary.break + +import scalation.mathstat.{FunctionV2S, VectorD} +import scalation.random.RandomVecD + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `DifferentialEvolution` object solves optimization problems using + * the Differential Evolution algorithm. This population-based metaheuristic + * optimizes a real-valued function by iteratively improving candidate solutions. 
+ * minimize f(x) + */ +object DifferentialEvolution + extends MonitorEpochs: + + private val debug = debugf ("DifferentialEvolution", true) // debug function + private val eps = 1E-12 // number close to zero + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform Differential Evolution optimization on objective function f. + * @param f the real-valued objective function to be minimized + * @param dim the dimensionality of the solution space + * @param bounds the search boundaries as a tuple (min, max) + * @param maxGen the maximum number of generations + * @param F the differential weight (scaling factor) + * @param CR the crossover probability + * @param popSize the population size (approx. 10 * dim) + * @return a tuple containing the best solution vector and its objective function value + */ + def optimize (f: FunctionV2S, dim: Int, bounds: (Double, Double), maxGen: Int = 400, + F: Double = 0.8, CR: Double = 0.9)(popSize: Int = 10 * dim): + (VectorD, Double) = boundary: // boundary block to allow breaking early + + val rrv = RandomVecD (dim, bounds._2, bounds._1) // random vector generator within bounds + val pop = Array.fill (popSize)(rrv.gen) // initialize population + var best = pop.minBy (f) // find initial best individual + var bestVal = f(best) // evaluate best value + + var noImprove = 0 // count of generations with no improvement + val patience = 25 // maximum allowed stagnation (complex problems may require more) + + debug ("optimize", s"initial population: ${stringOf(pop)}") + + for gen <- 1 to maxGen do + var improved = false // flag to check improvement in current generation + + for i <- 0 until popSize do + + // Mutation: select 3 unique individuals (not including i) + val idxs = shuffle ((0 until popSize).filter (_ != i)).take(3) + val (a, b, c) = (pop(idxs(0)), pop(idxs(1)), pop(idxs(2))) + val mutant = a + (b - c) * F // differential mutation formula + + // Crossover: combine target and mutant to form trial 
vector + val trial = new VectorD (dim) + val jj = nextInt (dim) // force at least one mutant gene + cfor (0, dim) { j => + trial(j) = if j == jj || nextDouble() < CR then mutant(j) else pop(i)(j) } + + // Selection: replace individual if trial is better + if f(trial) + eps < f(pop(i)) then + pop(i) = trial + val trialVal = f(trial) + if trialVal < bestVal then + best = trial // update best vector + bestVal = trialVal // update best value + improved = true // mark that improvement occurred + end for + + epochLoss += bestVal // track bestVal for plotting convergence + debug ("optimize", s"Generation $gen: bestVal = $bestVal") + + if improved then noImprove = 0 else noImprove += 1 // update stagnation count + if noImprove >= patience then + println (s"Early stopping at generation $gen (no improvement in $patience generations).") + break ((best, bestVal)) // exit early with best solution + end for + + (best, bestVal) // return best result after all generations + end optimize + +end DifferentialEvolution + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `differentialEvolutionTest` main function is used to test the `DifferentialEvolution` + * object. 
+ * > runMain scalation.optimization.differentialEvolutionTest + */ +@main def differentialEvolutionTest (): Unit = + + import DifferentialEvolution._ + + banner ("Problem: (x_0 - 3)^2 + (x_1 + 1)^2 + 1") + val f: FunctionV2S = (x: VectorD) => (x(0) - 3)~^2 + (x(1) + 1)~^2 + 1 // test function + + val (bestSol, bestVal) = optimize (f, 2, (-5.0, 5.0))() + + println (s"Best solution: $bestSol") // output best solution vector + println (s"Objective value: $bestVal") // output objective function value + + plotLoss () // show convergence plot of bestVal per generation + +end differentialEvolutionTest + diff --git a/src/main/scala/scalation/optimization/GeneticAlgorithm.scala b/src/main/scala/scalation/optimization/GeneticAlgorithm.scala new file mode 100644 index 000000000..3c345bf77 --- /dev/null +++ b/src/main/scala/scalation/optimization/GeneticAlgorithm.scala @@ -0,0 +1,236 @@ + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Casey Bowman, John Miller + * @version 2.0 + * @date Wed Feb 16 11:34:46 EST 2022 + * @see LICENSE (MIT style license file). + * + */ + +// U N D E R D E V E L O P M E N T + +package scalation +package optimization + +import scalation.mathstat.{FunctionV2S, VectorD} +import scalation.random.{Randi, Uniform, Variate} + +import scala.collection.mutable.ArrayBuffer +//import scala.util.Sorting +import scala.util.control.Breaks.breakable + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `GA` class solves unconstrained Non-Linear Programming (NLP) problems + * using a genetic algorithm approach. Given a function 'f' and a set of + * random variables based on the dimensionality of the search space, and + * the domain of the search space, the GA will evolve a pool of candidate + * solutions using evolutionary concepts such as crossover and mutation. + * The random variables are used to create random candidate solutions for + * the solution pool. 
The algorithm iterates until it converges or has + * reached a maximum number of generations. + * + * minimize f(x) + * + * @param f the vector-to-scalar objective function + * @param rands random variables used to create the initial 'gene' pool. + * There is one r.v. per dimension, and should reflect the + * domain of the search space. + */ +class GeneticAlgorithm (f: FunctionV2S, rands: Array [Variate]) + extends Minimizer + with MonitorEpochs: + + private val N = 15 // number of candidates to keep in the pool + private val pool = Array.ofDim [FuncVec] (N) // the pool of candidate solutions. The values are a tuple + // of the candidate with their objective function value. + + private val randInd = Randi (0, rands.length) // an r.v. to generate a random index for crossover and mutation + private val randMut = Uniform (-0.2, 0.2) // an r.v. to generate a size for mutations. + private val epochs = new ArrayBuffer [Double] () + +// println (rands.deep) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create the initial pool of candidate solutions. + * @param seeds a (possibly null) array of initial candidates provided + * by the user. + */ + def initPool (seeds: Array [VectorD] = Array.ofDim (0)): Unit = + var i0 = 0 + if seeds != null then + i0 = seeds.length + for i <- 0 until i0 do pool(i) = (f(seeds(i)), seeds(i)) + + for i <- i0 until N do + val x = new VectorD (rands.length) + for j <- x.indices do x(j) = rands(j).gen + pool(i) = (f(x), x) + end for + end initPool + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Sort the pool by the objective function value of the candidate solutions. 
+ */ + def sortPool (): Unit = + for i <- 0 until N - 1 do + val j = findMin (i) + if i != j then { val t = pool(i); pool(i)= pool(j); pool(j)= t } + end for + +/* + for i <- 0 until N - 1 do + for j <- 0 until N - i - 1 do + val y1 = pool(j)._1 + val y2 = pool(j + 1)._1 + if y2 < y1 then + val t = pool(j) + pool(j) = pool(j + 1) + pool(j + 1) = t + end for + end for +*/ + end sortPool + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Find + * @param i + */ + def findMin (i: Int): Int = + var jm = i + for j <- i+1 until N do + if pool(j)._1 <= pool(jm)._1 then jm = j + jm + end findMin + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Calculate the next generation of solutions. The top four solutions are + * kept and the rest of the pool size is filled out by evolving new solutions + * with crossover and mutation. + */ + def nextGen (): Unit = + var count = 4 + for i <- 0 until 3 do + for j <- i + 1 until 4 do + val x1 = pool(i)._2 + val x2 = pool(j)._2 + val x3 = cross (x1, x2) + mutate (x3) + pool(count) = (f(x3), x3) + count += 1 + end for + end for + + for i <- count until pool.length do + val x = new VectorD (rands.length) + for j <- x.indices do x(j) = rands(j).gen + pool(i) = (f(x), x) + end for + end nextGen + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Calculate the crossover of two solutions. + * @param x1 the first solution for the crossover + * @param x2 the second solution for the crossover + */ + def cross (x1: VectorD, x2: VectorD): VectorD = + + val k = randInd.igen // generate a random index + val j = randInd.igen + if j % 2 == 0 then x1(0 until k) ++ x2(k until x2.dim) + else x2(0 until k) ++ x1(k until x1.dim) + end cross + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform a mutation on a solution. 
+ * @param x the solution on which to perform the mutation + */ + def mutate (x: VectorD): Unit = + for i <- x.indices do x(i) *= (1.0 + randMut.gen) // apply a multiplicative factor to the current index-value of the solution. + end mutate + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Utility method to print the current solution pool. + * @param n the number of solutions from the pool to include in the print + * the default is set to 5 + */ + def printPool (n: Int = 5): Unit = + print ("Pool = [") + for i <- 0 until n - 1 do print (s"${pool(i)}, ") + println (s"${pool(n - 1)}]") + end printPool + + def lineSearch (x: VectorD, dir: VectorD, step: Double = STEP): Double = + throw new UnsupportedOperationException ("lineSearch: method is not needed for GeneticAlgorithm") + end lineSearch + + def solve (x0: VectorD, step: Double = STEP, toler: Double = EPSILON): FuncVec = + throw new UnsupportedOperationException ("solve: use solve2 instead of solve") + end solve + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Solve the optimization problem. + * @param seeds an array of initial candidates provided + * by the user. 
+ */ + def solve2 (seeds: Array[VectorD] = null): FuncVec = + initPool (seeds) + sortPool () + banner ("Generation 0:") + printPool () + breakable { + for i <- 0 until MAX_IT do + banner ("Generation " + (i + 1) + ":") + nextGen () + sortPool () + epochs += pool(0)._1 + printPool () + end for + } // breakable + pool(0) + end solve2 + +end GeneticAlgorithm + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** + * > runMain scalation.optimization.geneticAlgorithmTest + */ +@main def geneticAlgorithmTest (): Unit = + + def f (x: VectorD): Double = (x(0) - 3.0) * (x(0) - 3.0) + (x(1) + 1.0) * (x(1) + 1.0) + 1.0 + + val r0 = Uniform (-10.0, 10.0, 1) + val r1 = Uniform (-10.0, 10.0, 2) + + val seeds = Array (VectorD (2.0, 0.0), VectorD (4.0, -2.0)) + + val solver = new GeneticAlgorithm (f, Array (r0, r1)) + val x = solver.solve2 (seeds) + + println ("optimal x = " + x) + +end geneticAlgorithmTest + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** + * > runMain scalation.optimization.selectionSortTest + */ +@main def selectionSortTest (): Unit = + + def f(x: VectorD): Double = (x(0) - 3.0) * (x(0) - 3.0) + (x(1) + 1.0) * (x(1) + 1.0) + 1.0 + + val r0 = Uniform (-10.0, 10.0, 1) + val r1 = Uniform (-10.0, 10.0, 2) + + val seeds = Array (VectorD(2.0, 0.0), VectorD(4.0, -2.0)) + + val solver = new GeneticAlgorithm(f, Array(r0, r1)) + solver.initPool (seeds) + solver.printPool (15) + solver.sortPool () + solver.printPool (15) + +end selectionSortTest + diff --git a/src/main/scala/scalation/optimization/GoldenSectionLS.scala b/src/main/scala/scalation/optimization/GoldenSectionLS.scala index df2744e99..58a1a92a5 100644 --- a/src/main/scala/scalation/optimization/GoldenSectionLS.scala +++ b/src/main/scala/scalation/optimization/GoldenSectionLS.scala @@ -93,7 +93,7 @@ class GoldenSectionLS (f: FunctionS2S, τ: Double = 1E-5) var matched = false breakable { - for k <- 1 to MAX_IT do // expand right to try to 
find a down-up pattern + for _ <- 0 until MAX_IT do // expand right to try to find a down-up pattern val dist = x3 - x1 x2 = x1 + G_SECTION * dist f2 = f(x2) @@ -111,7 +111,6 @@ class GoldenSectionLS (f: FunctionS2S, τ: Double = 1E-5) x2 = x1 + G_SECTION * (xmax - x1) f2 = f(x2) gsection (true, x1, x2, xmax, f2) // apply golden section search on original interval - end if end lsearch //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/optimization/IntegerTabuSearch.scala b/src/main/scala/scalation/optimization/IntegerTabuSearch.scala index 52e661795..84fe91f55 100644 --- a/src/main/scala/scalation/optimization/IntegerTabuSearch.scala +++ b/src/main/scala/scalation/optimization/IntegerTabuSearch.scala @@ -52,7 +52,6 @@ class IntegerTabuSearch (f: VectorI => Double, var sum = f(x) // unconstrained value if g != null then // if constrained sum += f(x) * weight * (max (g(x), 0))~^2 // add penalty - end if sum end if end fg @@ -79,13 +78,11 @@ class IntegerTabuSearch (f: VectorI => Double, x_f = minNeighbor (x_f, i + 1, step) // min in neighborhood of x_f y_f = minNeighbor (y_f, i + 1, step) // min in neighborhood of y_f z_f = minNeighbor (z_f, i + 1, step) // min in neighborhood of z_f - end if if x_f._2 < y_f._2 then // find smallest of 3 functional value if x_f._2 < z_f._2 then x_f else z_f else if y_f._2 < z_f._2 then y_f else z_f - end if end minNeighbor //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -103,7 +100,6 @@ class IntegerTabuSearch (f: VectorI => Double, if x_f._2 <= y_f._2 then // no improvement if step == 1 then { print ("optimal "); break () } // => return solution when step is 1 else step -= 1 // => decrease step size otherwise - end if x_f = y_f // move to improved point end for } // breakable diff --git a/src/main/scala/scalation/optimization/LassoAddm.scala b/src/main/scala/scalation/optimization/LassoAddm.scala index 4bfd93389..eb73dd3d2 100644 --- 
a/src/main/scala/scalation/optimization/LassoAddm.scala +++ b/src/main/scala/scalation/optimization/LassoAddm.scala @@ -92,7 +92,6 @@ object LassoAdmm: else z = new VectorD (n) l = new VectorD (n) - end if var z_old: VectorD = null diff --git a/src/main/scala/scalation/optimization/Minimizer.scala b/src/main/scala/scalation/optimization/Minimizer.scala index da85bb901..880bc9d66 100644 --- a/src/main/scala/scalation/optimization/Minimizer.scala +++ b/src/main/scala/scalation/optimization/Minimizer.scala @@ -32,11 +32,11 @@ import scalation.random.RandomVecD */ trait Minimizer: - protected val EPSILON = 1E-10 // number close to zero - // between machine epsilon and its square root - protected val TOL = 100.0 * EPSILON // default tolerance level more relaxed - protected val STEP = 0.5 // default initial step size - protected val MAX_IT = 400 // maximum number of major steps/iterations + protected val EPSILON = 1E-10 // number close to zero + // between machine epsilon and its square root + protected val TOL = 100.0 * EPSILON // default tolerance level more relaxed + protected val STEP = 0.5 // default initial step size + protected val MAX_IT = 400 // maximum number of major steps/iterations //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The objective function f plus a weighted penalty based on the constraint @@ -61,7 +61,7 @@ trait Minimizer: * Return the optimal point/vector x and its objective function value. 
* @param x0 the starting point * @param step the initial step size (may default to STEP) - * @param toler the tolerance (may default to EPSILON) + * @param toler the tolerance (may default to TOL) */ def solve (x0: VectorD, step: Double, toler: Double): FuncVec @@ -75,12 +75,12 @@ trait Minimizer: * @param toler the tolerance */ def resolve (n: Int, step_ : Double = STEP, toler: Double = TOL): FuncVec = - val rvg = RandomVecD (n, -0.5, 0.5) + val rvg = RandomVecD (n, 0.5, -0.5) // note max before min var opt = (MAX_VALUE, VectorD.nullv) for i <- 0 until 2*n do val x0 = rvg.gen println (s"==> resolve: random restart $i at x0 = $x0") - opt = better (solve (x0, STEP, EPSILON), opt) + opt = better (solve (x0, step_, toler), opt) end for opt end resolve @@ -93,8 +93,8 @@ end Minimizer */ object Minimizer: - val EPSILON = 1E-10 // number close to zero - val STEP = 0.6 // step size + val EPSILON = 1E-10 // number close to zero + val STEP = 0.6 // step size //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Test the optimizer's solve method with the given objective function and diff --git a/src/main/scala/scalation/optimization/NelderMeadSimplex.scala b/src/main/scala/scalation/optimization/NelderMeadSimplex.scala index 54c54dccf..7baa8ab4e 100644 --- a/src/main/scala/scalation/optimization/NelderMeadSimplex.scala +++ b/src/main/scala/scalation/optimization/NelderMeadSimplex.scala @@ -71,7 +71,6 @@ class NelderMeadSimplex (f: FunctionV2S, n: Int) for j <- i+1 to n if simplex(j)._1 > simplex(im)._1 do im = j if im != i then val t = simplex(i); simplex(i) = simplex(im); simplex(im) = t - end if end for end sort diff --git a/src/main/scala/scalation/optimization/NelderMeadSimplex2.scala b/src/main/scala/scalation/optimization/NelderMeadSimplex2.scala index 7baa5f30d..7527e9dcc 100644 --- a/src/main/scala/scalation/optimization/NelderMeadSimplex2.scala +++ b/src/main/scala/scalation/optimization/NelderMeadSimplex2.scala @@ -76,7 +76,6 @@ class 
NelderMeadSimplex2 (f: FunctionV2S, n: Int, checkCon: Boolean = false, for j <- i+1 to n if simplex(j)._1 > simplex(im)._1 do im = j if im != i then val t = simplex(i); simplex(i) = simplex(im); simplex(im) = t - end if end for end sort @@ -176,11 +175,11 @@ class NelderMeadSimplex2 (f: FunctionV2S, n: Int, checkCon: Boolean = false, var diff = simplex(0)._1 - simplex(n)._1 // difference between f_h and f_l breakable { - for k <- 1 to MAX_IT do + for _ <- 0 until MAX_IT do f_h = simplex(0)._1 // functional value for x_h (highest/worst) f_s = simplex(1)._1 // functional value for x_s (second worst) f_l = simplex(n)._1 // functional value for x_l (lowest/best) - val (f_c, x_c) = centroid () // compute best-side centroid of simplex + val (_, x_c) = centroid () // compute best-side centroid of simplex, f_c not used val (f_r, x_r) = reflect (x_c) // compute reflection point val smaller = f_r < f_l // f_r smaller than best val larger = f_r >= f_s // f_r at least as large as second worst @@ -191,7 +190,6 @@ class NelderMeadSimplex2 (f: FunctionV2S, n: Int, checkCon: Boolean = false, val (f_e, x_e) = expand (x_c, x_r) // expand beyond reflection point if f_e < f_r then { replace (x_e); break () } // replace worst x_h with x_e else { replace (x_r); break () } // replace worst x_h with x_r - end if if larger then // contract back from reflection point if f_r < f_h then // f_r between second worst and worst @@ -200,7 +198,6 @@ class NelderMeadSimplex2 (f: FunctionV2S, n: Int, checkCon: Boolean = false, else // f_r at least as large as worst val (f_ci, x_ci) = contractIn (x_c) if f_ci <= f_h then { replace (x_ci); break () } // replace worst x_h with x_ci - end if end if shrink () // shrink the size of the simplex diff --git a/src/main/scala/scalation/optimization/SPSA.scala b/src/main/scala/scalation/optimization/SPSA.scala index 425879799..9d6392400 100644 --- a/src/main/scala/scalation/optimization/SPSA.scala +++ b/src/main/scala/scalation/optimization/SPSA.scala @@ -41,7 
+41,6 @@ class SPSA (f: FunctionV2S, max_iter: Int = 100, checkCon: Boolean = false, private val flaw = flawf ("SPSA") // flaw function private val EPS = 1E-6 - private val coin = Bernoulli () // Bernoulli (0/1) RVG private var alpha = 0.602 private var gamma = 0.101 private var A = 100.0 @@ -76,7 +75,9 @@ class SPSA (f: FunctionV2S, max_iter: Int = 100, checkCon: Boolean = false, * @param stream the random number stream */ def bernoulliVec (n: Int, p: Double = 0.5, stream: Int = 0): VectorD = - VectorD (for i <- 0 until n yield 2.0 * coin.gen - 1.0) + val coin = Bernoulli (p, stream) // Bernoulli (0/1) RVG + VectorD (for _ <- 0 until n yield 2.0 * coin.gen - 1.0) + end bernoulliVec //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Solve for an optimal point by moving a distance ak in the -ghat direction. @@ -112,7 +113,6 @@ class SPSA (f: FunctionV2S, max_iter: Int = 100, checkCon: Boolean = false, if f_x < f_best then x_best = x.copy // copy by value f_best = f_x - end if epochLoss += f_best // record best for k-th epoch if (x - x_old).norm < toler then go = false // stopping rule } // cfor diff --git a/src/main/scala/scalation/optimization/StoppingRule.scala b/src/main/scala/scalation/optimization/StoppingRule.scala index 4e2b08f0e..292508186 100644 --- a/src/main/scala/scalation/optimization/StoppingRule.scala +++ b/src/main/scala/scalation/optimization/StoppingRule.scala @@ -36,7 +36,6 @@ trait StoppingRule (upLimit: Int = 3): else // getting better up = 0 if loss < loss_best then { loss_best = loss; x_best = x.copy } // lower loss => save as best - end if loss0 = loss // make current the previous if up > upLimit then @@ -44,7 +43,6 @@ trait StoppingRule (upLimit: Int = 3): (loss_best, x_best) // at limit => return best x else (loss_best, null) // null => continue search - end if end stopWhen //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git 
a/src/main/scala/scalation/optimization/TabuSearch.scala b/src/main/scala/scalation/optimization/TabuSearch.scala index dbf1ed541..01f8d2ed9 100644 --- a/src/main/scala/scalation/optimization/TabuSearch.scala +++ b/src/main/scala/scalation/optimization/TabuSearch.scala @@ -51,7 +51,6 @@ class TabuSearch (f: VectorD => Double, g: VectorD => Double = null, maxStep: Do var sum = f(x) // unconstrained value if g != null then // if constrained sum += f(x) * weight * (max (g(x), 0))~^2 // add penalty - end if sum end if end fg @@ -78,13 +77,11 @@ class TabuSearch (f: VectorD => Double, g: VectorD => Double = null, maxStep: Do x_f = minNeighbor (x_f, i + 1, step) // min in neighborhood of x_f y_f = minNeighbor (y_f, i + 1, step) // min in neighborhood of y_f z_f = minNeighbor (z_f, i + 1, step) // min in neighborhood of z_f - end if if x_f._2 < y_f._2 then // find smallest of 3 functional value if x_f._2 < z_f._2 then x_f else z_f else if y_f._2 < z_f._2 then y_f else z_f - end if end minNeighbor //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -102,7 +99,6 @@ class TabuSearch (f: VectorD => Double, g: VectorD => Double = null, maxStep: Do if x_f._2 <= y_f._2 then // no improvement if step <= TOL then { print ("optimal"); break () } // => return solution when step is below TOL else step /= shrink // => decrease step size otherwise - end if x_f = y_f // move to improved point end for } // breakable diff --git a/src/main/scala/scalation/optimization/WolfeLS.scala b/src/main/scala/scalation/optimization/WolfeLS.scala index 4b15e79d5..16357c8b1 100644 --- a/src/main/scala/scalation/optimization/WolfeLS.scala +++ b/src/main/scala/scalation/optimization/WolfeLS.scala @@ -92,7 +92,6 @@ class WolfeLS (f: FunctionS2S, c1: Double = 0.0001, c2: Double = 0.9) x = if hi < POS_INF then (lo + hi) / 2.0 else x + x else go = false // both conditions satisfied - end if fx = f(x); dfx = Ⅾ (f)(x) // recompute f(x) and f'(x) debug ("lsearch_", s"(it = $it) x = 
$x, f(x) = $fx") diff --git a/src/main/scala/scalation/optimization/WolfeLS3.scala b/src/main/scala/scalation/optimization/WolfeLS3.scala index 03ea39c71..995225282 100644 --- a/src/main/scala/scalation/optimization/WolfeLS3.scala +++ b/src/main/scala/scalation/optimization/WolfeLS3.scala @@ -115,7 +115,6 @@ class WolfeLS3 (f: FunctionV2S, var g: FunctionV2V, c1: Double = 0.0001, c2: Dou else split = false go = false - end if debug ("lsearch", s"(it = $it) after a = $a in [$l, $u]") } // cfor diff --git a/src/main/scala/scalation/optimization/functions/BenchmarkFunction.scala b/src/main/scala/scalation/optimization/functions/BenchmarkFunction.scala index 461a8fec8..cc01b8cb8 100644 --- a/src/main/scala/scalation/optimization/functions/BenchmarkFunction.scala +++ b/src/main/scala/scalation/optimization/functions/BenchmarkFunction.scala @@ -35,7 +35,9 @@ import scalation.mathstat.VectorD */ trait BenchmarkFunction: - val functionMinimum: VectorD + val functionMinimum: VectorD // known minimal point + + val bound: (VectorD, VectorD) = (null, null) // optional (lower, upper) bounds //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The objective function used for benchmarking or testing purposes. 
Can be diff --git a/src/main/scala/scalation/optimization/functions/ExampleFunctions.scala b/src/main/scala/scalation/optimization/functions/ExampleFunctions.scala index 539071296..df0db5259 100644 --- a/src/main/scala/scalation/optimization/functions/ExampleFunctions.scala +++ b/src/main/scala/scalation/optimization/functions/ExampleFunctions.scala @@ -222,6 +222,8 @@ object ReciprocalFunction extends BenchmarkFunction: val functionMinimum: VectorD = VectorD (1.06035, 4) + override val bound: (VectorD, VectorD) = (VectorD (0.2, 0.2), VectorD (10.0, 10.0)) + def objFunction (x: VectorD): Double = 1 / x(0) + x(0) ~^ 4 + (x(0) - 3.0) ~^ 2 + (x(1) - 4.0) ~^ 2 + 1.0 diff --git a/src/main/scala/scalation/optimization/linear_opt/CheckLP.scala b/src/main/scala/scalation/optimization/linear_opt/CheckLP.scala index 3545a07a0..9b3532f4c 100644 --- a/src/main/scala/scalation/optimization/linear_opt/CheckLP.scala +++ b/src/main/scala/scalation/optimization/linear_opt/CheckLP.scala @@ -69,7 +69,6 @@ class CheckLP (a: MatrixD, b: VectorD, c: VectorD): if ax_i > b_i + EPSILON then flaw ("isPrimalFeasible", s"constraint ax_i <= b_i violated for row $i: $ax_i > $b_i") feas = false - end if end for feas end isPrimalFeasible @@ -96,7 +95,6 @@ class CheckLP (a: MatrixD, b: VectorD, c: VectorD): if ya_j > c_j + EPSILON then flaw ("isDualFeasible", s"constraint ya_j <= c_j violated for col $j: $ya_j > $c_j") feas = false - end if end for feas end isDualFeasible @@ -115,11 +113,9 @@ class CheckLP (a: MatrixD, b: VectorD, c: VectorD): if abs (f - cx) > EPSILON then flaw ("isOptimal", s"failed since f = $f != c x = $cx") opti = false - end if if abs (f - yb) > EPSILON then flaw ("isOptimal", s"failed since f = $f != y b = $yb") opti = false - end if opti end isOptimal diff --git a/src/main/scala/scalation/optimization/linear_opt/IntegerLP.scala b/src/main/scala/scalation/optimization/linear_opt/IntegerLP.scala index 8cbdeb642..b1b1c37ce 100644 --- 
a/src/main/scala/scalation/optimization/linear_opt/IntegerLP.scala +++ b/src/main/scala/scalation/optimization/linear_opt/IntegerLP.scala @@ -98,13 +98,11 @@ class IntegerLP (a: MatrixD, b: VectorD, c: VectorD, excl: Set [Int] = Set ()): println (s"x_$j <= ${x_le(j)}") aa = aa :+ VectorD.oneAt (j, c.dim) // add row to constraint matrix bb = bb :+ x_le(j) // add element to limit vector - end if if x_ge(j) >= 0.0 then // check for x_j >= bound println ("x_$j >= ${x_ge(j)}") aa = aa :+ VectorD.oneAt (j, c.dim) // add row to constraint matrix bb = bb :+ -x_ge(j) // add element to limit vector - end if end for (aa, bb) // return the full set of constraints end formConstraints @@ -149,7 +147,6 @@ class IntegerLP (a: MatrixD, b: VectorD, c: VectorD, excl: Set [Int] = Set ()): println (">>>>>>>>>>>>>> left branch: dp = " + (dp + 1)) println (">>>>>>>>>>>>>> add constraint x_" + j + " <= " + bound) solve (dp + 1, formConstraints) - end if // add upper bound constraint: x_j >= -ceil (x(j)) where "-" => ">=" constraint bound = ceil (x(j)) diff --git a/src/main/scala/scalation/optimization/linear_opt/QuadraticSimplex.scala b/src/main/scala/scalation/optimization/linear_opt/QuadraticSimplex.scala index f1ac772b4..b256811d0 100644 --- a/src/main/scala/scalation/optimization/linear_opt/QuadraticSimplex.scala +++ b/src/main/scala/scalation/optimization/linear_opt/QuadraticSimplex.scala @@ -182,7 +182,7 @@ class QuadraticSimplex (a: MatrixD, b: VectorD, q: MatrixD, c: VectorD, var x_B: showTableau () breakable { - for it <- 1 to MAX_IT do + for _ <- 0 until MAX_IT do l = entering (); if l == -1 then break () // optimal solution found k = leaving (l); if k == -1 then break () // solution is unbounded pivot (k, l) // pivot: k leaves and l enters diff --git a/src/main/scala/scalation/optimization/linear_opt/Simplex2P.scala b/src/main/scala/scalation/optimization/linear_opt/Simplex2P.scala index 82ad1abd6..3d5a664dc 100644 --- 
a/src/main/scala/scalation/optimization/linear_opt/Simplex2P.scala +++ b/src/main/scala/scalation/optimization/linear_opt/Simplex2P.scala @@ -106,7 +106,6 @@ class Simplex2P (a: MatrixD, b: VectorD, c: VectorD) jr += 1 x_B(i) = MpN + jr // put artificial variable in basis t(M) += t(i) // row op to make t(M, MpN + j) zero - end if end for end initBasis @@ -206,7 +205,6 @@ class Simplex2P (a: MatrixD, b: VectorD, c: VectorD) true else false - end if end infeasible //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -238,7 +236,6 @@ class Simplex2P (a: MatrixD, b: VectorD, c: VectorD) t(M)(MpN until jj) = -1.0 // set cost row (M) in the tableau to remove artificials else t(M)(0 until N) = -c // set cost row (M) in the tableau to given cost vector - end if initBasis () // initialize the basis to the slack and artificial vars if R > 0 then // there are artificial variables => phase I required diff --git a/src/main/scala/scalation/optimization/quasi_newton/BFGS.scala b/src/main/scala/scalation/optimization/quasi_newton/BFGS.scala index cc1176800..0be06a6a0 100644 --- a/src/main/scala/scalation/optimization/quasi_newton/BFGS.scala +++ b/src/main/scala/scalation/optimization/quasi_newton/BFGS.scala @@ -53,11 +53,11 @@ class BFGS (f: FunctionV2S, g: FunctionV2S = null, extends Minimizer, PathMonitor: private val debug = debugf ("BFGS", false) // debug function - private val flaw = flawf ("BFGS") // flaw function +// private val flaw = flawf ("BFGS") // flaw function private val WEIGHT = 1000.0 // weight on penalty for constraint violation private var bfgs = true // use BFGS (true) or Gradient Descent (false) - private var df: Array [FunctionV2S] = null // gradient as explicit functions for partials +// private var df: Array [FunctionV2S] = null // gradient as explicit functions for partials - FIX - should use //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Use the Gradient Descent algorithm rather than the default 
BFGS algorithm. @@ -84,11 +84,12 @@ class BFGS (f: FunctionV2S, g: FunctionV2S = null, * they are more efficient and more accurate than estimating the values * using difference quotients (the default approach). * @param grad the gradient as explicit functions for partials - */ + * FIX - should use def setDerivatives (grad: Array [FunctionV2S]): Unit = if g != null then flaw ("setDerivatives", "only works for unconstrained problems") df = grad // use given functions for partial derivatives end setDerivatives + */ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The objective function f plus a weighted penalty based on the constraint @@ -102,7 +103,6 @@ class BFGS (f: FunctionV2S, g: FunctionV2S = null, else // constrained, g(x) <= 0 val penalty = if ineq then max (g(x), 0.0) else abs (g(x)) f_x + abs (f_x) * WEIGHT * penalty * penalty - end if end fg //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -296,7 +296,6 @@ class BFGS (f: FunctionV2S, g: FunctionV2S = null, if lSearchAlg == LBFGSLineSearchAlg.BacktrackingOrthantWise then debug("solve", "orthantwise is not currently supported by BFGS") return (fg(x._1), x._1) - end if val lSearchImple = LBFGSLineSearch.getImple (lSearchAlg) val aHi = eye (x0.dim, x0.dim) // approximate Hessian inverse (aHi) matrix @@ -389,7 +388,6 @@ class BFGS (f: FunctionV2S, g: FunctionV2S = null, if lSearchAlg == LBFGSLineSearchAlg.BacktrackingOrthantWise then debug("solve_", "orthantwise is not currently supported by BFGS") return (fg(x._1), x._1) - end if val lSearchImple = LBFGSLineSearch.getImple (lSearchAlg) val aHi = eye (x0.dim, x0.dim) // approximate Hessian inverse (aHi) matrix @@ -471,12 +469,11 @@ object BFGS: ineq: Boolean = true, exactLS: Boolean = false, steepest: Boolean = true): BFGS = if steepest then - val steep = new BFGS (f, f, ineq, exactLS) + val steep = new BFGS (f, g, ineq, exactLS) steep.setSteepest () steep else - new BFGS (f, f, ineq, exactLS) - end 
if + new BFGS (f, g, ineq, exactLS) end apply end BFGS diff --git a/src/main/scala/scalation/optimization/quasi_newton/DM_LBFGS.scala b/src/main/scala/scalation/optimization/quasi_newton/DM_LBFGS.scala index 95e0de017..6d8a5d7e4 100644 --- a/src/main/scala/scalation/optimization/quasi_newton/DM_LBFGS.scala +++ b/src/main/scala/scalation/optimization/quasi_newton/DM_LBFGS.scala @@ -69,7 +69,7 @@ object DM_LBFGS extends PathMonitor: if 0 < params.past then pf = new VectorD(params.past) // allocate space for storing previous obj func values // Evaluate the function value and its gradient - val evaluationResults = cd.evaluationLogic.evaluate (cd.instance, x, cd.n, 0) + val evaluationResults = cd.evalLogic.evaluate (cd.instance, x, cd.n, 0) fx = evaluationResults.objFunctionValue g = evaluationResults.gradientVector @@ -126,7 +126,6 @@ object DM_LBFGS extends PathMonitor: if useOrthantWiseLogic then val orthantWisePrms = params.orthantWise.get pg = orthantWisePrms.pseudoGradient (xNew, g) - end if // Compute x and g norms xnorm = xNew.norm @@ -184,23 +183,23 @@ object DM_LBFGS extends PathMonitor: d = if ! 
useOrthantWiseLogic then -g else -pg j = end - for i <- 0 until bound do + cfor (0, bound) { _ => j = (j + m - 1) % m // if (--j == -1) j = m-1 val it = lm(j) it.alpha = it.s dot d // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1} it.alpha = it.alpha / it.ys d += (it.y * (-it.alpha)) // q_{i} = q_{i+1} - \alpha_{i} y_{i} - end for + } // cfor d *= (ys / yy) - for i <- 0 until bound do + cfor (0, bound) { _ => val it = lm(j) beta = it.y dot d // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i} beta /= it.ys d += it.s * (it.alpha - beta) // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j} j = (j + 1) % m // if (++j == m) j = 0 - end for + } // cfor // Constrain the search direction for orthant-wise updates if useOrthantWiseLogic then @@ -217,7 +216,7 @@ object DM_LBFGS extends PathMonitor: LBFGSResults (LBFGSReturnCode.UnknownError, xNew, Some(fx), None) catch - case e: OutOfMemoryError => LBFGSResults (LBFGSReturnCode.OutOfMemory, x, None, None) + case _ : OutOfMemoryError => LBFGSResults (LBFGSReturnCode.OutOfMemory, x, None, None) end dmlbfgsMain diff --git a/src/main/scala/scalation/optimization/quasi_newton/EvaluationLogic.scala b/src/main/scala/scalation/optimization/quasi_newton/EvaluationLogic.scala index 19f771f95..2c4576742 100644 --- a/src/main/scala/scalation/optimization/quasi_newton/EvaluationLogic.scala +++ b/src/main/scala/scalation/optimization/quasi_newton/EvaluationLogic.scala @@ -30,9 +30,8 @@ trait EvaluationLogic: /** Evaluates the gradients and objective function according to the state of * the variables during the minimization process. * - * @param instance user data provided by each call of the `lbfgsMain` method. 
Can - * have `Any` type defined by the user as long as the same type is - * utilized in other instances that rely on this `EvaluationLogic` + * @param instance an optional user data segment that may be provided when calling + * the `LBFGS.lbfgsMain` method (@see `OptimizationLogic`) * @param x `VectorD` with the current values of the variables * @param n the number of variables * @param step current step chosen by the line search routine. diff --git a/src/main/scala/scalation/optimization/quasi_newton/LBFGS.scala b/src/main/scala/scalation/optimization/quasi_newton/LBFGS.scala index 4387b66fa..35ff808cc 100644 --- a/src/main/scala/scalation/optimization/quasi_newton/LBFGS.scala +++ b/src/main/scala/scalation/optimization/quasi_newton/LBFGS.scala @@ -65,6 +65,9 @@ object LBFGS extends PathMonitor: * @param n the dimensionality of the optimization problem * @param x the starting point (initial guess) * @param functionLogic the logic defining the objective function and its gradient + * @param params the optimization control hyper-parameters (@see `LBFGSPrms`) + * @param instance an optional user data segment that may be provided when calling this method + * (@see `OptimizationLogic`) */ def lbfgsMain (n: Int, x: VectorD, functionLogic: EvaluationLogic | OptimizationLogic, params: LBFGSPrms = LBFGSPrms (), instance: Any = None): LBFGSResults = @@ -106,7 +109,7 @@ object LBFGS extends PathMonitor: if 0 < params.past then pf = new VectorD (params.past) // Evaluate the function value and its gradient. */ - val evaluationResults = cd.evaluationLogic.evaluate (cd.instance, x, cd.n, 0) + val evaluationResults = cd.evalLogic.evaluate (cd.instance, x, cd.n, 0) fx = evaluationResults.objFunctionValue g = evaluationResults.gradientVector @@ -163,7 +166,7 @@ object LBFGS extends PathMonitor: val orthantWisePrms = params.orthantWise.get pg = orthantWisePrms.pseudoGradient (xNew, g) - /* Compute x and g norms. */ + // Compute x and g norms xnorm = xNew.norm gnorm = if ! 
useOrthantWiseLogic then g.norm else pg.norm @@ -208,7 +211,7 @@ object LBFGS extends PathMonitor: ys = y dot s yy = y dot y - lm(end) = LBFGSIterationData(s, y, ys, 0) + lm(end) = LBFGSIterationData (s, y, ys, 0) // Recursive formula to compute dir = -(H \cdot g) // This is described in page 779 of: Jorge Nocedal. @@ -218,27 +221,27 @@ object LBFGS extends PathMonitor: k += 1 end = (end + 1) % m - // Compute the steepest direction, i.e., the negative of gradients. */ + // Compute the steepest direction, i.e., the negative of gradients. d = if ! useOrthantWiseLogic then -g else -pg j = end - for i <- 0 until bound do + cfor (0, bound) { _ => j = (j + m - 1) % m // if (--j == -1) j = m-1 val it = lm(j) it.alpha = it.s dot d // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1} it.alpha = it.alpha / it.ys d += (it.y * (-it.alpha)) // q_{i} = q_{i+1} - \alpha_{i} y_{i} - end for + } // cfor d *= (ys / yy) - for i <- 0 until bound do + cfor (0, bound) { _ => val it = lm(j) beta = it.y dot d // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i} beta /= it.ys d += it.s * (it.alpha - beta) // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j} j = (j + 1) % m // if (++j == m) j = 0 - end for + } // cfor // Constrain the search direction for orthant-wise updates. 
if useOrthantWiseLogic then @@ -253,7 +256,7 @@ object LBFGS extends PathMonitor: LBFGSResults(LBFGSReturnCode.UnknownError, xNew, Some(fx), None) catch - case e: OutOfMemoryError => LBFGSResults(LBFGSReturnCode.OutOfMemory, x, None, None) + case _ : OutOfMemoryError => LBFGSResults(LBFGSReturnCode.OutOfMemory, x, None, None) end lbfgsMain diff --git a/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingArmijo.scala b/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingArmijo.scala index 7b7faf6e3..299964f1b 100644 --- a/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingArmijo.scala +++ b/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingArmijo.scala @@ -58,7 +58,7 @@ object LBFGSBacktrackingArmijo extends LBFGSLineSearch: xNew = x + (s * stpNew) // Evaluate the function and gradient values - val evaluationResults = cd.evaluationLogic.evaluate (cd.instance, xNew, n, stpNew) + val evaluationResults = cd.evalLogic.evaluate (cd.instance, xNew, n, stpNew) fNew = evaluationResults.objFunctionValue gNew = evaluationResults.gradientVector diff --git a/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingOrthantWise.scala b/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingOrthantWise.scala index 2b3feb7c7..16957198b 100644 --- a/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingOrthantWise.scala +++ b/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingOrthantWise.scala @@ -62,7 +62,7 @@ object LBFGSBacktrackingOrthantWise extends LBFGSLineSearch: xNew = orthantwisePrms.project (xNew, wp) // Evaluate the function and gradient values - val evaluationResults = cd.evaluationLogic.evaluate (cd.instance, xNew, n, stpNew) + val evaluationResults = cd.evalLogic.evaluate (cd.instance, xNew, n, stpNew) fNew = evaluationResults.objFunctionValue gNew = evaluationResults.gradientVector diff --git 
a/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingStrongWolfe.scala b/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingStrongWolfe.scala index dda57db7d..a8ac22036 100644 --- a/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingStrongWolfe.scala +++ b/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingStrongWolfe.scala @@ -60,7 +60,7 @@ object LBFGSBacktrackingStrongWolfe extends LBFGSLineSearch: xNew = x + (s * stpNew) // Evaluate the function and gradient values - val evaluationResults = cd.evaluationLogic.evaluate (cd.instance, xNew, n, stpNew) + val evaluationResults = cd.evalLogic.evaluate (cd.instance, xNew, n, stpNew) fNew = evaluationResults.objFunctionValue gNew = evaluationResults.gradientVector diff --git a/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingWolfe.scala b/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingWolfe.scala index ddab703dd..b3bc4f22c 100644 --- a/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingWolfe.scala +++ b/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingWolfe.scala @@ -60,7 +60,7 @@ object LBFGSBacktrackingWolfe extends LBFGSLineSearch: xNew = x + (s * stpNew) // Evaluate the function and gradient values - val evaluationResults = cd.evaluationLogic.evaluate (cd.instance, xNew, n, stpNew) + val evaluationResults = cd.evalLogic.evaluate (cd.instance, xNew, n, stpNew) fNew = evaluationResults.objFunctionValue gNew = evaluationResults.gradientVector diff --git a/src/main/scala/scalation/optimization/quasi_newton/LBFGSCallbackData.scala b/src/main/scala/scalation/optimization/quasi_newton/LBFGSCallbackData.scala index 93d5671d3..20cfd7b64 100644 --- a/src/main/scala/scalation/optimization/quasi_newton/LBFGSCallbackData.scala +++ b/src/main/scala/scalation/optimization/quasi_newton/LBFGSCallbackData.scala @@ -21,13 +21,11 @@ package quasi_newton * and classes while retaining the 
ability to callback the methods of said * logic with the correct parameters. * - * @param n The number of variables used in the optimization. - * @param instance User data provided for a given call of the L-BFGS optimization - * done by `lbfgsMain` on the `LBFGS` object. Can have `Any` - * type defined by the user as long as it is the same one - * expected by the `optimizationLogic` parameter. - * @param evaluationLogic `EvaluationLogic` that describes the optimization steps - * for the L-BFGS optimization done by the `LBFGS` object. + * @param n the number of variables used in the optimization. + * @param instance an optional user data segment that may be provided when calling + * the `LBFGS.lbfgsMain` method (@see `OptimizationLogic`) + * @param evalLogic `EvaluationLogic` that describes the optimization steps + * for the L-BFGS optimization done by the `LBFGS` object. */ -case class LBFGSCallbackData (n: Int, instance: Any, evaluationLogic: EvaluationLogic) +case class LBFGSCallbackData (n: Int, instance: Any, evalLogic: EvaluationLogic) diff --git a/src/main/scala/scalation/optimization/quasi_newton/LBFGSMoreThuente.scala b/src/main/scala/scalation/optimization/quasi_newton/LBFGSMoreThuente.scala index 2ecd16991..6127927a9 100644 --- a/src/main/scala/scalation/optimization/quasi_newton/LBFGSMoreThuente.scala +++ b/src/main/scala/scalation/optimization/quasi_newton/LBFGSMoreThuente.scala @@ -56,14 +56,14 @@ object LBFGSMoreThuente extends LBFGSLineSearch: var fNew = f var stpNew = stp - /* Check the input parameters for errors. */ + // Check the input parameters for errors. if stp <= 0 then return LBFGSLineSearchFailure (LBFGSReturnCode.InvalidPrms, LBFGSLineSearchIncomplete (xNew, fNew)) - /* Compute the initial gradient in the search direction. */ + // Compute the initial gradient in the search direction. val dginit = g dot s - /* Make sure that s points to a descent direction. */ + // Make sure that s points to a descent direction. 
if 0 < dginit then return LBFGSLineSearchFailure (LBFGSReturnCode.IncreaseGradient, LBFGSLineSearchIncomplete (xNew, fNew)) @@ -94,7 +94,6 @@ object LBFGSMoreThuente extends LBFGSLineSearch: else stmin = stx stmax = stpNew + 4.0 * (stpNew - stx) - end if // Clip the step in the range of [stpmin, stpmax] if stpNew < params.minStep then stpNew = params.minStep @@ -104,13 +103,12 @@ object LBFGSMoreThuente extends LBFGSLineSearch: if (brackt && ((stpNew <= stmin || stmax <= stpNew) || params.maxLineSearch <= count + 1 || errorCode.nonEmpty)) || (brackt && (stmax - stmin <= params.xtol * stmax)) then stpNew = stx - end if // Compute the current value of xNew: xNew <- x + (*stp) * s. xNew = x + (s * stpNew) // Evaluate the function and gradient values - val evaluationResults = cd.evaluationLogic.evaluate (cd.instance, xNew, cd.n, stpNew) + val evaluationResults = cd.evalLogic.evaluate (cd.instance, xNew, cd.n, stpNew) fNew = evaluationResults.objFunctionValue gNew = evaluationResults.gradientVector @@ -275,7 +273,6 @@ object LBFGSMoreThuente extends LBFGSLineSearch: newt = if abs(t - mc) < abs(t - mq) then mc else mq else newt = if abs(t - mc) > abs(t - mq) then mc else mq - end if else /* Case 4: a lower function value, derivatives of the same sign, and the magnitude of the derivative does diff --git a/src/main/scala/scalation/optimization/quasi_newton/LBFGS_B.scala b/src/main/scala/scalation/optimization/quasi_newton/LBFGS_B.scala index 0df640456..08af39c8f 100644 --- a/src/main/scala/scalation/optimization/quasi_newton/LBFGS_B.scala +++ b/src/main/scala/scalation/optimization/quasi_newton/LBFGS_B.scala @@ -1,6 +1,6 @@ //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng +/** @author Hao Peng, Nirupom Bose Roy * @version 2.0 * @date Fri Oct 7 12:27:00 EDT 2017 * @see LICENSE (MIT style license file). 
@@ -29,6 +29,8 @@ import scala.util.control.Breaks.{break, breakable} import scalation.calculus.Differential.∇ import scalation.mathstat._ +import scalation.optimization.quasi_newton.LBFGS_B_TestUtil._ +import scalation.optimization.functions._ import MatrixD.eye @@ -305,38 +307,126 @@ class LBFGS_B (f: FunctionV2S, g: FunctionV2S = null, end if end fg - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform an exact `GoldenSectionLS` or inexact `WolfeLS` Line Search. - * Search in direction dir, returning the distance z to move in that direction. + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform a line search from point `x` along direction `dir`, returning the + * accepted step length. + * This method satisfies the `Minimizer` trait contract. For exact scalar + * search requests it delegates to `lineSearch1D`; otherwise it delegates to + * the native More-Thuente bounded line search and returns only the accepted + * step length. * @param x the current point - * @param dir the direction to move in + * @param dir the search direction * @param step the initial step size */ - def lineSearch (x: VectorD, dir: VectorD, step: Double = STEP): Double = + override def lineSearch (x: VectorD, dir: VectorD, step: Double): Double = + if exactLS then lineSearch1D (x, dir, step) + else + val fv = fg(x) + val gr = ∇(fg)(x) + val (_, _, _, rate) = lineSearchMT (x, fv, gr, dir, step) + rate + end lineSearch + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform a More-Thuente line search along the feasible segment defined by + * the current point `x` and search direction `dir`. + * For L-BFGS-B, `dir` is typically the vector from the current point to the + * subspace minimizer, so the feasible step length is restricted to the + * interval `[0, 1]`. + * Returns the accepted point, gradient, objective value, and step length. 
+ * If the line search fails but provides an incomplete best point, that point + * is used as a conservative fallback. + * @param x the current point + * @param fv the objective value at `x` + * @param gr the gradient at `x` + * @param dir the search direction + * @param alphaInit the initial step size + */ + private def lineSearchMT (x: VectorD, fv: Double, + gr: VectorD, dir: VectorD, + alphaInit: Double): (VectorD, VectorD, Double, Double) = + + debug ("lineSearchMT", s"x = $x, fv = $fv, gr = $gr, dir = $dir, alphaInit = $alphaInit") + + val dirNorm = dir.norm + if dirNorm <= EPSILON then return (x, gr, fv, 0.0) + + // Reuse the native L-BFGS callback/evaluation pathway so that the bounded + // solver uses the same More-Thuente implementation as the unconstrained one. + val evalLogic = FunctionEvaluation ((z: VectorD) => fg (z)) + val cd = LBFGSCallbackData (dim, None, evalLogic) + + // Restrict search to the feasible convex segment x + α dir, α in [0, 1]. + val initStep = max (1.0e-12, min (alphaInit, 1.0)) + val lsPrms = LBFGSLineSearchPrms (defaultStep = initStep, + minStep = 1.0e-15, + maxStep = 1.0, + ftol = 1.0e-4, + gtol = 1.0e-2, + xtol = 1.0e-15, + maxLineSearch = 20) + + LBFGSMoreThuente.lineSearch (dim, x, fv, gr, dir, initStep, cd, lsPrms) match + case stepRes: LBFGSLineSearchStep => + (stepRes.x, stepRes.g, stepRes.fx, stepRes.step) + + case failRes: LBFGSLineSearchFailure => + val inc = failRes.bestIncompleteResults + val xInc = inc.variableValues + val fInc = inc.functionValue + val gInc = ∇ (fg)(xInc) + val alpha = ((xInc - x) dot dir) / max (dir dot dir, EPSILON) + (xInc, gInc, fInc, alpha) + end lineSearchMT + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform a scalar 1D line search along the direction `dir` from point `x`. + * This helper is used only when exact scalar line search is explicitly + * requested. 
Otherwise, the bounded solver uses the native More-Thuente + * line search via `lineSearchMT`. + * @param x the current point + * @param dir the search direction + * @param step the initial step size (may use STEP as default) + */ + private def lineSearch1D (x: VectorD, dir: VectorD, step: Double): Double = debug ("linesearch", s"x = $x, dir = $dir, step = $step") def f_1D (z: Double): Double = fg(x + dir * z) // create a 1D function val ls = if exactLS then new GoldenSectionLS (f_1D ) // Golden Section Line Search else new WolfeLS (f_1D) // Wolfe line search ((c1 = .0001, c2 = .9) ls.search (step) // perform a Line Search - end lineSearch - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Solve the following Non-Linear Programming (NLP) problem using L-BFGS_B: - * min { f(x) | g(x) <= 0 }. - * @param x0 the starting point - * @param alphaInit the initial step size - * @param toler the tolerance + end lineSearch1D + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Solve the bound-constrained nonlinear optimization problem + * min { f(x) | g(x) <= 0 } + * using the L-BFGS-B algorithm. + * Notes: + * - If no bounds are provided, this method installs default unbounded + * box constraints `(-∞, +∞)` in every dimension. + * - The internal search step is computed along the feasible segment from + * the current point `x` to the subspace minimizer. 
+ * @param x0 the starting point + * @param alphaInit the initial line-search step size + * @param toler the convergence tolerance */ - def solve (x0: VectorD, alphaInit : Double = STEP, toler: Double = EPSILON): FuncVec = + def solve (x0: VectorD, alphaInit: Double = STEP, toler: Double = EPSILON): FuncVec = debug ("solve", s"x0 = $x0, alphaInit = $alphaInit, toler = $toler") var best = (MAX_VALUE, VectorD.nullv) dim = x0.size theta = 1.0 - if l_u == null then l_u = makeBounds (dim, NEGATIVE_INFINITY, POSITIVE_INFINITY) - val (l, u) = l_u + + // Install default unbounded box constraints when bounds were not supplied. + // Note: `l_u` may be the tuple `(null, null)` rather than `null`, so both + // tuple members must be checked explicitly. + val needDefaultBounds = + l_u == null || l_u._1 == null || l_u._2 == null + if needDefaultBounds then + l_u = makeBounds(dim, NEGATIVE_INFINITY, POSITIVE_INFINITY) + +// val (l, u) = l_u ww = new MatrixD (dim, 0) // FIX - causes empty matrix warning // mm = new MatrixD (0, 0) // find alt. to zero dimension matrix @@ -369,17 +459,28 @@ class LBFGS_B (f: FunctionV2S, g: FunctionV2S = null, val subspaceMin = subspaceMinimize (x, gr, xCauchy, c) forceBounds (subspaceMin) - // STEP 4: perform linesearch - val rate = lineSearch (x, subspaceMin-x, alphaInit) - - // STEP 5: compute gradient - x = x - (x - subspaceMin) * rate // update current guess and function information - forceBounds (x) + // STEP 4: perform line search along the feasible segment from + // the current point to the subspace minimizer. 
+ val dir = subspaceMin - x + val (xLs, gLs, fLs, _) = // _ for rate + if exactLS then + val r = lineSearch1D (x, dir, alphaInit) + val xNew = x + dir * r + forceBounds (xNew) + val fNew = fg (xNew) + val gNew = ∇ (fg)(xNew) + (xNew, gNew, fNew, r) + else + lineSearchMT (x, fv, gr, dir, alphaInit) + + // STEP 5: accept line-search result + x = xLs + gr = gLs + fv = fLs + forceBounds (x) // safeguard only; should already be feasible if 0 <= rate <= 1 - fv = fg(x) if blown ((fv, x)) then { best = better ((f_old, x_old), best); break () } - gr = ∇ (fg)(x) mgn = getMgn (x, gr) if mgn < toler || count > countMax then { best = better ((fv, x), best); break () } if abs (mgn - mgn_old) < toler then count += 1 @@ -387,7 +488,7 @@ class LBFGS_B (f: FunctionV2S, g: FunctionV2S = null, val newY = gr - g_old // prepare for next iteration val newS = x - x_old - // STEP 6 + // STEP 6: update limited-memory history if the curvature condition holds. val test = abs (newS dot newY) if test > EPSILON * newY.normSq then if yHistory.size >= hs then { yHistory.remove (0); sHistory.remove (0) } @@ -395,7 +496,7 @@ class LBFGS_B (f: FunctionV2S, g: FunctionV2S = null, sHistory append newS theta = (newY dot newY) / (newY dot newS) - // STEP 7 + // STEP 7: rebuild the compact L-BFGS-B matrices W and M. yHistoryMx = MatrixD (yHistory).transpose sHistoryMx = MatrixD (sHistory).transpose ww = yHistoryMx ++^ (sHistoryMx * theta) @@ -498,3 +599,283 @@ end lBFGS_BTest2 end lBFGS_BTest3 + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `lBFGS_BQuadraticInteriorTest` main function test L-BFGS-B on a simple + * convex quadratic with an interior optimum. 
+ * > runMain scalation.optimization.quasi_newton.lBFGS_BQuadraticInteriorTest + */ +@main def lBFGS_BQuadraticInteriorTest (): Unit = + + val x0 = VectorD (0.0, 0.0) + def f (x: VectorD): Double = (x(0) - 3.0)~^2 + (x(1) - 4.0)~^2 + 1.0 + + runLBFGSB (name = "Quadratic interior optimum", + f = f, + x0 = x0, + bounds = makeBounds (2, -10.0, 10.0), + exactLS = false) + + compareLS (name = "Quadratic interior optimum", + f = f, + x0 = x0, + bounds = makeBounds (2, -10.0, 10.0)) + +end lBFGS_BQuadraticInteriorTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `lBFGS_BQuadraticActiveBoundTest` main function tests L-BFGS-B on a convex + * quadratic whose optimum is clipped by active bounds. + * > runMain scalation.optimization.quasi_newton.lBFGS_BQuadraticActiveBoundTest + */ +@main def lBFGS_BQuadraticActiveBoundTest (): Unit = + + val x0 = VectorD (0.0, 0.0) + def f (x: VectorD): Double = (x(0) - 3.0)~^2 + (x(1) - 4.0)~^2 + 1.0 + + runLBFGSB (name = "Quadratic active-bound optimum", + f = f, + x0 = x0, + bounds = (VectorD (-10.0, -10.0), VectorD (2.5, 3.5)), + exactLS = false) + + compareLS (name = "Quadratic active-bound optimum", + f = f, + x0 = x0, + bounds = (VectorD (-10.0, -10.0), VectorD (2.5, 3.5))) + +end lBFGS_BQuadraticActiveBoundTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `lBFGS_BInactiveBoundsVsLBFGSTes` main function compare L-BFGS-B to unconstrained + * L-BFGS when bounds are effectively inactive. 
+ * > runMain scalation.optimization.quasi_newton.lBFGS_BInactiveBoundsVsLBFGSTes + */ +@main def lBFGS_BInactiveBoundsVsLBFGSTest (): Unit = + + val x0 = VectorD (-4.0, 7.0) + def f (x: VectorD): Double = (x(0) + 2.0 * x(1) - 7.0)~^2 + + (2.0 * x(0) + x(1) - 5.0)~^2 + + compareToLBFGS ( + name = "Booth with inactive bounds", + f = f, + x0 = x0, + bounds = makeBounds (2, -100.0, 100.0) + ) + +end lBFGS_BInactiveBoundsVsLBFGSTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The boothFunctionLBFGS_BTest tests L-BFGS-B on the Booth function. + * > runMain scalation.optimization.quasi_newton.boothFunctionLBFGS_BTest + */ +@main def boothFunctionLBFGS_BTest (): Unit = + + val lo = VectorD (-10, -10) + val hi = VectorD ( 10, 10) + + runLBFGSB (name = "Booth", + f = BoothFunction.objFunction, + x0 = VectorD (-4, 7), + bounds = (lo, hi), + exactLS = false) + + compareLS (name = "Booth", + f = BoothFunction.objFunction, + x0 = VectorD (-4, 7), + bounds = (lo, hi)) + +end boothFunctionLBFGS_BTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `bealeFunctionLBFGS_BTest` main function tests L-BFGS-B on the Beale function. + * > runMain scalation.optimization.quasi_newton.bealeFunctionLBFGS_BTest + */ +@main def bealeFunctionLBFGS_BTest (): Unit = + + val lo = VectorD (-10, -10) + val hi = VectorD ( 10, 10) + + runLBFGSB (name = "Beale", + f = BealeFunction.objFunction, + x0 = VectorD (2, -2), + bounds = (lo, hi), + exactLS = false) + + compareLS (name = "Beale", + f = BealeFunction.objFunction, + x0 = VectorD (2, -2), + bounds = (lo, hi)) + +end bealeFunctionLBFGS_BTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `bohachevsky1FunctionLBFGS_BTest` main function tests L-BFGS-B on the Bohachevsky 1 function. 
+ * > runMain scalation.optimization.quasi_newton.bohachevsky1FunctionLBFGS_BTest + */ +@main def bohachevsky1FunctionLBFGS_BTest (): Unit = + + runLBFGSB (name = "Bohachevsky1", + f = Bohachevsky1Function.objFunction, + x0 = VectorD (10, -10), + bounds = makeBounds (2, -10.0, 10.0), + exactLS = false) + + compareLS (name = "Bohachevsky1", + f = Bohachevsky1Function.objFunction, + x0 = VectorD (10, -10), + bounds = makeBounds (2, -10.0, 10.0)) + +end bohachevsky1FunctionLBFGS_BTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `bohachevsky2FunctionLBFGS_BTest` main function tests L-BFGS-B on the Bohachevsky 2 function. + * > runMain scalation.optimization.quasi_newton.bohachevsky2FunctionLBFGS_BTest + */ +@main def bohachevsky2FunctionLBFGS_BTest (): Unit = + + runLBFGSB (name = "Bohachevsky2", + f = Bohachevsky2Function.objFunction, + x0 = VectorD (10, -10), + bounds = makeBounds (2, -10.0, 10.0), + exactLS = false) + + compareLS (name = "Bohachevsky2", + f = Bohachevsky2Function.objFunction, + x0 = VectorD (10, -10), + bounds = makeBounds (2, -10.0, 10.0)) + +end bohachevsky2FunctionLBFGS_BTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `bohachevsky3FunctionLBFGS_BTest` main function tests L-BFGS-B on the Bohachevsky 3 function. 
+ * > runMain scalation.optimization.quasi_newton.bohachevsky3FunctionLBFGS_BTest + */ +@main def bohachevsky3FunctionLBFGS_BTest (): Unit = + + runLBFGSB (name = "Bohachevsky3", + f = Bohachevsky3Function.objFunction, + x0 = VectorD (10, -10), + bounds = makeBounds (2, -10.0, 10.0), + exactLS = false) + + compareLS (name = "Bohachevsky3", + f = Bohachevsky3Function.objFunction, + x0 = VectorD (10, -10), + bounds = makeBounds (2, -10.0, 10.0)) + +end bohachevsky3FunctionLBFGS_BTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `camel3FunctionLBFGS_BTest` main function tests L-BFGS-B on the Three-Hump Camel function. + * > runMain scalation.optimization.quasi_newton.camel3FunctionLBFGS_BTest + */ +@main def camel3FunctionLBFGS_BTest (): Unit = + + runLBFGSB (name = "Camel3", + f = Camel3Function.objFunction, + x0 = VectorD (10, -10), + bounds = makeBounds (2, -10.0, 10.0), + exactLS = false) + + compareLS (name = "Camel3", + f = Camel3Function.objFunction, + x0 = VectorD (10, -10), + bounds = makeBounds (2, -10.0, 10.0)) + +end camel3FunctionLBFGS_BTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `cubeFunctionLBFGS_BTest` main function tests L-BFGS-B on the Cube function. + * > runMain scalation.optimization.quasi_newton.cubeFunctionLBFGS_BTest + */ +@main def cubeFunctionLBFGS_BTest (): Unit = + + runLBFGSB (name = "Cube", + f = CubeFunction.objFunction, + x0 = VectorD (5, -5), + bounds = makeBounds (2, -10.0, 10.0), + exactLS = false) + + compareLS (name = "Cube", + f = CubeFunction.objFunction, + x0 = VectorD (5, -5), + bounds = makeBounds (2, -10.0, 10.0)) + +end cubeFunctionLBFGS_BTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `freudensteinRothFunctionLBFGS_BTest` main function tests L-BFGS-B on the Freudenstein-Roth function. 
+ * > runMain scalation.optimization.quasi_newton.freudensteinRothFunctionLBFGS_BTest + */ +@main def freudensteinRothFunctionLBFGS_BTest (): Unit = + + runLBFGSB (name = "Freudenstein-Roth", + f = FreudensteinRothFunction.objFunction, + x0 = VectorD (5, -5), + bounds = makeBounds (2, -10.0, 10.0), + exactLS = false) + + compareLS (name = "Freudenstein-Roth", + f = FreudensteinRothFunction.objFunction, + x0 = VectorD (5, -5), + bounds = makeBounds (2, -10.0, 10.0)) + +end freudensteinRothFunctionLBFGS_BTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `mccormickFunctionLBFGS_BTest` main function tests L-BFGS-B on the McCormick function. + * > runMain scalation.optimization.quasi_newton.mccormickFunctionLBFGS_BTest + */ +@main def mccormickFunctionLBFGS_BTest (): Unit = + + val bounds = (VectorD (-4.0, -4.0), VectorD (4.0, 4.0)) + + runLBFGSB (name = "McCormick", + f = McCormickFunction.objFunction, + x0 = VectorD (2.5, 3.5), + bounds = bounds, + exactLS = false) + + compareLS (name = "McCormick", + f = McCormickFunction.objFunction, + x0 = VectorD (2.5, 3.5), + bounds = bounds) + +end mccormickFunctionLBFGS_BTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `run_all_LBFGS_B` main function runs a compact but broad L-BFGS-B regression suite. 
+ * > runMain scalation.optimization.quasi_newton.run_all_LBFGS_B + */ +@main def run_all_LBFGS_B (): Unit = + + lBFGS_BQuadraticInteriorTest () + lBFGS_BQuadraticActiveBoundTest () + lBFGS_BInactiveBoundsVsLBFGSTest () + + boothFunctionLBFGS_BTest () + bealeFunctionLBFGS_BTest () + bohachevsky1FunctionLBFGS_BTest () + bohachevsky2FunctionLBFGS_BTest () + bohachevsky3FunctionLBFGS_BTest () + camel3FunctionLBFGS_BTest () + cubeFunctionLBFGS_BTest () + freudensteinRothFunctionLBFGS_BTest () + mccormickFunctionLBFGS_BTest () + +end run_all_LBFGS_B + diff --git a/src/main/scala/scalation/optimization/quasi_newton/LBFGS_B.scala.bak b/src/main/scala/scalation/optimization/quasi_newton/LBFGS_B.scala.bak new file mode 100644 index 000000000..e4f0cad22 --- /dev/null +++ b/src/main/scala/scalation/optimization/quasi_newton/LBFGS_B.scala.bak @@ -0,0 +1,1208 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Hao Peng + * @version 2.0 + * @date Fri Oct 7 12:27:00 EDT 2017 + * @see LICENSE (MIT style license file). + * + * @note Limited memory BFGS with Bounds (L-BFGS-B) + * + *------------------------------------------------------------------------------ + * Limited memory Broyden–Fletcher–Goldfarb–Shanno (BFGS) for Bound constrained + * optimization (L-BFGS-B) algorithm. Originally proposed by Byrd et al. in 1995. + * See the first two links for the original paper and authors' software (written + * in Fortran) distribution site, respectively. This implementation is translated + * from a C++ implementation found in the last link. 
+ * + * @see www.ece.northwestern.edu/~nocedal/PSfiles/limited.ps.gz + * @see users.iems.northwestern.edu/~nocedal/lbfgsb.html + * @see github.com/PatWie/CppNumericalSolvers/blob/master/include/cppoptlib/solver/lbfgsbsolver.h + */ + +package scalation +package optimization +package quasi_newton + +import scala.collection.mutable.ArrayBuffer +import scala.math.{abs, max, min} +import scala.util.control.Breaks.{break, breakable} + +import scalation.calculus.Differential.∇ +import scalation.mathstat._ +import MatrixD.eye + +import scala.annotation.unused + +type Bounds = (VectorD, VectorD) + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `LBFGS_B` companion object provides a factory method for Limited memory + * Broyden–Fletcher–Goldfarb–Shanno for Bounds constrained optimization. + */ +object LBFGS_B: + + val emptyMatrix = new MatrixD (0, 0) // empty zero dimension matrix + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create an `LBFGS_B` object with a given dimensionality and default lower + * and upper bounds of -1 and 1, respectively. + * @param f the objective function to be minimized + * @param n the dimensionality of the search space + * @param exactLS whether to use exact (e.g., `GoldenLS`) + * or inexact (e.g., `WolfeLS`) Line Search + * @param l_u (vector, vector) of lower and upper bounds for all input parameters + * @param gradF vector to vector functional formula for computing the gradiant, if available + */ + def apply (f: FunctionV2S, n: Int, + exactLS: Boolean = false, l_u_ : Bounds = (null, null), + gradF: FunctionV2V = null): LBFGS_B = + + val l_u = if l_u_ == (null, null) then (VectorD.fill (n)(-1), VectorD.fill (n)(1)) + else l_u_ + new LBFGS_B (f, exactLS, l_u, gradF) + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Make simple bounds where the limits in each dimension is the same. 
+ * @param n the dimensionality of the search space + * @param lo scalar lower bounds for all input parameters + * @param up scalar upper bounds for all input parameters + */ + inline def makeBounds (n: Int, lo: Double, up: Double): Bounds = + (VectorD.fill (n)(lo), VectorD.fill (n)(up)) + end makeBounds + +end LBFGS_B + +import LBFGS_B.makeBounds + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `LBFGS_B` the class implements the Limited memory Broyden–Fletcher– + * Goldfarb–Shanno for Bounds constrained optimization (L-BFGS-B) + * Quasi-Newton Algorithm for solving Non-Linear Programming (NLP) problems. + * L-BFGS-B determines a search direction by deflecting the steepest descent direction + * vector (opposite the gradient) by * multiplying it by a matrix that approximates + * the inverse Hessian. Furthermore, only a few vectors represent the approximation + * of the Hessian Matrix (limited memory). The parameters estimated are also bounded + * within user specified lower and upper bounds. 
+ * + * minimize f(x) + * subject to g(x) <= 0 [ optionally g(x) == 0 ] + * + * @param f the objective function to be minimized + * @param exactLS whether to use exact (e.g., `GoldenLS`) + * or inexact (e.g., `WolfeLS`) Line Search + * @param l_u (vector, vector) of lower and upper bounds for all input parameters + * @param gradF vector to vector functional formula for computing the gradiant, if available + */ +class LBFGS_B (f: FunctionV2S, + exactLS: Boolean = false, + private var l_u: Bounds = (null, null), + gradF: FunctionV2V = null) + extends Minimizer: + + private val debug = debugf ("LBFGS_B", false) // debug function + private var ww, mm: MatrixD = null // workspace matrices + private var theta = 0.0 // a scaling parameter + private var dim = 0 // dimension of the input vector + private var hs = 5 // history size, number of historical vectors to store + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Sort pairs (k, v) according to v into ascending order. + * std::vector sort_indexes(const std::vector< std::pair > &v) + * @param v the ArrayBuffer of Tuple2 to be sorted by the 2nd element + */ + private def sortIndices (v: ArrayBuffer [(Int, Double)]): VectorI = + val sv = v.sortBy (_._2) // FIX - order different in C++ code + val idx = new VectorI (sv.length) + for i <- idx.indices do idx(i) = sv(i)._1 + idx + end sortIndices + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Force the values within 'v' to stay within the pre-defined bounds. 
+ * @see void clampToBound(const TProblem &problem, TVector &x) + * @param v the Vector containing values to be adjusted + */ + private def forceBounds (v: VectorD): Unit = + val (l, u) = l_u + for i <- v.indices do + if v(i) > u(i) then v(i) = u(i) // upper bound + else if v(i) < l(i) then v(i) = l(i) // lower bound + end for + end forceBounds + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Algorithm CP: Computation of the Generalized Cauchy Point. See page 8 of + * @see www.ece.northwestern.edu/~nocedal/PSfiles/limited.ps.gvz + * @see void getGeneralizedCauchyPoint(const TProblem &problem, const TVector &x, + * const TVector &g, TVector &x_cauchy, VariableTVector &c) + * @param x the parameter vector + * @param gr the gradient vector + */ + private def getGCP (x: VectorD, gr: VectorD): Bounds = + debug ("getGCP", s"x = $x, gr = $gr") + + val (l, u) = l_u + val setOfT = new ArrayBuffer [(Int, Double)] () + val d = -gr + + for j <- 0 until dim do + if gr(j) == 0 then setOfT.append ((j, MAX_VALUE)) + else + val tmp = if gr(j) < 0 then (x(j) - u(j)) / gr(j) + else (x(j) - l(j)) / gr(j) + setOfT.append ((j, tmp)) + if tmp == 0 then d(j) = 0 + end if + end for + + val sortedIndices = sortIndices (setOfT) + val xCauchy = x.copy + + val p = ww.transpose * d + val c = new VectorD (ww.dim2) + var fPrime = -d.dot (d) + + // MODIFIED + inline def eps = Math.ulp (1.0) // Match C++ code for numerical stability + + var fDoublePrime = max (-theta * fPrime - (p dot (mm * p)), eps) // Using eps instead of EPSILON + val f_dp_orig = fDoublePrime + var dt_min = -fPrime / fDoublePrime + var t_old = 0.0 + + var i = 0 + breakable { + for j <- 0 until dim do + i = j + if setOfT (sortedIndices(j))._2 > 0 then break () + } // breakable + var b = sortedIndices(i) + var t = setOfT(b)._2 + var dt = t + + while dt_min >= dt && i < dim do + if d(b) > 0 then xCauchy(b) = u(b) + else if d(b) < 0 then xCauchy(b) = l(b) + val zb = xCauchy(b) - x(b) + c += p * 
dt + + // cache + val wbt = ww(b) + fPrime += dt * fDoublePrime + gr(b) * gr(b) + theta * gr(b) * zb - gr(b) * wbt.dot (mm * c) + fDoublePrime += -theta * gr(b) * gr(b) - 2.0 * (gr(b) * wbt.dot (mm * p)) + - gr(b) * gr(b) * (wbt.dot (mm * wbt)) + fDoublePrime = max (eps * f_dp_orig, fDoublePrime) // Using eps instead of EPSILON + + p += wbt * gr(b) + d(b) = 0 + dt_min = -fPrime / fDoublePrime + t_old = t + i += 1 + if i < dim then + b = sortedIndices(i) + t = setOfT(b)._2 + dt = t - t_old + end while + + dt_min = max (dt_min, 0.0) + t_old += dt_min + + for ii <- i until xCauchy.dim do + val si = sortedIndices (ii) + xCauchy(si) = x(si) + t_old * d(si) + c += p * dt_min + (xCauchy, c) + end getGCP + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Find the alpha* parameter, a positive scalar. See Equation 5.8 on page 11 of + * @see www.ece.northwestern.edu/~nocedal/PSfiles/limited.ps.gvz + * @see Scalar findAlpha(const TProblem &problem, TVector &x_cp, VariableTVector &du, std::vector &FreeVariables) + * @param x_cp vector of cauchy point + * @param du vector containing intermediate results used to find alpha* + * @param freeVar an ArrayBuffer storing the indices of free variable + */ + private def findAlpha (x_cp: VectorD, du: VectorD, freeVar: ArrayBuffer [Int]): Double = + debug ("findAlpha", s"x_cp = $x_cp, du = $du, freeVar = $freeVar") + + val (l, u) = l_u + var alphastar = 1.0 + val n = freeVar.size + assert (du.dim == n) + + // MODIFIED : changed to match C++ code (will help with numerical stability) + var i = 0 + while i < n do + val v = du(i) + if abs (v) >= 1e-7 then // keep guard + val fi = freeVar(i) + val a = if v > 0.0 then (u(fi) - x_cp(fi)) / v + else (l(fi) - x_cp(fi)) / v + alphastar = min (alphastar, a) + end if + i += 1 + end while + + max (0.0, min (1.0, alphastar)) // ensure in [0, 1] + end findAlpha + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Minimization of the 
subspace of free variables. See Section 5 on page 9 of + * @see www.ece.northwestern.edu/~nocedal/PSfiles/limited.ps.gvz + * @see void SubspaceMinimization(const TProblem &problem, TVector &x_cauchy, + TVector &x, VariableTVector &c, TVector &g, TVector &SubspaceMin) + * @param x the parameter vector + * @param gr the gradient vector + * @param xCauchy the vector of Cauchy points + * @param c vector obtained from getGCP used to initialize the subspace + * minimization process + */ + private def subspaceMinimize (x: VectorD, gr: VectorD, xCauchy: VectorD, c: VectorD): VectorD = + debug ("subspaceMinimize", s"x = $x, gr = $gr, xCauchy = $xCauchy, c = $c") + + val (l, u) = l_u + val thetaInverse = 1.0 / theta + val freeVarIdx = new ArrayBuffer [Int] () + for i <- xCauchy.indices if xCauchy(i) != u(i) && xCauchy(i) != l(i) do freeVarIdx.append (i) + val freeVarCount = freeVarIdx.size + + val wwzz = new MatrixD (ww.dim2, freeVarCount) + for i <- 0 until freeVarCount do wwzz(?, i) = ww(freeVarIdx(i)) + val rr = (gr + (xCauchy - x) * theta - ww * (mm * c)) + val r = new VectorD (freeVarCount) + for i <- 0 until freeVarCount do r(i) = rr(freeVarIdx(i)) + + var v = mm * (wwzz * r) + var nn = wwzz * wwzz.transpose * thetaInverse + nn = eye (nn.dim, nn.dim) - mm * nn + + val lu = new Fac_LU (nn) + lu.factor () + v = lu.solve (v) + + val du = r * -thetaInverse - wwzz.transpose * v * thetaInverse * thetaInverse + val alpha_star = findAlpha (xCauchy, du, freeVarIdx) + val dStar = du * alpha_star + val subspaceMin = xCauchy.copy + for i <- 0 until freeVarCount do subspaceMin (freeVarIdx(i)) += dStar(i) + subspaceMin + + end subspaceMinimize + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Modify the number of historical vectors to store. 
     *  @see void setHistorySize(const int hs) { m_historySize = hs; }
     *  @param hs_  the new history size
     */
    def setHistorySize (hs_ : Int): Unit = hs = hs_

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compute the projected gradient under simple bounds.
     *  If x_i is strictly inside (l_i, u_i), keep g_i.
     *  If x_i is at lower bound, clamp to min(0, g_i) (can't go further down).
     *  If x_i is at upper bound, clamp to max(0, g_i) (can't go further up).
     *  Result is zero in components where movement is blocked by an active bound.
     *  @param x  the current point
     *  @param g  the raw gradient at x
     *  @param l  the vector of lower bounds
     *  @param u  the vector of upper bounds
     */
    private def projectedGrad (x: VectorD, g: VectorD, l: VectorD, u: VectorD): VectorD =
        val pg = new VectorD (x.dim)
        var i = 0
        while i < x.dim do
            val xi = x(i);
            val gi = g(i)
            pg(i) =
                if xi > l(i) && xi < u(i) then gi                          // interior: gradient unchanged
                else if xi <= l(i) then math.min (0.0, gi)                 // at lower bound: only descent "into" the box counts
                else /* xi >= u(i) */ math.max (0.0, gi)                   // at upper bound: symmetric case
            i += 1
        pg
    end projectedGrad

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** The objective function f re-exposed for the `Minimizer` framework.
     *  @param x  the coordinate values of the current point
     */
    inline override def fg (x: VectorD): Double = f(x)

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Perform an exact `GoldenSectionLS` or inexact `WolfeLS` Line Search.
     *  Search in direction dir, returning the distance z to move in that direction.
     *  @param x     the current point
     *  @param dir   the direction to move in
     *  @param step  the initial step size
     */
    @deprecated ("Use LBFGSLineSearch with MoreThuente instead", "2.0")
    def lineSearch (x: VectorD, dir: VectorD, step: Double = STEP): Double =
        debug ("linesearch", s"x = $x, dir = $dir, step = $step")

        def f_1D (z: Double): Double = fg(x + dir * z)                     // create a 1D function
        val ls = if exactLS then new GoldenSectionLS (f_1D )               // Golden Section Line Search
                 else new WolfeLS (f_1D)                                   // Wolfe line search ((c1 = .0001, c2 = .9)
        ls.search (step)                                                   // perform a Line Search
    end lineSearch

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Solve the following Non-Linear Programming (NLP) problem using L-BFGS_B:
     *  min { f(x) | g(x) <= 0 }.
     *  @see void minimize(TProblem &problem, TVector &x0)
     *  @param x0         the starting point
     *  @param alphaInit  the initial step size
     *  @param toler      the tolerance
     */
    def solve (x0: VectorD, alphaInit: Double = 1.0, toler: Double = EPSILON): FuncVec =
        debug ("solve", s"x0 = $x0, alphaInit = $alphaInit, toler = $toler")

        var best = (MAX_VALUE, VectorD.nullv)                              // best (f(x), x) seen so far

        var outerIters = 0                                                 // outer iterations actually performed
        var totalLineSearchIters = 0                                       // cumulative MoreThuente iterations

        dim = x0.dim
        theta = 1.0                                                        // initial Hessian scaling
        if l_u == (null, null) then l_u = makeBounds (dim, NEGATIVE_INFINITY, POSITIVE_INFINITY)

        ww = new MatrixD (dim, 0)                                          // FIX - causes empty matrix warning
        mm = LBFGS_B.emptyMatrix

        val yHistory = ArrayBuffer [VectorD] ()                            // gradient-difference history (y_k)
        val sHistory = ArrayBuffer [VectorD] ()                            // step history (s_k)
        var yHistoryMx: MatrixD = null
        var sHistoryMx: MatrixD = null

        var x = x0.copy
        forceBounds (x)                                                    // MODIFIED: force bounds on initial x (faster convergence)

        // use the analytic gradient when supplied, otherwise numeric differentiation
        val grad_fg = if gradF != null then gradF else (z: VectorD) => ∇(fg)(z)
        var gr = grad_fg(x)

        var fv = fg(x)                                                     // functional value at x
        var mgn = 0.0                                                      // projected-gradient infinity norm
        var count = 0                                                      // consecutive no-progress iterations
        val countMax = 10

        // FIX -- missing "auto noConvergence = ..."

        breakable {                                                        // main while loop in C++ code
            for k <- 1 to MAX_IT do
//              banner (s"solve: iteration $k: f(x) = $fv, x = $x")
                val f_old = fv
                val x_old = x
                val g_old = gr
                val mgn_old = mgn

                // MODIFIED: removed forceBounds on x here to match C++ code
                // STEP 2: compute the cauchy point
                val (xCauchy, c) = getGCP (x, gr)

                // STEP 3: compute a search direction d_k by the primal method for the sub-problem
                val subspaceMin = subspaceMinimize (x, gr, xCauchy, c)

                // STEP 4: perform linesearch   // MODIFIED: Used MoreThuente line search and internal projection to bounds
                val dir = subspaceMin - x

                val evalLogic =
                    if (gradF != null) then FunctionEvaluation (f, grad_fg)
                    else FunctionEvaluation (f)                            // numeric grad will project internally

                val cd = LBFGSCallbackData (dim, null, evalLogic)

                val lineSearchPrms = LBFGSLineSearchPrms (maxLineSearch = 20, defaultStep = 1.0, minStep = 1e-15,
                                                          maxStep = 1e15, ftol = 1e-4, wolfe = 0.9, gtol = 1e-2, xtol = 1e-15)

                val lineSearchImple = LBFGSLineSearch.getImple (LBFGSLineSearchAlg.MoreThuente)

                val lineSearchResults = lineSearchImple.lineSearch (dim, x, fv, gr, dir, alphaInit, cd, lineSearchPrms, None)

                lineSearchResults match
                case step: LBFGSLineSearchStep =>
                    totalLineSearchIters += step.numberOfIterations
                    val stepLen = step.step
                    val x_prev = x
                    val f_prev = fv

                    // 1. Trial Step (Unclamped)
                    val x_try = x_prev + dir * stepLen

                    // 2. Now, clamp final x to bounds (element-wise)
                    val x_new = x_try.copy
                    forceBounds (x_new)

                    // 3. If clamp changed x, then re-evaluate f and g
                    inline def notSame (a: VectorD, b: VectorD): Boolean = (a - b).norm > 0.0

                    if notSame (x_new, x_try) then
                        x  = x_new
                        fv = fg(x)
                        gr = grad_fg(x)
                    else                                                   // clamp was a no-op: reuse line-search results
                        x  = x_try
                        fv = step.fx
                        gr = step.g

                    // compute diagnostics
                    val gnorm  = gr.norm
                    val xdelta = (x - x_prev).norm
                    val fdelta = f_prev - fv
                    printIteration (k, fv, x, gr, gnorm, xdelta, fdelta)
                    outerIters = k

                case fail: LBFGSLineSearchFailure =>
                    println (s"LBFGS_B.solve: line search failed after $k iterations.")
                    println (s" Return code: ${fail.returnCode}")
                    best = better ((f_old, x_old), best)
                    break ()
                end match

                // MODIFIED: changed to match C++ code more closely
                // STEP 5: stationarity & stopping criteria
                if blown ((fv, x)) then                                    // diverged (NaN/overflow): keep last good point
                    best = better ((f_old, x_old), best)
                    break ()

                // Projected gradient (L-BFGS-B style) and ∞-norm (Matches C++ code)
                val (l, u) = l_u
                val pg = projectedGrad (x, gr, l, u)
                mgn = pg.normInf

                val pgtol   = 1e-8                                         // tighten to 1e-8 for more polish
                val ftolRel = 1e-9                                         // relative f-decrease stop

                // Primary stop: projected-gradient ∞-norm
                if mgn <= pgtol || count > countMax then
                    best = better ((fv, x), best)
                    break ()

                // Secondary stop: tiny relative f change
                if math.abs (f_old - fv) <= ftolRel * (1.0 + math.abs (f_old)) then
                    best = better ((fv, x), best)
                    break ()

                // No-progress counter (use pgtol, not toler)
                if math.abs (mgn - mgn_old) < pgtol then count += 1
                else count = 0

                val newY = gr - g_old                                      // prepare for next iteration
                val newS = x - x_old

                // STEP 6 — curvature check and memory update (standard L-BFGS practice)
                // Compute curvature scalars
                val ys = newY dot newS                                     // yᵀs
                val yy = newY dot newY                                     // yᵀy

                // cppoptlib condition: accept pair if |sᵀy| > 1e-7 * (yᵀy)
                if (abs(ys) > 1e-7 * yy) then
                    // Accept the pair, evicting the oldest once the history is full
                    if yHistory.size >= hs then
                        yHistory.remove (0)
                        sHistory.remove (0)
                    yHistory append newY
                    sHistory append newS

                    // STEP 7 — positive scaling
                    theta = yy / ys                                        // stays > 0 because ys > 0

                    yHistoryMx = MatrixD (yHistory).transpose
                    sHistoryMx = MatrixD (sHistory).transpose

                    ww = yHistoryMx ++^ (sHistoryMx * theta)               // W = [ Y | theta * S ]

                    val aa = sHistoryMx.transpose * yHistoryMx
                    val ll = aa.lower
                    ll(?, ?) = 0.0                                         // NOTE(review): presumably zeroes the diagonal of L — confirm MatrixD update semantics

                    val dd = new MatrixD (aa.dim, aa.dim2)
                    dd.setDiag (-aa(?))                                    // D = -diag (S^T Y)

                    // middle matrix M = [ D, L^T ; L, theta * S^T S ] inverted once per update
                    val mm2 = (dd ++^ ll.transpose) ++ (ll ++^ (sHistoryMx.transpose * sHistoryMx * theta))
                    mm = Fac_LU.inverse(mm2)()
                end if

                debug ("solve", s"(k = $k) move from $x_old to $x where fg(x) = $fv")

                best = better ((fv, x), best)
            end for
        } // breakable

        println (f"argmin ${x(0)}%.6f ${if (x.dim > 1) then x(1) else 0.0}%.6f")
        println (s"f in argmin ${f2(fv)}")
        println (s"iterations $outerIters")
        banner (s"solve: optimal solution = $best (outer iters = $outerIters, line-search iters = $totalLineSearchIters)")

        best
    end solve

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Format the double argument.
     *  @param x  the value to be formatted
     */
    private inline def f2 (x: Double): String = f"$x%.6f"

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Format the vector argument (em-dash for a null vector).
     *  @param v  the vector to be formatted
     */
    private inline def vecStr (v: VectorD): String =
        if v == null then "—"
        else v.map (d => "%.6g".format(d)).mkString (" ")
    end vecStr

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Print information about the current iteration it.
+ */ + private def printIteration (it: Int, fx: Double, x: VectorD, g: VectorD, + gnorm: Double, xdelta: Double, fdelta: Double): Unit = + println (f"--- Iteration: $it%5d ---") + println (s" Value: ${f2(fx)}") + println (s" X: ${vecStr(x)}") + println (s" Gradient: ${vecStr(g)}") + println (s" Gradient Norm: ${f2(gnorm)}") + println (s" X Delta: ${f2(xdelta)}") + println (s" F Delta: ${f2(fdelta)}") + println (s" Hessian Cond.: ${f2(theta)}") + println ("-------------------------") + end printIteration + +end LBFGS_B + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `lBFGS_BTest` main function is used to test the `LBFGS_B` class. + * f(x) = (x_0 - 3)^2 + (x_1 - 4)^2 + 1 + * > runMain scalation.optimization.quasi_newton.lBFGS_BTest + */ +@main def lBFGS_BTest (): Unit = + + val n = 2 + val x0 = new VectorD (n) + def f (x: VectorD): Double = (x(0) - 3)~^2 + (x(1) - 4)~^2 + 1 + + banner ("Minimize (no bounds): (x_0 - 3)^2 + (x_1 - 4)^2 + 1") + var optimizer = new LBFGS_B (f) + var opt = optimizer.solve (x0) + println (s"o][ optimal solution (x, f(x)) = $opt") + + banner ("Minimize (bounds [3.5, 5.0]): (x_0 - 3)^2 + (x_1 - 4)^2 + 1") + val lu = makeBounds (x0.dim, 3.5, 5.0) + optimizer = new LBFGS_B (f, l_u = lu) + opt = optimizer.solve (x0) + println (s"][ optimal solution (x, f(x)) = $opt") + +end lBFGS_BTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `lBFGS_BTest2` main function is used to test the `LBFGS_B` class. 
+ * f(x) = x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1 + * > runMain scalation.optimization.quasi_newton.lBFGS_BTest2 + */ +@main def lBFGS_BTest2 (): Unit = + + val n = 2 + val x0 = new VectorD (n) + def f (x: VectorD): Double = x(0)~^4 + (x(0) - 3)~^2 + (x(1) - 4)~^2 + 1 + + banner ("Minimize (no bounds): x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1") + var optimizer = new LBFGS_B (f) + var opt = optimizer.solve (x0) + println (s"][ optimal solution (x, f(x)) = $opt") + + banner ("Minimize (bounds [3.5, 5.0]): x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1") + val lu = makeBounds (x0.dim, 3.5, 5.0) + optimizer = new LBFGS_B (f, l_u = lu) + opt = optimizer.solve (x0) + println (s"][ optimal solution (x, f(x)) = $opt") + +end lBFGS_BTest2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `lBFGS_BTest3` main function is used to test the `LBFGS_B` class. + * f(x) = 1/x_0 + x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1 + * > runMain scalation.optimization.quasi_newton.lBFGS_BTest3 + */ +@main def lBFGS_BTest3 (): Unit = + + val n = 2 + val x0 = VectorD (0.1, 0.0) + def f (x: VectorD): Double = 1/x(0) + x(0)~^4 + (x(0) - 3)~^2 + (x(1) - 4)~^2 + 1 + + banner ("Minimize (no bounds): 1/x_0 + x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1") + var optimizer = new LBFGS_B (f) + var opt = optimizer.solve (x0) + println (s"][ optimal solution (x, f(x)) = $opt") + + opt = optimizer.resolve (n) + println (s"][ optimal solution (x, f(x)) = $opt") + + banner ("Minimize (bounds [3.5, 5.0]): 1/x_0 + x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1") + val lu = makeBounds (x0.dim, 3.5, 5.0) + optimizer = new LBFGS_B (f, l_u = lu) + opt = optimizer.solve (x0) + println (s"][ optimal solution (x, f(x)) = $opt") + +end lBFGS_BTest3 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `lBFGS_BTest4` main function is used to test the `LBFGS_B` class. 
+ * f(x) = 5 * x(0) * x(0) + 100 * x(1) * x(1) + 5 + * > runMain scalation.optimization.quasi_newton.lBFGS_BTest4 + */ +@main def lBFGS_BTest4 (): Unit = + + import scalation.calculus.Differential.{∇, Η} + + val x0 = VectorD (-10.0, 2.0) + + // function + def f (x: VectorD): Double = 5 * x(0) * x(0) + 100 * x(1) * x(1) + 5 + // analytical gradient + def gradF (x: VectorD): VectorD = VectorD(10 * x(0), 200 * x(1)) + // analytical Hessian + def hessF (@unused x: VectorD): MatrixD = MatrixD((2, 2), 10.0, 0.0, 0.0, 200.0) + + banner("Minimize (no bounds): 5 * x(0)^2 + 100 * x(1)^2 + 5") + + // Print function value and gradient at start (like cpp) + println (s"init x0 = $x0") + println (s"f(x0) = ${f(x0)}") + + // Gradient check: analytical vs numerical + val gradAnalytic = gradF(x0) + val gradNumeric = ∇(f)(x0) + val gradError = (gradAnalytic - gradNumeric).norm + val gradOk = gradError < 1e-6 + println (s"grad f(x0) (analytic) = $gradAnalytic") + println (s"grad f(x0) (numeric) = $gradNumeric") + println (s"Gradient check ok? $gradOk (error = $gradError)") + + // Hessian check: analytical vs numerical + val hessAnalytic = hessF(x0) + val hessNumeric = Η (f, x0) + val hessError = (hessAnalytic - hessNumeric).normF + val hessOk = hessError < 1e-6 + println (s"Hessian f(x0) (analytic) =\n$hessAnalytic") + println (s"Hessian f(x0) (numeric) =\n$hessNumeric") + println (s"Hessian check ok? $hessOk (error = $hessError)") + + // Run LBFGS_B once + val optimizer = new LBFGS_B(f) + val (fx, argmin) = optimizer.solve (x0) + + println (s"argmin = $argmin") + println (s"f(argmin) = $fx") + println (s"status = converged") + +end lBFGS_BTest4 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `lBFGS_BTest5` main function is used to test the `LBFGS_B` class + * on the Rosenbrock function with bounds, mirroring the cppoptlib test. 
+ * f(x) = (1 - x0)^2 + 100 * (x1 - x0^2)^2 + * > runMain scalation.optimization.quasi_newton.lBFGS_BTest5 + */ +@main def lBFGS_BTest5 (): Unit = + + val x0 = VectorD (-1.2, 1.0) // classic Rosenbrock start + + // Rosenbrock function + def f (x: VectorD): Double = + val x0 = x(0) + val x1 = x(1) + (1 - x0)~^2 + 100 * (x1 - x0~^2)~^2 + + // Analytical gradient + def gradF (x: VectorD): VectorD = + val x0 = x(0) + val x1 = x(1) + VectorD (-2 * (1 - x0) - 400 * x0 * (x1 - x0 * x0), + 200 * (x1 - x0 * x0)) + + // Analytical Hessian + def hessF (x: VectorD): MatrixD = + val x0 = x(0) + val x1 = x(1) + MatrixD ((2, 2), 2 - 400 * x1 + 1200 * x0 * x0, -400 * x0, + -400 * x0, 200.0) + + banner("Rosenbrock with bounds: f(x) = (1 - x0)^2 + 100*(x1 - x0^2)^2") + + // Print function value and gradient at start + println (s"init x0 = $x0") + println (s"f(x0) = ${f(x0)}") + + import scalation.calculus.Differential.{∇, Η} + val gradAnalytic = gradF(x0) + val gradNumeric = ∇(f)(x0) + val gradError = (gradAnalytic - gradNumeric).norm + println (s"grad f(x0) (analytic) = $gradAnalytic") + println (s"grad f(x0) (numeric) = $gradNumeric") + println (s"Gradient check error = $gradError") + + val hessAnalytic = hessF(x0) + val hessNumeric = Η(f, x0) + val hessError = (hessAnalytic - hessNumeric).normF + println (s"Hessian f(x0) (analytic) =\n$hessAnalytic") + println (s"Hessian f(x0) (numeric) =\n$hessNumeric") + println (s"Hessian check error = $hessError") + + // Set tight bounds (exclude true minimum (1,1)) + val lower = VectorD(-1.0, -0.5) + val upper = VectorD( 0.2, 0.25) + + val lu = (lower, upper) + + println (s"Bounds: lower = $lower, upper = $upper") + + // Run LBFGS_B + val optimizer = new LBFGS_B (f, l_u = lu, gradF = gradF) + val (fx, argmin) = optimizer.solve (x0) + + println (s"argmin = $argmin") + println (s"f(argmin) = $fx") + println (s"status = converged/bounded optimum") + +end lBFGS_BTest5 + + 
+//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `lBFGS_BTest5` main function is used to test the `LBFGS_B` class + * on the ReciprocalFunction function with bounds. In addition to ReciprocalFunction + * function you may want to test RosenbrockFunction, McCormickFunction, and + * FreudensteinRothFunction functions. + * f(x) = 1/x(0) + x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1 + * > runMain scalation.optimization.quasi_newton.lBFGS_BTest6 + */ +@main def lBFGS_BTest6 (): Unit = + + import functions.ReciprocalFunction + + val x0 = VectorD (0.1, 0.1) // starting location + + banner ("Minimize: 1/x(0) + x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1") + def f = ReciprocalFunction.objFunction +// def gradF = ReciprocalFunction.gradFunction + val lu = ReciprocalFunction.bound + + // Run LBFGS_B + val optimizer = new LBFGS_B (f, l_u = lu) //, gradF = gradF) + val (fx, argmin) = optimizer.solve (x0) + + println (s"argmin = $argmin") + println (s"f(argmin) = $fx") + println (s"status = converged/bounded optimum") + +end lBFGS_BTest6 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `lBFGS_BStressTest` main function stress-tests LBFGS_B on Rosenbrock + * with several different bound configurations. 
+ * > runMain scalation.optimization.quasi_newton.lBFGS_BStressTest + */ +@main def lBFGS_BStressTest (): Unit = + + val x0 = VectorD (-1.2, 1.0) // classic Rosenbrock starting point + + // Rosenbrock function + def f(x: VectorD): Double = (1 - x(0))~^2 + 100.0 * (x(1) - x(0)~^2)~^2 + def gradF(x: VectorD): VectorD = + VectorD (-2 * (1 - x(0)) - 400 * x(0) * (x(1) - x(0)~^2), + 200 * (x(1) - x(0)~^2)) + + // Different bound options + val bounds = Seq ( + "Option 1: Tight (exclude true minimum)" -> (VectorD(-1.0, -0.5), VectorD(0.2, 0.25)), + "Option 2: Very Tight Box Around Wrong Point" -> (VectorD(0.0, 0.0), VectorD(0.5, 0.5)), + "Option 3: Skewed Bounds (trap along edge)" -> (VectorD(-1.0, -1.0), VectorD(0.2, 2.0)), + "Option 4: Huge Bounds (almost unconstrained)"-> (VectorD(-5.0, -5.0), VectorD(5.0, 5.0)), + "Option 5: Lower Bound Excludes True Min" -> (VectorD(-2.0, 0.0), VectorD(0.8, 0.9))) + + for (label, lu) <- bounds do + banner (s"Rosenbrock Stress Test - $label") + + println (s"init x0 = $x0, f(x0) = ${f(x0)}") + println (s"Bounds: lower = ${lu._1}, upper = ${lu._2}") + + val optimizer = new LBFGS_B (f, l_u = lu, gradF = gradF) + val (fx, argmin) = optimizer.solve (x0) + + println (s"argmin = $argmin") + println (s"f(argmin) = $fx") + println (s"status = converged/bounded optimum\n") + end for + +end lBFGS_BStressTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Quadratic 2D with known solution and clipping by bounds. 
+ * f(x) = 0.5 * (x - c)^T diag(d) (x - c) + * > runMain scalation.optimization.quasi_newton.lBFGS_BTest_Quad2D + */ +@main def lBFGS_BTest_Quad2D (): Unit = + val c = VectorD (1.5, -2.0) + val d = VectorD (4.0, 1.0) // diag entries (SPD) + val x0 = VectorD (0.0, 0.0) + + def f(x: VectorD): Double = + val dx = x - c + 0.5 * (dx * d).dot(dx) + + def gradF(x: VectorD): VectorD = + (x - c) * d + + // Bounds that force the solution to lie on a face (clip c(0) to 1.0) + val lower = VectorD (-1.0, -3.0) + val upper = VectorD ( 1.0, 0.0) + + println ("Quadratic 2D with clipping bounds") + println (s"true unconstrained minimizer: c = $c") + println (s"Bounds: lower = $lower, upper = $upper") + + val opt = new LBFGS_B(f, l_u = (lower, upper), gradF = gradF) + val (fx, x) = opt.solve (x0) + + println (s"argmin = $x") + println (s"f(argmin) = $fx") + +end lBFGS_BTest_Quad2D + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Himmelblau (multimodal), moderate bounds. + * f(x, y) = (x^2 + y - 11)^2 + (x + y^2 - 7)^2 + * > runMain scalation.optimization.quasi_newton.lBFGS_BTest_Himmelblau + */ +@main def lBFGS_BTest_Himmelblau (): Unit = + val x0 = VectorD (-3.5, 3.0) + + def f(x: VectorD): Double = + val a = x(0); val b = x(1) + val t1 = a*a + b - 11.0 + val t2 = a + b*b - 7.0 + t1*t1 + t2*t2 + + def gradF(x: VectorD): VectorD = + val a = x(0); val b = x(1) + val t1 = a*a + b - 11.0 + val t2 = a + b*b - 7.0 + VectorD (4.0*a*t1 + 2.0*t2, + 2.0*t1 + 4.0*b*t2) + + val lower = VectorD (-5.0, -5.0) + val upper = VectorD ( 5.0, 5.0) + + println ("Himmelblau with moderate bounds") + println (s"init x0 = $x0, f(x0) = ${f(x0)}") + println (s"Bounds: lower = $lower, upper = $upper") + + val opt = new LBFGS_B (f, l_u = (lower, upper), gradF = gradF) + val (fx, x) = opt.solve (x0) + + println (s"argmin = $x") + println (s"f(argmin) = $fx") // expect near one of the known minima ~ (3,2), (-2.805,3.131), ... 

end lBFGS_BTest_Himmelblau


//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Wood function (4D Rosenbrock variant), classic nonconvex test.
 *  > runMain scalation.optimization.quasi_newton.lBFGS_BTest_Wood4D
 */
@main def lBFGS_BTest_Wood4D (): Unit =
    val x0 = VectorD (-3.0, -1.0, -3.0, -1.0)

    // Wood objective built from its four coupled residual terms
    def f (x: VectorD): Double =
        val (x1, x2, x3, x4) = (x(0), x(1), x(2), x(3))
        100.0 * (x2 - x1~^2)~^2 + (1.0 - x1)~^2 +
         90.0 * (x4 - x3~^2)~^2 + (1.0 - x3)~^2 +
         10.0 * (x2 + x4 - 2.0)~^2 + 0.1 * (x2 - x4)~^2

    // analytic gradient via shared residuals t1..t4
    def gradF (x: VectorD): VectorD =
        val (x1, x2, x3, x4) = (x(0), x(1), x(2), x(3))
        val t1 = x2 - x1*x1
        val t2 = x4 - x3*x3
        val t3 = x2 + x4 - 2.0
        val t4 = x2 - x4
        VectorD (-400.0 * t1 * x1 - 2.0 * (1.0 - x1),
                  200.0 * t1 + 20.0 * t3 + 0.2 * t4,
                 -360.0 * t2 * x3 - 2.0 * (1.0 - x3),
                  180.0 * t2 + 20.0 * t3 - 0.2 * t4)

    val lower = VectorD.fill (4)(-5.0)
    val upper = VectorD.fill (4)( 5.0)

    println ("Wood 4D with wide bounds")
    println (s"init x0 = $x0, f(x0) = ${f(x0)}")

    val (fx, x) = new LBFGS_B (f, l_u = (lower, upper), gradF = gradF).solve (x0)

    println (s"argmin = $x")
    println (s"f(argmin) = $fx")    // true min at (1,1,1,1) with f ≈ 0

end lBFGS_BTest_Wood4D


//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** High-dim Rosenbrock (n = 10), almost-unconstrained with wide box.
+ * > runMain scalation.optimization.quasi_newton.lBFGS_BTest_RosenbrockN + */ +@main def lBFGS_BTest_RosenbrockN (): Unit = + val n = 10 + val x0 = VectorD (for i <- 0 until n yield if i % 2 == 0 then -1.2 else 1.0) + + def f(x: VectorD): Double = + var s = 0.0 + var i = 0 + while i < n - 1 do + val xi = x(i); val xip = x(i+1) + s += (1.0 - xi)~^2 + 100.0 * (xip - xi*xi)~^2 + i += 1 + s + end f + + def gradF(x: VectorD): VectorD = + val g = new VectorD(n) + + var i = 0 + while i < n do + var gi = 0.0 + + // backward neighbor term: +200 * (x_i - x_{i-1}^2) + if i > 0 then + gi += 200.0 * (x(i) - x(i-1) * x(i-1)) + + // forward terms: -2(1 - x_i) - 400 x_i (x_{i+1} - x_i^2) + if i < n - 1 then + gi += -2.0 * (1.0 - x(i)) - 400.0 * x(i) * (x(i+1) - x(i) * x(i)) + + g(i) = gi + i += 1 + g + end gradF + + val lower = VectorD.fill (n)(-5.0) + val upper = VectorD.fill (n)( 5.0) + + println (s"High-dim Rosenbrock (n=$n) with wide bounds") + println (s"init f(x0) = ${f(x0)}") + + val opt = new LBFGS_B(f, l_u = (lower, upper), gradF = gradF) + val (fx, x) = opt.solve (x0) + + println (s"argmin = $x") + println (s"f(argmin) = $fx") // expect near all-ones + +end lBFGS_BTest_RosenbrockN + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Mixed/infinite bounds + active-set corner behavior on a tilted quadratic. + * > runMain scalation.optimization.quasi_newton.lBFGS_BTest_MixedBounds + */ +@main def lBFGS_BTest_MixedBounds (): Unit = + val x0 = VectorD (2.0, -3.0, 0.5, -1.0) + + // f(x) = 0.5 * ||A x - b||^2 (convex), with nontrivial coupling + val A = MatrixD ((4, 4), 3.0, 1.0, 0.0, 0.0, + 1.0, 2.0, 1.0, 0.0, + 0.0, 1.0, 3.0, 1.0, + 0.0, 0.0, 1.0, 2.0) + val b = VectorD (1.0, -2.0, 0.5, 1.0) + + def f(x: VectorD): Double = + val r = A * x - b + 0.5 * r.dot(r) + + def gradF(x: VectorD): VectorD = + val r = A * x - b + A.transpose * r + + // Mixed bounds: some finite, some free (±∞), plus asymmetric box to hit faces/corners. 
    val lower = VectorD (-1.0, NEGATIVE_INFINITY, 0.0, -0.5)
    val upper = VectorD ( 0.5, POSITIVE_INFINITY, 1.0,  0.2)

    println ("Mixed/infinite bounds on coupled quadratic")
    println (s"init x0 = $x0, f(x0) = ${f(x0)}")
    println (s"Bounds: lower = $lower, upper = $upper")

    val opt = new LBFGS_B (f, l_u = (lower, upper), gradF = gradF)
    val (fx, x) = opt.solve (x0)

    println (s"argmin = $x")
    println (s"f(argmin) = $fx")

end lBFGS_BTest_MixedBounds


//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convex QP with mixed bounds, exact-check via active-set enumeration.
 *  f(x) = 0.5 x^T Q x - b^T x, Q ≻ 0 (rotated anisotropic), box bounds.
 *  > runMain scalation.optimization.quasi_newton.lBFGS_BTest_QPEnum
 */
@main def lBFGS_BTest_QPEnum (): Unit =
    // Convex Quadratic Program with box constraints.
    // Minimize: 0.5 * x^T Q x - b^T x subject to l <= x <= u
    // We'll build a 4D SPD matrix Q with coupling terms, enumerate all active sets,
    // solve exactly, then compare to LBFGS_B solution.

    val n = 4

    // Construct a symmetric positive definite Q with off-diagonal coupling
    val Q = MatrixD ((n, n), 6.0,  2.0,  1.0, 0.0,
                             2.0,  5.0, -1.0, 1.0,
                             1.0, -1.0,  4.0, 1.5,
                             0.0,  1.0,  1.5, 3.5)

    // Ensure strict SPD by adding a small multiple of identity if needed (numerical safety)
    // (Not generally necessary here, but kept for robustness)
    val eps = 1e-10
    cfor (0, n) { i => Q(i, i) = Q(i, i) + eps }

    val b = VectorD (1.0, -2.0, 0.5, 1.5)

    // Box bounds
    val l = VectorD (-0.5, -1.0, 0.0, -0.2)
    val u = VectorD ( 1.0,  0.5, 1.2,  0.8)

    // Objective function and analytic gradient
    def f (x: VectorD): Double = 0.5 * (x dot (Q * x)) - b.dot(x)
    def gradF (x: VectorD): VectorD = Q * x - b

    // Active set enumeration: each variable can be at lower (-1), free (0), or upper (+1)
    case class EnumResult (x: VectorD, fx: Double)

    // Enumerate all 3^n active-set patterns, solve the free-variable system exactly
    // for each, keep the KKT-feasible candidate with the lowest objective.
    def enumerateQP (): EnumResult =
        var best: EnumResult = null

        val pattern = Array.fill (n)(-1)
        // Iterate over all 3^n patterns using base-3 counting
        val total = math.pow(3, n).toInt
        var code = 0
        while code < total do
            // Decode code into ternary digits → pattern
            var tmp = code
            var i = 0
            while i < n do
                pattern(i) = tmp % 3 - 1                                   // values in {-1,0,1}
                tmp /= 3
                i += 1

            val x = new VectorD(n)
            val freeIdxBuf = new scala.collection.mutable.ArrayBuffer[Int]()

            // Assign bounds for active variables, collect free indices
            i = 0
            while i < n do
                pattern(i) match
                    case -1 => x(i) = l(i)                                 // at lower
                    case 1  => x(i) = u(i)                                 // at upper
                    case 0  => freeIdxBuf += i                             // free variable
                i += 1

            val freeIdx = freeIdxBuf.toArray
            val m = freeIdx.length

            var feasible = true
            if m > 0 then
                // Build Q_ff and rhs = b_f - Q_fb * x_b
                val Qff = new MatrixD(m, m)
                val rhs = new VectorD(m)

                var ii = 0
                while ii < m && feasible do
                    val gi = freeIdx(ii)
                    // b_f component
                    var rhs_i = b(gi)
                    // subtract Q(g_i, j) * x(j) for bound-active j (non-free)
                    var j = 0
                    while j < n do
                        if pattern(j) != 0 then rhs_i -= Q(gi, j) * x(j)
                        j += 1
                    rhs(ii) = rhs_i

                    // fill row of Q_ff
                    var jj = 0
                    while jj < m do
                        Qff(ii, jj) = Q(freeIdx(ii), freeIdx(jj))
                        jj += 1
                    ii += 1

                // Solve Q_ff * x_f = rhs
                if feasible then
                    val lu = new Fac_LU(Qff.copy)
                    lu.factor()
                    val xf = lu.solve (rhs)

                    // Put back free components and check bounds
                    ii = 0
                    while ii < m && feasible do
                        val idx = freeIdx(ii)
                        val v = xf(ii)
                        if v < l(idx) - 1e-10 || v > u(idx) + 1e-10 then feasible = false
                        x(idx) = math.max(l(idx), math.min(u(idx), v))
                        ii += 1
                end if

            if feasible then
                // Verify KKT: projected gradient zero (optional check)
                val g = gradF(x)
                var kktOk = true
                i = 0
                while i < n && kktOk do
                    if pattern(i) == 0 then
                        // free: gradient near 0
                        if math.abs(g(i)) > 1e-6 then kktOk = false
                    else if pattern(i) == -1 then
                        // at lower: gradient >= 0 (cannot decrease objective by decreasing x)
                        if g(i) < -1e-6 then kktOk = false
                    else
                        // at upper: gradient <= 0
                        if g(i) > 1e-6 then kktOk = false
                    i += 1

                if kktOk then
                    val fx = f(x)
                    if best == null || fx < best.fx then best = EnumResult (x.copy, fx)
            end if

            code += 1
        end while
        best
    end enumerateQP

    val exact = enumerateQP()
    println (s"Exact enumeration optimum: f = ${exact.fx} at x = ${exact.x}")

    // Run LBFGS_B from a generic starting point
    val x0 = VectorD(0.3, -0.4, 0.9, 0.0)
    println (s"Initial x0 = $x0, f(x0) = ${f(x0)}")

    val optimizer = new LBFGS_B (f, l_u = (l, u), gradF = gradF)
    val (fx_lbfgs, x_lbfgs) = optimizer.solve (x0)

    println (s"LBFGS_B optimum: f = $fx_lbfgs at x = $x_lbfgs")

    // Compare solutions
    val relObjErr = math.abs (fx_lbfgs - exact.fx) / math.max (1.0, math.abs (exact.fx))
    val xDiffNorm = (x_lbfgs - exact.x).norm

    println (f"Relative objective error = $relObjErr%.3e")
    println (f"||x_lbfgs - x_exact||_2 = $xDiffNorm%.3e")

    // Tolerances (convex QP, expect tight agreement)
    val objTol = 1e-8
    val xTol   = 1e-6

    assert (relObjErr < objTol, s"Objective mismatch: rel error = $relObjErr > $objTol")
    assert (xDiffNorm < xTol, s"Solution vector mismatch: norm diff = $xDiffNorm > $xTol")

    println ("QPEnum test: PASSED (LBFGS_B matches enumerated optimum)")

end lBFGS_BTest_QPEnum

diff --git a/src/main/scala/scalation/optimization/quasi_newton/LBFGS_B2.scala b/src/main/scala/scalation/optimization/quasi_newton/LBFGS_B2.scala
new file mode 100644
index 000000000..43c531860
--- /dev/null
+++ b/src/main/scala/scalation/optimization/quasi_newton/LBFGS_B2.scala
@@ -0,0 +1,519 @@

//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author  Hao Peng
 *  @version 2.0
 *  @date    Fri Oct 7 12:27:00 EDT 2017
 *  @see     LICENSE (MIT style license file).
 *
 *  @note    Limited memory BFGS with Bounds (L-BFGS-B)
 *
 *------------------------------------------------------------------------------
 *  Limited memory Broyden–Fletcher–Goldfarb–Shanno (BFGS) for Bound constrained
 *  optimization (L-BFGS-B) algorithm.  Originally proposed by Byrd et al. in 1995.
 *  See the first two links for the original paper and authors' software (written
 *  in Fortran) distribution site, respectively.  This implementation is translated
 *  from a C++ implementation found in the last link.
+ * + * @see www.ece.northwestern.edu/~nocedal/PSfiles/limited.ps.gz + * @see users.iems.northwestern.edu/~nocedal/lbfgsb.html + * @see github.com/PatWie/CppNumericalSolvers/blob/master/include/cppoptlib/solver/lbfgsbsolver.h + */ + +package scalation +package optimization +package quasi_newton + +import scala.collection.mutable.ArrayBuffer +import scala.math.{abs, max, min} +import scala.util.control.Breaks.{break, breakable} + +import scalation.calculus.Differential.∇ +import scalation.mathstat._ + +import MatrixD.eye + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `LBFGS_B2` companion object provides a factory method for Limited memory + * Broyden–Fletcher–Goldfarb–Shanno for Bounds constrained optimization. + */ +object LBFGS_B2: + + val emptyMatrix = new MatrixD (0, 0) // empty zero dimension matrix + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create an `LBFGS_B2` object with a given dimensionality and default lower + * and upper bounds of -1 and 1, respectively. + * @param f the objective function to be minimized + * @param n the dimensionality of the search space + * @param exactLS whether to use exact (e.g., `GoldenLS`) + * or inexact (e.g., `WolfeLS`) Line Search + * @param l_u (vector, vector) of lower and upper bounds for all input parameters + * @param gradF vector to vector functional formula for computing the gradiant, if available + */ + def apply (f: FunctionV2S, n: Int, + exactLS: Boolean = false, l_u_ : Bounds = null, + gradF: FunctionV2V = null): LBFGS_B2 = + + val l_u = if l_u_ == null then (VectorD.fill (n)(-1), VectorD.fill (n)(1)) + else l_u_ + new LBFGS_B2 (f, exactLS, l_u, gradF) + end apply + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Make simple bounds where the limits in each dimension is the same. 
+ * @param n the dimensionality of the search space + * @param lo scalar lower bounds for all input parameters + * @param up scalar upper bounds for all input parameters + */ + inline def makeBounds (n: Int, lo: Double, up: Double): Bounds = + (VectorD.fill (n)(lo), VectorD.fill (n)(up)) + end makeBounds + +end LBFGS_B2 + +import LBFGS_B2.makeBounds + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `LBFGS_B2` class implements the Limited memory Broyden–Fletcher– + * Goldfarb–Shanno for Bounds constrained optimization (L-BFGS-B) + * Quasi-Newton Algorithm for solving Non-Linear Programming (NLP) problems. + * L-BFGS-B determines a search direction by deflecting the steepest descent direction + * vector (opposite the gradient) by multiplying it by a matrix that approximates + * the inverse Hessian. Furthermore, only a few vectors represent the approximation + * of the Hessian Matrix (limited memory). The parameters estimated are also bounded + * within user specified lower and upper bounds. 
+ * + * minimize f(x) + * subject to g(x) <= 0 [ optionally g(x) == 0 ] + * + * @param f the objective function to be minimized + * @param g the constraint function to be satisfied, if any (not used in this version) + * @param ineq whether the constraint is treated as inequality (default) or equality (not used in this version) + * @param exactLS whether to use exact (e.g., `GoldenLS`) + * or inexact (e.g., `WolfeLS`) Line Search + * @param l_u (vector, vector) of lower and upper bounds for all input parameters + * @param gradF vector to vector functional formula for computing the gradient, if available + */ +class LBFGS_B2 (f: FunctionV2S, + exactLS: Boolean = false, + private var l_u: Bounds = (null, null), + gradF: FunctionV2V = null) + extends Minimizer: + + private val debug = debugf ("LBFGS_B2", false) // debug function + private var ww, mm: MatrixD = null // workspace matrices + private var theta = 0.0 // a scaling parameter + private var dim = 0 // dimension of the input vector + private var hs = 5 // history size, number of historical vectors to store + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Sort pairs (k, v) according to v into ascending order. + * std::vector sort_indexes(const std::vector< std::pair > &v) + * @param v the ArrayBuffer of Tuple2 to be sorted by the 2nd element + */ + private def sortIndices (v: ArrayBuffer [(Int, Double)]): VectorI = + val sv = v.sortBy (_._2) // FIX - order different in C++ code + val idx = new VectorI (sv.length) + for i <- idx.indices do idx(i) = sv(i)._1 + idx + end sortIndices + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Force the values within 'v' to stay within the pre-defined bounds. 
+ * @see void clampToBound(const TProblem &problem, TVector &x) + * @param v the Vector containing values to be adjusted + */ + private def forceBounds (v: VectorD): Unit = + val (l, u) = l_u + for i <- v.indices do + if v(i) > u(i) then v(i) = u(i) // upper bound + else if v(i) < l(i) then v(i) = l(i) // lower bound + end for + end forceBounds + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Algorithm CP: Computation of the Generalized Cauchy Point. See page 8 of + * @see www.ece.northwestern.edu/~nocedal/PSfiles/limited.ps.gvz + * @see void getGeneralizedCauchyPoint(const TProblem &problem, const TVector &x, const TVector &g, TVector &x_cauchy, VariableTVector &c) + * @param x the parameter vector + * @param gr the gradient vector + */ + private def getGCP (x: VectorD, gr: VectorD): Bounds = + debug ("getGCP", s"x = $x, gr = $gr") + + val (l, u) = l_u + val setOfT = new ArrayBuffer [(Int, Double)] () + val d = -gr + + for j <- 0 until dim do + if gr(j) == 0 then setOfT.append ((j, MAX_VALUE)) + else + val tmp = if gr(j) < 0 then (x(j) - u(j)) / gr(j) + else (x(j) - l(j)) / gr(j) + setOfT.append ((j, tmp)) + if tmp == 0 then d(j) = 0 + end if + end for + + val sortedIndices = sortIndices (setOfT) + val xCauchy = x.copy + + val p = ww.transpose * d + val c = new VectorD (ww.dim2) + var fPrime = -d.dot (d) + var fDoublePrime = max (-theta * fPrime - (p.dot (mm * p)), EPSILON) + val f_dp_orig = fDoublePrime + var dt_min = -fPrime / fDoublePrime + var t_old = 0.0 + + var i = 0 + breakable { + for j <- 0 until dim do + i = j + if setOfT (sortedIndices(j))._2 > 0 then break () + end for + } // breakable + var b = sortedIndices(i) + var t = setOfT(b)._2 + var dt = t + + while dt_min >= dt && i < dim do + if d(b) > 0 then xCauchy(b) = u(b) + else if d(b) < 0 then xCauchy(b) = l(b) + val zb = xCauchy(b) - x(b) + c += p * dt + + // cache + val wbt = ww(b) + fPrime += dt * fDoublePrime + gr(b) * gr(b) + theta * gr(b) * zb - gr(b) * 
wbt.dot (mm * c) + fDoublePrime += -theta * gr(b) * gr(b) - 2.0 * (gr(b) * wbt.dot (mm * p)) + - gr(b) * gr(b) * (wbt.dot (mm * wbt)) + fDoublePrime = max (EPSILON * f_dp_orig, fDoublePrime) + + p += wbt * gr(b) + d(b) = 0 + dt_min = -fPrime / fDoublePrime + t_old = t + i += 1 + if i < dim then + b = sortedIndices(i) + t = setOfT(b)._2 + dt = t - t_old + end while + + dt_min = max (dt_min, 0.0) + t_old += dt_min + + for ii <- i until xCauchy.dim do + val si = sortedIndices (ii) + xCauchy(si) = x(si) + t_old * d(si) + end for + c += p * dt_min + (xCauchy, c) + end getGCP + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Find the alpha* parameter, a positive scalar. See Equation 5.8 on page 11 of + * @see www.ece.northwestern.edu/~nocedal/PSfiles/limited.ps.gvz + * @see Scalar findAlpha(const TProblem &problem, TVector &x_cp, VariableTVector &du, std::vector &FreeVariables) + * @param x_cp vector of cauchy point + * @param du vector containing intermediate results used to find alpha* + * @param freeVar an ArrayBuffer storing the indices of free variable + */ + private def findAlpha (x_cp: VectorD, du: VectorD, freeVar: ArrayBuffer [Int]): Double = + debug ("findAlpha", s"x_cp = $x_cp, du = $du, freeVar = $freeVar") + + val (l, u) = l_u + var alphastar = 1.0 + val n = freeVar.size + assert (du.dim == n) + + for i <- 0 until n do + val fi = freeVar(i) + alphastar = if du(i) > 0 then min (alphastar, (u(fi) - x_cp(fi)) / du(i)) + else min (alphastar, (l(fi) - x_cp(fi)) / du(i)) + end for + alphastar + end findAlpha + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Minimization of the subspace of free variables. 
See Section 5 on page 9 of + * @see www.ece.northwestern.edu/~nocedal/PSfiles/limited.ps.gvz + * @see void SubspaceMinimization(const TProblem &problem, TVector &x_cauchy, TVector &x, VariableTVector &c, TVector &g, TVector &SubspaceMin) + * @param x the parameter vector + * @param gr the gradient vector + * @param xCauchy the vector of Cauchy points + * @param c vector obtained from getGCP used to initialize the subspace + * minimization process + */ + private def subspaceMinimize (x: VectorD, gr: VectorD, xCauchy: VectorD, c: VectorD): VectorD = + debug ("subspaceMinimize", s"x = $x, gr = $gr, xCauchy = $xCauchy, c = $c") + + val (l, u) = l_u + val thetaInverse = 1.0 / theta + val freeVarIdx = new ArrayBuffer [Int] () + for i <- xCauchy.indices if xCauchy(i) != u(i) && xCauchy(i) != l(i) do freeVarIdx.append (i) + val freeVarCount = freeVarIdx.size + val wwzz = new MatrixD (ww.dim2, freeVarCount) + for i <- 0 until freeVarCount do wwzz(?, i) = ww(freeVarIdx(i)) + val rr = (gr + (xCauchy - x) * theta - ww * (mm * c)) + val r = new VectorD (freeVarCount) + for i <- 0 until freeVarCount do r(i) = rr(freeVarIdx(i)) + + var v = mm * (wwzz * r) + var nn = wwzz * wwzz.transpose * thetaInverse + nn = eye (nn.dim, nn.dim) - mm * nn + + val lu = new Fac_LU (nn) + lu.factor () + v = lu.solve (v) + + val du = r * -thetaInverse - wwzz.transpose * v * thetaInverse * thetaInverse + val alpha_star = findAlpha (xCauchy, du, freeVarIdx) + val dStar = du * alpha_star + val subspaceMin = xCauchy.copy + for i <- 0 until freeVarCount do subspaceMin (freeVarIdx(i)) += dStar(i) + subspaceMin + end subspaceMinimize + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Modify the number of historical vectors to store. 
+ * @see void setHistorySize(const int hs) { m_historySize = hs; } + * @param hs_ the new history size + */ + def setHistorySize (hs_ : Int): Unit = { hs = hs_ } + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Obtain the mean gradient norm squared + * @param x the parameter vector + * @param gr the gradient vector + */ + private def getMgn (x: VectorD, gr: VectorD): Double = + val (l, u) = l_u + val x_gr = x - gr + val checkLower = VectorD (for i <- l.indices yield max (x_gr(i) , l(i))) + val checkUpper = VectorD (for i <- u.indices yield min (checkLower(i), u(i))) + val mgn = (checkUpper - x).normSq / dim + mgn + end getMgn + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The objective function f. Option: plus a weighted penalty based on the + * constraint function g. FIX: better penalty scheme needed. + * @param x the coordinate values of the current point + */ + inline override def fg (x: VectorD): Double = f(x) +/* + val f_x = f(x) + if g == null then // unconstrained + f_x + else // constrained, g(x) <= 0 + val penalty = if ineq then max (g(x), 0.0) else abs (g(x)) + f_x + abs (f_x) * WEIGHT * penalty * penalty + end fg +*/ + +// FIX -- allow other line search algorithms such as MoreThuente + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform an exact `GoldenSectionLS` or inexact `WolfeLS` Line Search. + * Search in direction dir, returning the distance z to move in that direction. 
+ * @param x the current point + * @param dir the direction to move in + * @param step the initial step size + */ + def lineSearch (x: VectorD, dir: VectorD, step: Double = STEP): Double = + debug ("linesearch", s"x = $x, dir = $dir, step = $step") + + def f_1D (z: Double): Double = fg(x + dir * z) // create a 1D function + val ls = if exactLS then new GoldenSectionLS (f_1D ) // Golden Section Line Search + else new WolfeLS (f_1D) // Wolfe line search ((c1 = .0001, c2 = .9) + ls.search (step) // perform a Line Search + end lineSearch + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Solve the following Non-Linear Programming (NLP) problem using L-BFGS_B: + * min { f(x) | g(x) <= 0 }. + * @see void minimize(TProblem &problem, TVector &x0) + * @param x0 the starting point + * @param alphaInit the initial step size + * @param toler the tolerance + */ + def solve (x0: VectorD, alphaInit: Double = STEP, toler: Double = EPSILON): FuncVec = + debug ("solve", s"x0 = $x0, alphaInit = $alphaInit, toler = $toler") + + var best = (MAX_VALUE, VectorD.nullv) + + dim = x0.dim + theta = 1.0 + if l_u == null then l_u = makeBounds (dim, NEGATIVE_INFINITY, POSITIVE_INFINITY) +// val (l, u) = l_u + + ww = new MatrixD (dim, 0) // FIX - causes empty matrix warning +// mm = new MatrixD (0, 0) // find alt. to zero dimension matrix + mm = LBFGS_B2.emptyMatrix + + val yHistory = ArrayBuffer [VectorD] () + val sHistory = ArrayBuffer [VectorD] () + var yHistoryMx: MatrixD = null + var sHistoryMx: MatrixD = null + +// var (x, gr) = (x0, ∇ (fg)(x0)) // FIX -- differs from C++ code, could be okay + var (x, gr) = (x0, (if gradF != null then gradF (x0) // by formula + else ∇ (fg)(x0))) // numerically + var fv = fg(x) // functional value at x + var mgn = 0.0 + var count = 0 + val countMax = 10 + +// FIX -- missing "auto noConvergence = ..." 
+ + breakable { // main while loop in C++ code + for k <- 1 to MAX_IT do +// banner (s"solve: iteration $k: f(x) = $fv, x = $x") + val f_old = fv + val x_old = x + val g_old = gr + val mgn_old = mgn + + // STEP 2: compute the cauchy point + val (xCauchy, c) = getGCP (x, gr) + forceBounds (xCauchy) // FIX -- not in C++ code + + // STEP 3: compute a search direction d_k by the primal method for the sub-problem + val subspaceMin = subspaceMinimize (x, gr, xCauchy, c) + forceBounds (subspaceMin) // FIX -- not in C++ code + + // STEP 4: perform linesearch +// FIX -- C++ sets alphaInit = 1 + val rate = lineSearch (x, subspaceMin-x, alphaInit) // FIX -- try MoreThuente + + // STEP 5: compute gradient + x = x - (x - subspaceMin) * rate // update current guess and function information + forceBounds (x) // clampToBound + + fv = fg(x) + if blown ((fv, x)) then { best = better ((f_old, x_old), best); break () } + +// gr = ∇ (fg)(x) + gr = if gradF != null then gradF (x) // by formula + else ∇ (fg)(x) // numerically + mgn = getMgn (x, gr) + if mgn < toler || count > countMax then { best = better ((fv, x), best); break () } + if abs (mgn - mgn_old) < toler then count += 1 + + val newY = gr - g_old // prepare for next iteration + val newS = x - x_old + + // STEP 6 + val test = abs (newS.dot (newY)) + if test > EPSILON * newY.normSq then +// FIX -- does not look the same as C++ code + if yHistory.size >= hs then { yHistory.remove (0); sHistory.remove (0) } + yHistory append newY + sHistory append newS + + // STEP 7 + theta = newY.dot (newY) / newY.dot (newS) + yHistoryMx = MatrixD (yHistory).transpose + sHistoryMx = MatrixD (sHistory).transpose + ww = yHistoryMx ++^ (sHistoryMx * theta) + val aa = sHistoryMx.transpose * yHistoryMx + val ll = aa.lower + ll(?, ?) 
= 0.0 // set ll's diagonal to 0 + val dd = new MatrixD (aa.dim, aa.dim2) + dd.setDiag (-aa(?)) // set dd diagonal to aa's + val mm2 = (dd ++^ ll.transpose) ++ (ll ++^ (sHistoryMx.transpose * sHistoryMx * theta)) + mm = Fac_LU.inverse (mm2)() + end if + + debug ("solve", s"(k = $k) move from $x_old to $x where fg(x) = $fv") + + best = better ((fv, x), best) + if abs (f_old - fv) < toler then break () // successive function values too similar + end for + } // breakable + banner (s"solve: optimal solution = $best") + best + end solve + +end LBFGS_B2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `lBFGS_B2Test` main function is used to test the `LBFGS_B2` class. + * f(x) = (x_0 - 3)^2 + (x_1 - 4)^2 + 1 + * > runMain scalation.optimization.quasi_newton.lBFGS_B2Test + */ +@main def lBFGS_B2Test (): Unit = + + val n = 2 + val x0 = new VectorD (n) + def f (x: VectorD): Double = (x(0) - 3)~^2 + (x(1) - 4)~^2 + 1 + + banner ("Minimize (no bounds): (x_0 - 3)^2 + (x_1 - 4)^2 + 1") + var optimizer = new LBFGS_B2 (f) + var opt = optimizer.solve (x0) + println (s"][ optimal solution (x, f(x)) = $opt") + + banner ("Minimize (bounds): (x_0 - 3)^2 + (x_1 - 4)^2 + 1") + val lu = makeBounds (x0.dim, 3.5, 5.0) + optimizer = new LBFGS_B2 (f, l_u = lu) + opt = optimizer.solve (x0) + println (s"][ optimal solution (x, f(x)) = $opt") + +end lBFGS_B2Test + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `lBFGS_B2Test2` main function is used to test the `LBFGS_B2` class. 
+ * f(x) = x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1 + * > runMain scalation.optimization.quasi_newton.lBFGS_B2Test2 + */ +@main def lBFGS_B2Test2 (): Unit = + + val n = 2 + val x0 = new VectorD (n) + def f (x: VectorD): Double = x(0)~^4 + (x(0) - 3)~^2 + (x(1) - 4)~^2 + 1 + + banner ("Minimize (no bounds): x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1") + var optimizer = new LBFGS_B2 (f) + var opt = optimizer.solve (x0) + println (s"][ optimal solution (x, f(x)) = $opt") + + banner ("Minimize (bounds): x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1") + val lu = makeBounds (x0.dim, 3.5, 5.0) + optimizer = new LBFGS_B2 (f, l_u = lu) + opt = optimizer.solve (x0) + println (s"][ optimal solution (x, f(x)) = $opt") + +end lBFGS_B2Test2 + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `lBFGS_B2Test3` main function is used to test the `LBFGS_B2` class. + * f(x) = 1/x_0 + x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1 + * > runMain scalation.optimization.quasi_newton.lBFGS_B2Test3 + */ +@main def lBFGS_B2Test3 (): Unit = + + val n = 2 + val x0 = VectorD (0.1, 0.0) + def f (x: VectorD): Double = 1/x(0) + x(0)~^4 + (x(0) - 3)~^2 + (x(1) - 4)~^2 + 1 + + banner ("Minimize (no bounds): 1/x_0 + x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1") + var optimizer = new LBFGS_B2 (f) + var opt = optimizer.solve (x0) + println (s"][ optimal solution (x, f(x)) = $opt") + + opt = optimizer.resolve (n) + println (s"][ optimal solution (x, f(x)) = $opt") + + banner ("Minimize (bounds): 1/x_0 + x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1") + val lu = makeBounds (x0.dim, 3.5, 5.0) + optimizer = new LBFGS_B2 (f, l_u = lu) + opt = optimizer.solve (x0) + println (s"][ optimal solution (x, f(x)) = $opt") + +end lBFGS_B2Test3 + diff --git a/src/main/scala/scalation/optimization/quasi_newton/LBFGS_B_TestUtil.scala b/src/main/scala/scalation/optimization/quasi_newton/LBFGS_B_TestUtil.scala new file mode 100644 index 000000000..38760693a --- /dev/null +++ 
b/src/main/scala/scalation/optimization/quasi_newton/LBFGS_B_TestUtil.scala @@ -0,0 +1,87 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller, Nirupom Bose Roy + * @version 2.0 + * @date Sun Mar 06 05:30:00 EDT 2026 + * @see LICENSE (MIT style license file). + * + * @note Test Util for LBFGS_B + */ + +package scalation.optimization.quasi_newton + +import scalation.mathstat.{VectorD, FunctionV2S} +import scalation.banner + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** Utility helpers for L-BFGS-B benchmark tests. + */ +object LBFGS_B_TestUtil: + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Run one L-BFGS-B benchmark and print the result. + * @param name benchmark/test name + * @param f objective function + * @param x0 starting point + * @param bounds optional box bounds; if null, uses default infinite bounds + * @param exactLS whether to use exact 1D line search instead of More-Thuente + */ + def runLBFGSB (name: String, + f: FunctionV2S, + x0: VectorD, + bounds: Bounds = null, + exactLS: Boolean = false): (Double, VectorD)= + + banner (s"L-BFGS-B Test: $name | exactLS = $exactLS") + val opt = if bounds == null then new LBFGS_B (f, exactLS = exactLS) + else new LBFGS_B (f, exactLS = exactLS, l_u = bounds) + val res = opt.solve (x0) + println (s"result = $res") + res + end runLBFGSB + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Run both exact scalar line search and native More-Thuente for comparison. 
+ * @param name benchmark/test name + * @param f objective function + * @param x0 starting point + * @param bounds optional box bounds + */ + def compareLS (name: String, + f: FunctionV2S, + x0: VectorD, + bounds: Bounds = null): Unit = + + banner (s"L-BFGS-B Line Search Comparison: $name") + val resMT = if bounds == null then new LBFGS_B (f, exactLS = false).solve (x0) + else new LBFGS_B (f, exactLS = false, l_u = bounds).solve (x0) + + val res1D = if bounds == null then new LBFGS_B (f, exactLS = true).solve (x0) + else new LBFGS_B (f, exactLS = true, l_u = bounds).solve (x0) + + println (s"More-Thuente result = $resMT") + println (s"Exact 1D result = $res1D") + end compareLS + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Run a bounded optimizer and the unconstrained native LBFGS from the same + * starting point under inactive bounds to compare solutions. + * @param name benchmark/test name + * @param f objective function + * @param x0 starting point + * @param bounds wide/inactive bounds + */ + def compareToLBFGS (name: String, + f: FunctionV2S, + x0: VectorD, + bounds: Bounds): Unit = + + banner (s"L-BFGS vs L-BFGS-B Comparison: $name") + val resB = new LBFGS_B (f, l_u = bounds).solve (x0) + val resU = LBFGS (f).solve (x0) + println (s"L-BFGS-B result = $resB") + println (s"L-BFGS result = $resU") + end compareToLBFGS + +end LBFGS_B_TestUtil + diff --git a/src/main/scala/scalation/optimization/quasi_newton/OptimizationLogic.scala b/src/main/scala/scalation/optimization/quasi_newton/OptimizationLogic.scala index 5d8564a61..36e927920 100644 --- a/src/main/scala/scalation/optimization/quasi_newton/OptimizationLogic.scala +++ b/src/main/scala/scalation/optimization/quasi_newton/OptimizationLogic.scala @@ -34,12 +34,10 @@ trait OptimizationLogic extends EvaluationLogic: * is provided to just print the contents of the current iteration of the * optimization. 
* - * @param instance User data provided by each call of the `lbfgsMain` method of - * the `LBFGS` object. Can have `Any` type defined by the user - * as long as the same type is utilized in the `evaluate` method - * implementation for the class extending this trait and on the - * corresponding `lbfgsMain` calls from the `LBFGS` object that - * relies on this `OptimizationLogic`. + * @param instance an optional user data segment that may be provided when calling the + * `LBFGS.lbfgsMain` method and may have `Any` type, but must be the same + * type used by the `evaluate` method of classes extending this trait. + * Note, has type `MemorySegment`in `OptimizationLogicC`. * @param x `VectorD` with the current values of the variables. * @param g `VectorD` with the current value of the gradient vector. * @param fx Current value of the objective function. @@ -50,13 +48,14 @@ trait OptimizationLogic extends EvaluationLogic: * @param k Iteration count. * @param ls The number of evaluations called for this iteration. * @return int Determines if optimization should continue. Zero continues - optimization. Non-zero values cancel the optimization. + * optimization. Non-zero values cancel the optimization. 
*/ def progress (instance: Any, x: VectorD, g: VectorD, fx: Double, xnorm: Double, gnorm: Double, step: Double, n: Int, k: Int, ls: Int): LBFGSReturnCode = println (s""" Iteration $k: + instance \t\t= $instance x \t\t= $x g \t\t= $g fx \t\t= $fx diff --git a/src/main/scala/scalation/random/CDF.scala b/src/main/scala/scalation/random/CDF.scala index d5a56a8c2..67cb3ef01 100644 --- a/src/main/scala/scalation/random/CDF.scala +++ b/src/main/scala/scalation/random/CDF.scala @@ -132,7 +132,6 @@ object CDF: cbuf += 1.0 else cbuf(cbuf.size - 1) += 1.0 - end if end for (new VectorD (zbuf.size, zbuf.toArray), new VectorD (cbuf.size, cbuf.toArray).cumulate / x.dim.toDouble) @@ -322,7 +321,6 @@ object CDF: else if x > 0 then { cum = 1.0; ccum = 0.0 } else { cum = 0.0; ccum = 1.0 } - end if if cum < min then cum = 0.0 if ccum < min then ccum = 0.0 @@ -371,7 +369,6 @@ object CDF: else val f = z + 1.0 / (z + 2.0/(z + 3.0/(z + 4.0/(z + 13.0/20.0)))) e / (RT2PI * f) - // end if if x <= 0.0 then c else 1.0 - c end _normalCDF @@ -439,7 +436,6 @@ object CDF: if df <= 0.0 then flaw ("studentTCDF", "parameter df must be strictly positive") return -0.0 - end if if df =~ 1.0 then // Cauchy CDF 0.5 + (1.0/Pi) * atan (x) @@ -453,7 +449,6 @@ object CDF: if x > 0 then 1.0 - z else z else // Ordinary Normal Approximation (ONA) normalCDF (x) - end if end studentTCDF //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -479,7 +474,6 @@ object CDF: if df <= 0.0 then flaw ("noncentralTCDF", "parameter df must be strictly positive") return -0.0 - end if throw new UnsupportedOperationException ("noncentralTCDF in CDF not implemented yet") // FIX end noncentralTCDF @@ -495,11 +489,9 @@ object CDF: if x < 0.0 then flaw ("chiSquareCDF", "coordinate x should be nonnegative") return 0.0 - end if if df <= 0 then flaw ("chiSquareCDF", "parameter df must be strictly positive") return -0.0 - end if val chi = ChiSquare (df) // ChiSquare distribution val step = 0.0001 @@ -538,11 
+530,9 @@ object CDF: if x < 0.0 then flaw ("fisherCDF", s"F(x) requires coordinate x = $x to be nonnegative") return 0.0 - end if if df1 <= 0 || df2 <= 0 then flaw ("fisherCDF", "parameters df1 and df2 must be strictly positive") return -0.0 - end if val ff = rBetaF (df1 * x / ((df1 * x) + df2), df1 / 2.0, df2 / 2.0) if ff > 1.0 then 1.0 else ff // handle possible round-off errors diff --git a/src/main/scala/scalation/random/PoissonProcess.scala b/src/main/scala/scalation/random/PoissonProcess.scala index e708ff1bf..22a9e79c3 100644 --- a/src/main/scala/scalation/random/PoissonProcess.scala +++ b/src/main/scala/scalation/random/PoissonProcess.scala @@ -199,7 +199,6 @@ case class NHPoissonProcess (lambda: VectorD, dt: Double = 1.0, stream: Int = 0) if i2 >= lsum.dim then flaw ("meanF", "i2 is beyond the end of lsum vector") return -1.0 - end if val t1 = i1 * dt val t2 = t1 + dt val l1 = lsum(i1 - 1) @@ -247,7 +246,6 @@ case class NHPoissonProcess (lambda: VectorD, dt: Double = 1.0, stream: Int = 0) else val s = -sum + k * log (sum) - logfac (k) exp (s) - end if end if end pf @@ -312,10 +310,10 @@ end NHPoissonProcess var sum = 0.0 val rep = 10000 - for i <- 1 to rep do + cfor (0, rep) { _ => rv.reset () sum += rv.count (tt) - end for + } // cfor println ("rv.mean = " + rv.meanF (tt) + " estimate = " + sum / rep.toDouble) end meansTest @@ -333,22 +331,22 @@ end NHPoissonProcess val rep = 50000 // replications var j = 0 // interval number - var x = 0.0 // x coordinate +// var x = 0.0 // x coordinate var o = 0.0 // observed value: height of histogram var e = 0.0 // expected value: pf (x) var chi2 = 0.0 // ChiSquare statistic var n = 0 // number of nonzero intervals val sum = new Array [Int] (51) - for i <- 1 to rep do + cfor (0, rep) { _ => rv.reset () if name == "PoissonProcess" then j = rv.count (tt) else j = rv.count (a, b) if 0 <= j && j <= 50 then sum (j) += 1 - end for + } // cfor for i <- 0 until sum.length do - x = i / 10.0 +// x = i / 10.0 o = sum(i) rv.gen 
if name == "PoissonProcess" then e = round (rep * rv.pf (i, tt)).toDouble @@ -356,7 +354,6 @@ end NHPoissonProcess if e >= 5 then chi2 += pow (o - e, 2) / e n += 1 - end if print ("\tsum (" + i + ") = " + o + " : " + e + " ") if i % 5 == 4 then println () end for diff --git a/src/main/scala/scalation/random/Quantile.scala b/src/main/scala/scalation/random/Quantile.scala index 6031fa3d0..486e7833d 100644 --- a/src/main/scala/scalation/random/Quantile.scala +++ b/src/main/scala/scalation/random/Quantile.scala @@ -12,6 +12,7 @@ package scalation package random import scala.math.{abs, exp, log, Pi, sqrt} +import scala.annotation.unused import scalation.mathstat.{Plot, VectorD} @@ -44,7 +45,6 @@ object Quantile: if p < 0.0 || p > 1.0 then flaw ("check", "probability parameter p must be in the range [0, 1]") return (true, -0.0) - end if if p =~ 0.0 then return (true, x_min) // smallest value, defaults to -infinity if p =~ 1.0 then return (true, POSITIVE_INFINITY) // +infinity (false, 0.0) // in usual range (0, 1) @@ -110,7 +110,7 @@ object Quantile: * @param p the p-th quantile, e.g., .95 (95%) * @param pr parameter for the distribution (currently not used) */ - def normalInv (p: Double = .95, pr: Parameters = null): Double = + def normalInv (p: Double = .95, @unused pr: Parameters = null): Double = val extreme = check (p) // handle extreme cases if extreme._1 then return extreme._2 @@ -205,7 +205,6 @@ object Quantile: else y = ((1.0 / (((df + 6.0) / (df * y) - 0.089 * d - 0.822) * (df + 2.0) * 3.0) + 0.5 / (df + 4.0)) * y - 1.0) * (df + 1.0) / (df + 2.0) + 1.0 / y - end if sign * sqrt (df * y) end studentTInv @@ -268,7 +267,6 @@ object Quantile: if df <= 0 || df >= 50 then flaw ("chiSquareInv", "parameter df must be in the set {1, 2, ..., 49}") return -0.0 - end if var x1 = 0.0 // lower limit var x2 = 8.0 * df // upper limit @@ -311,7 +309,6 @@ object Quantile: if df1 <= 0 || df2 <= 0 then flaw ("fisherInv", "parameters df1 and df2 must be strictly positive") return 
-0.0 - end if var x1 = 0.0 // lower limit var x2 = 1.0E6 // upper limit diff --git a/src/main/scala/scalation/random/RNG.scala b/src/main/scala/scalation/random/RNG.scala index f052b7234..78e7965ea 100644 --- a/src/main/scala/scalation/random/RNG.scala +++ b/src/main/scala/scalation/random/RNG.scala @@ -26,7 +26,6 @@ trait RNG (stream: Int): if stream < 0 || stream >= RandomSeeds.N_STREAMS then flaw ("init`", "the stream must be in the range 0 to " + (RandomSeeds.N_STREAMS - 1)) - end if //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the theoretical mean for the random number generator's 'gen' method. @@ -104,7 +103,7 @@ object RNGTester: var sum = 0.0 val means = new VectorD (tries) for i <- 0 until tries do - time { for i <- 0 until reps do sum += rn.gen } + time { cfor (0, reps) { _ => sum += rn.gen } } println (s"gen: sum = $sum") println (s"rn.mean = $rn.mean estimate = ${sum / reps.toDouble}") means(i) = sum @@ -130,10 +129,10 @@ object RNGTester: val e = reps / nints // expected value: pf (x) val sum = new VectorD (nints) - for i <- 0 until reps do + cfor (0, reps) { _ => val j = floor (rn.gen * nints).toInt // interval number if 0 <= j && j < nints then sum (j) += 1 - end for + } // cfor val hg = new Histogram (sum, nints, s"Histogram for distribution of $name", counts = sum) println (s"meansTest: hg = $hg") @@ -177,7 +176,7 @@ object RNGTester: val reps = 100000 // number of replications - val y = VectorD (for i <- 0 until reps yield rn.gen) + val y = VectorD (for _ <- 0 until reps yield rn.gen) val cg = new CoGram (y) cg.makeCorrelogram () diff --git a/src/main/scala/scalation/random/StreamMaker3.scala b/src/main/scala/scalation/random/StreamMaker3.scala index 47ec17e6b..e42968df0 100644 --- a/src/main/scala/scalation/random/StreamMaker3.scala +++ b/src/main/scala/scalation/random/StreamMaker3.scala @@ -58,7 +58,7 @@ import java.util.Date if j < COLS-1 then print ("\t" + seed + ","); if seed < 1000000 then print 
("\t") else if i < ROWS-1 then println ("\t" + seed + ",") else println ("\t" + seed + ")") - for j <- 0 until streamLen do seed = r.igen // iterate for the next seed + cfor (0, streamLen) { _ => seed = r.igen } // iterate for the next seed end for println ("\n} // RandomSeeds3 object" + diff --git a/src/main/scala/scalation/random/Variate.scala b/src/main/scala/scalation/random/Variate.scala index 6f38a3403..ba83098c5 100644 --- a/src/main/scala/scalation/random/Variate.scala +++ b/src/main/scala/scalation/random/Variate.scala @@ -16,7 +16,7 @@ package scalation package random -import scala.math.{exp, floor, log, Pi, round, sqrt, tan} +import scala.math.{exp, floor, log, max, Pi, round, sqrt, tan} import scala.runtime.ScalaRunTime.stringOf import scalation.mathstat.{Histogram, Plot, VectorD} @@ -71,9 +71,10 @@ abstract class Variate (stream: Int = 0): //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Return the entire probability mass function (pmf) for finite discrete RV's. + * FIX: make abstract and require extending classes to implement it. * @param k number of objects of the first type */ - def pmf (k: Int = 0): Array [Double] = Array (0.0) + def pmf (k: Int = 1): Array [Double] = Array.fill (k)(1.0/k.toDouble) //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Determine the next random number for the particular distribution. 
@@ -97,7 +98,6 @@ abstract class Variate (stream: Int = 0): else flaw ("igen", "should not be invoked on continuous RV's") 0 - end if end igen //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -112,7 +112,6 @@ abstract class Variate (stream: Int = 0): else flaw ("igen", "should not be invoked on continuous RV's") 0 - end if end igen1 //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -422,6 +421,7 @@ end DiscreteF //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** This class generates `Erlang` random variates. * This continuous RV models the time until k stages complete. + * Can also pass mu/k to provide an Exponential like distribution with cv < 1. * @see http://www.math.uah.edu/stat/poisson/Gamma.html * @param mu the mean of exponential samples (Erlang mean = mu * k) * @param k the number of stages (or Exponential samples) @@ -432,32 +432,127 @@ case class Erlang (mu: Double = 1.0, k: Int = 2, stream: Int = 0) if mu <= 0.0 || k <= 0 then flaw ("init", "parameters mu and k must be positive") - private val l = 1.0 / mu // lambda + private val λ = 1.0 / mu // lambda, the rate parameter val mean = mu * k - def pf (z: Double): Double = l~^k * z~^(k-1) * exp (-l*z) / fac (k-1) + def pf (z: Double): Double = λ~^k * z~^(k-1) * exp (-λ*z) / fac (k-1) def gen: Double = var prod = 1.0 - for i <- 0 until k do prod *= r.gen + cfor (0, k) { _ => prod *= r.gen } -mu * log (prod) end gen def gen1 (z: Double): Double = var prod = 1.0 - for i <- 0 until k do prod *= r.gen + cfor (0, k) { _ => prod *= r.gen } -z * log (prod) end gen1 end Erlang +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** This class generates `Erlang2S` (Shifted right) random variates. + * This continuous RV models the time until 2 stages complete. 
+ * @param mu the original unshifted mean of exponential samples (Erlang mean = mu * 2) + * @param tau the time shift, generated values Y = tau + Erlang2 + * @param stream the random number stream + */ +case class Erlang2S (mu: Double = 1.0, tau: Double = 0.2, stream: Int = 0) + extends Variate (stream): + + if tau <= 0.0 then flaw ("init", "parameter tau must be positive") + if tau >= mu then flaw ("init", "parameter tau must be less than mu") + + private val λ = 1.0 / mu // lambda, the rate parameter + + val mean = tau + 2 * mu // adjusted mean + + def pf (z: Double): Double = if z >= tau then λ~^2 * (z-tau) * exp (-λ*(z-tau)) + else 0.0 + + def gen: Double = tau - mu * log (r.gen * r.gen) + + def gen1 (z: Double): Double = tau - z * log (r.gen * r.gen) + +end Erlang2S + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** This class generates `Erlang2T` (lower-Truncated) random variates. + * This continuous RV models the time until 2 stages complete. + * It produces Erlang2 random variates until one is greater than tau. 
+ * @param mu the original untruncated mean of exponential samples (Erlang mean = mu * 2) + * @param tau the time threshold, require generated values Y >= tau + * @param stream the random number stream + */ +case class Erlang2T (mu: Double = 1.0, tau: Double = 0.2, stream: Int = 0) + extends Variate (stream): + + if tau <= 0.0 then flaw ("init", "parameter tau must be positive") + if tau >= mu then flaw ("init", "parameter tau must be less than mu") + + private val λ = 1.0 / mu // lambda, the rate parameter + + val mean = (λ * tau~^2 + 2 * tau + 2.0 / λ) / (1 + λ * tau) // adjusted mean + + def pf (z: Double): Double = if z >= tau then (λ~^2 * z * exp (-λ * z)) / ((1 + λ * tau) * exp (-λ * tau)) + else 0.0 + + def gen: Double = + var x = 0.0 + while x < tau do x = -mu * log (r.gen * r.gen) + x + end gen + + def gen1 (z: Double): Double = + var x = 0.0 + while x < tau do x = -z * log (r.gen * r.gen) + x + end gen1 + +end Erlang2T + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** This class generates `Erlang2T_` (lower-Truncated) random variates. + * This discrete/continuous RV models the time until 2 stages complete. + * It produces an Erlang2 random variate and takes the max with tau (causes a Mass Point). 
+ * FIX - fails Mean Test + * @param mu the original untruncated mean of exponential samples (Erlang mean = mu * 2) + * @param tau the time threshold, require generated values Y >= tau + * @param stream the random number stream + */ +case class Erlang2T_ (mu: Double = 1.0, tau: Double = 0.2, stream: Int = 0) + extends Variate (stream): + + if tau <= 0.0 then flaw ("init", "parameter tau must be positive") + if tau >= mu then flaw ("init", "parameter tau must be less than mu") + + private val λ = 1.0 / mu // lambda, the rate parameter + private val p_tau = 1 - exp (-λ * tau) - λ * tau * exp (-λ * tau) // probability Y = tau (mass point) + + val mean = tau + (1 - p_tau) * 2 * mu // adjusted mean + + def pf (z: Double): Double = if z > tau then λ~^2 * (z-tau) * exp (-λ*(z-tau)) + else if z == tau then POSITIVE_INFINITY // 'p_tau * delta(y-tau)' Dirac delta function + else 0.0 + + def gen: Double = max (tau, -mu * log (r.gen * r.gen)) + + def gen1 (z: Double): Double = max (tau, -z * log (r.gen * r.gen)) + +end Erlang2T_ + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** This class generates `Exponential` random variates. * This continuous RV models the time until an event occurs. + * As its stdev = mean, its coefficient of variation cv = 1. * @see www.math.uah.edu/stat/poisson/Exponential.html - * @param mu the mean + * @param mu the mean (e.g., mean inter-arrival time) * @param stream the random number stream */ case class Exponential (mu: Double = 1.0, stream: Int = 0) @@ -478,10 +573,69 @@ case class Exponential (mu: Double = 1.0, stream: Int = 0) end Exponential +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** This class generates `ExponentialS` (Shifted right) random variates. + * This continuous RV models the time until an event occurs. + * @note: Due to memoryless property, this RV is the same as producing Exponential RVs + * until one is greater than tau. 
+ * @param mu the original unshifted mean (e.g., mean inter-arrival time) + * @param tau the time shift, generated values Y = tau + Exponential + * @param stream the random number stream + */ +case class ExponentialS (mu: Double = 1.0, tau: Double = 0.2, stream: Int = 0) + extends Variate (stream): + + if tau <= 0.0 then flaw ("init", "parameter tau must be positive") + if tau >= mu then flaw ("init", "parameter tau must be less than mu") + + private val λ = 1.0 / mu // lambda, the rate parameter + + val mean = tau + mu // adjusted mean + + def pf (z: Double): Double = if z >= tau then λ * exp (-λ*(z-tau)) + else 0.0 + + def gen: Double = tau - mu * log (r.gen) + + def gen1 (z: Double): Double = tau - z * log (r.gen) + +end ExponentialS + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** This class generates `ExponentialT_` (lower-Truncated) random variates. + * This discrete/continuous RV models the time until an event occurs. + * It produces an Exponential random variate and takes the max with tau (causes a Mass Point). 
+ * @param mu the original untruncated mean (e.g., mean inter-arrival time) + * @param tau the time threshold, require generated values Y >= tau + * @param stream the random number stream + */ +case class ExponentialT_ (mu: Double = 1.0, tau: Double = 0.2, stream: Int = 0) + extends Variate (stream): + + if tau <= 0.0 then flaw ("init", "parameter tau must be positive") + if tau >= mu then flaw ("init", "parameter tau must be less than mu") + + private val λ = 1.0 / mu // lambda, the rate parameter + private val p_tau = 1 - exp (-λ * tau) // probability Y = tau (mass point) + + val mean = tau + (1 - p_tau) * mu // adjusted mean + + def pf (z: Double): Double = if z > tau then λ * exp (-λ*(z-tau)) + else if z == tau then POSITIVE_INFINITY // 'p_tau * delta(y-tau)' Dirac delta function + else 0.0 + + def gen: Double = max (tau, -mu * log (r.gen)) + + def gen1 (z: Double): Double = max (tau, -z * log (r.gen)) + +end ExponentialT_ + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** This class generates `Fisher` (F-Distribution) random variates. * This continuous RV models the ratio of variances. 
- * @see http://www.math.uah.edu/stat/special/Fisher.html + * @see http://www.math.uacombined discrete-continuoush.edu/stat/special/Fisher.html * @param df1 the degrees of freedom for numerator Chi-Square * @param df2 the degrees of freedom for denominator Chi-Square * @param stream the random number stream @@ -558,14 +712,13 @@ case class Gamma (alpha: Double = 1.0, beta: Double = 1.0, stream: Int = 0) while x = alpha / a var prod = 1.0 - for i <- 0 until a do prod *= r.gen + cfor (0, a) { _ => prod *= r.gen } x *= -log (prod) r.gen > (x / alpha)~^b * exp (-b * x / (alpha-1.0)) do () x * beta else // alpha >= 5 if r.gen >= b then erl1.gen else erl2.gen - end if end gen def gen1 (z: Double): Double = @@ -709,10 +862,10 @@ case class HyperGeometric (p: Double = .5, n: Int = 5, pop: Int = 10, stream: In var b: Double = pop // population of number of balls var rd = reds // number of red/success balls in population var s = 0 // count number of successes - for i <- 0 until n do + cfor (0, n) { _ => if r.gen <= rd / b then { s += 1; rd -= 1 } b -= 1 - end for + } // cfor s end gen @@ -721,10 +874,10 @@ case class HyperGeometric (p: Double = .5, n: Int = 5, pop: Int = 10, stream: In var b: Double = pop // population of number of balls var rd = reds // number of red/success balls in population var s = 0 // count number of successes - for i <- 0 until n do + cfor (0, n) { _ => if r.gen <= rd / b then { s += 1; rd -= 1 } b -= 1 - end for + } // cfor s end gen1 @@ -848,14 +1001,14 @@ case class NegativeBinomial (p: Double = .5, s: Int = 2, stream: Int = 0) def gen: Double = var sum = 0 - for i <- 0 until s do sum += geom.gen.toInt + cfor (0, s) { _ => sum += geom.gen.toInt } sum end gen def gen1 (z: Double): Double = val geom = Geometric (z, stream) var sum = 0 - for i <- 0 until s do sum += geom.gen.toInt + cfor (0, s) { _ => sum += geom.gen.toInt } sum end gen1 @@ -1131,7 +1284,6 @@ case class RandiU0 (b: Int = 1000, stream: Int = 0) if previous.size == bb then flaw 
("igen1", "all unique values have been exhausted - starting over") previous.clear () - end if var i = -1 while i = floor ((bb + 1) * r.gen).toInt @@ -1189,7 +1341,6 @@ case class StdNormal (stream: Int = 0) if y2 >= (y1 - 1)~^2 / 2.0 then z = y1 cont = false - end if cont do () if r.gen <= 0.5 then z else -z @@ -1377,14 +1528,14 @@ case class Trinomial (p: Double = 1.0/3.0, q: Double = 1.0/3.0, n: Int = 5, stre else 0.0 end pf - def pf (k: Int, l: Int): Double = // ex: n = 10, (k, l, m) = (2, 3, 5) + def pf (k: Int, l: Int): Double = // ex: n = 10, (k, l, m) = (2, 3, 5) if 0 <= k && 0 <= l && k+l <= n then choose (n, k, l) * p~^k * q~^l * qq~^(n-k-l) else 0.0 end pf override def pmf (k: Int): Array [Double] = - val d = Array.ofDim [Double] (n-k+1) // array to hold pmf distribution + val d = Array.ofDim [Double] (n-k+1) // array to hold pmf distribution d(0) = choose (n, k) * p~^k * qq~^n-k for l <- 1 to n-k do d(l) = d(l-1) * q_qq * (k-l+1) / l.toDouble d @@ -1392,13 +1543,13 @@ case class Trinomial (p: Double = 1.0/3.0, q: Double = 1.0/3.0, n: Int = 5, stre def gen: Double = var sum = 0.0 - for i <- 0 until n do sum += dice.gen // add 0, 1 or 2 + cfor (0, n) { _ => sum += dice.gen } // add 0, 1 or 2 sum end gen def gen1 (z: Double): Double = var sum = 0.0 - for i <- 0 until z.toInt do sum += dice.gen // add 0, 1 or 2 + cfor (0, z.toInt) { _ => sum += dice.gen } // add 0, 1 or 2 sum end gen1 @@ -1519,10 +1670,10 @@ end Weibull rv.isInstanceOf [StdNormal] then 25 else 0 val sum = new Array [Int] (51) - for i <- 1 to rep do + cfor (0, rep) { _ => j = floor (rv.gen * 10.0).toInt + offset if 0 <= j && j <= 50 then sum (j) += 1 - end for + } // cfor for i <- 0 until sum.length do x = (i - offset) / 10.0 @@ -1532,7 +1683,6 @@ end Weibull if e >= 5 then chi2 += (o-e)~^2.0 / e n += 1 - end if print (s"\tsum ($x) = $o : $e ") if i % 5 == 4 then println () end for @@ -1552,9 +1702,14 @@ end Weibull ChiSquare (), Dice (), Discrete (), -// useful? DiscreteF (), +// useful? 
DiscreteF (), Erlang (), + Erlang2S (), + Erlang2T (), +// fails Erlang2T_ (), Exponential (), + ExponentialS (), +// fails Fit Test ExponentialT_ (), Fisher (), Gamma (), Geometric (), @@ -1588,7 +1743,6 @@ end Weibull if testAll || (include contains i) then meansTest (distribution (i)) distributionTest (distribution (i)) - end if end for val trinom = Trinomial (1.0/3.0, 1.0/3.0, 9) @@ -1609,12 +1763,12 @@ end variateTest val freq = new VectorD (7) val x2 = VectorD.range (0, 13) val freq2 = new VectorD (13) - for i <- 0 until 10000 do + cfor (0, 10000) { _ => val sum = dice.igen val sum2 = dice.igen + dice.igen freq(sum) += 1 freq2(sum2) += 1 - end for + } // cfor println (s"x = $x") println (s"freq = $freq") println (s"x2 = $x2") @@ -1636,7 +1790,7 @@ end diceTest import mathstat.Plot val rvg = Uniform () - val x = VectorD (for i <- 0 until 100000 yield rvg.gen + rvg.gen + rvg.gen + rvg.gen) + val x = VectorD (for _ <- 0 until 100000 yield rvg.gen + rvg.gen + rvg.gen + rvg.gen) new Histogram (x) val nrm = Normal () diff --git a/src/main/scala/scalation/random/VariateMat.scala b/src/main/scala/scalation/random/VariateMat.scala index 3bf81da93..999038002 100644 --- a/src/main/scala/scalation/random/VariateMat.scala +++ b/src/main/scala/scalation/random/VariateMat.scala @@ -120,7 +120,7 @@ end NormalMat /** The `RandomMatD` class generates a random matrix of doubles. 
* @param dim the number of rows in the matrix * @param dim2 the number of columns in the matrix - * @param max generate integers in the range 0 (inclusive) to max (inclusive) + * @param max generate integers in the range 0 (inclusive) to max (inclusive) <-- note max arg first * @param min generate integers in the range 0 (inclusive) to max (inclusive) * @param density sparsity basis = 1 - density * @param stream the random number stream @@ -136,7 +136,7 @@ case class RandomMatD (dim: Int = 5, dim2: Int = 10, max: Double = 20.0, min: Do def pf (z: MatrixD): Double = 1.0 / (max - min) ~^ (dim + dim2) - def gen: MatrixD = MatrixD (for i <- 0 until dim yield rvec.gen) + def gen: MatrixD = MatrixD (for _ <- 0 until dim yield rvec.gen) def igen: MatrixD = gen.toInt @@ -157,12 +157,12 @@ end RandomMatD banner ("Test: NormalMat random matrix generation") rvm = NormalMat (4, 5, 0.0, 0.01) // random normal matrix generator println ("mean = " + rvm.mean) // with mean 0 and variance 0.01 - for k <- 0 until 10 do println (rvm.gen) + cfor (0, 10) { _ => println (rvm.gen) } banner ("Test: RandomMatD random matrix generation") rvm = RandomMatD (2, 100) // random matrix generator println ("mean = " + rvm.mean) - for k <- 0 until 10 do println (rvm.gen) + cfor (0, 10) { _ => println (rvm.gen) } val cor = MatrixD ((2, 2), 1.0, 0.9, // covariance/correlation matrix 0.9, 1.0) diff --git a/src/main/scala/scalation/random/VariateSet.scala b/src/main/scala/scalation/random/VariateSet.scala index 8da810ee2..9dd531e9e 100644 --- a/src/main/scala/scalation/random/VariateSet.scala +++ b/src/main/scala/scalation/random/VariateSet.scala @@ -81,7 +81,6 @@ case class RandomSet (count: Int = 10, max: Int = 20, skip: Int = -1, stream: In if count > max then flaw ("init", "requires count <= max") throw new IllegalArgumentException ("RandomSet: count shouldn't be larger than max") - end if private val rng = Randi0 (max, stream) // random integer generator @@ -223,11 +222,11 @@ end RandomSetW println 
("Test: RandomSet random set generation ------------------------") println ("mean = " + rsg.mean) // random set generator - for k <- 0 until 30 do { rs = rsg.igen; println (rs) } + cfor (0, 30) { _ => rs = rsg.igen; println (rs) } println ("Test: RandomSet random subset generation ---------------------") println ("mean = " + rsg.mean) // random set generator - for k <- 0 until 30 do println (rsg.igen (rs, 5)) + cfor (0, 30) { _ => println (rsg.igen (rs, 5)) } end variateSetTest @@ -243,7 +242,7 @@ end variateSetTest var rs: Set [String] = null // variate set println ("Test: RandomSetS random set generation ------------------------") - for k <- 0 until 30 do { rs = rsg.sgen; println (rs) } + cfor (0, 30) { _ => rs = rsg.sgen; println (rs) } end variateSetTest2 @@ -259,7 +258,7 @@ end variateSetTest2 var rs: Set [String] = null // variate set println ("Test: RandomSetW random set generation ------------------------") - for k <- 0 until 30 do { rs = rsg.sgen; println (rs) } + cfor (0, 30) { _ => rs = rsg.sgen; println (rs) } end variateSetTest3 diff --git a/src/main/scala/scalation/random/VariateStr.scala b/src/main/scala/scalation/random/VariateStr.scala index d3581373c..f366bc0b0 100644 --- a/src/main/scala/scalation/random/VariateStr.scala +++ b/src/main/scala/scalation/random/VariateStr.scala @@ -36,7 +36,7 @@ case class RandomStr (lRange: Range = 4 to 7, cRange: Range = 97 to 122, stream: */ override def sgen: String = val sb = new StringBuilder () - for i <- 0 until lrng.igen do sb.append (crng.igen.toChar) + cfor (0, lrng.igen) { _ => sb.append (crng.igen.toChar) } sb.toString end sgen @@ -49,7 +49,7 @@ case class RandomStr (lRange: Range = 4 to 7, cRange: Range = 97 to 122, stream: def sgen1 (z: Range): String = val lrng = Randi (z.start, z.end, stream) // random integer generator val sb = new StringBuilder () - for i <- 0 until lrng.igen do sb.append (crng.igen.toChar) + cfor (0, lrng.igen) { _ => sb.append (crng.igen.toChar) } sb.toString end sgen1 @@ -104,7 
+104,7 @@ end RandomWord var rs: String = null // variate string println ("Test: RandomStr random string generation ------------------------") - for k <- 0 until 30 do { rs = rsg.sgen; println (rs) } + cfor (0, 30) { _ => rs = rsg.sgen; println (rs) } end randomStrTest @@ -120,7 +120,7 @@ end randomStrTest var rs: String = null // variate word println ("Test: RandomWord random string generation ------------------------") - for k <- 0 until 30 do { rs = rsg.sgen; println (rs) } + cfor (0, 30) { _ => rs = rsg.sgen; println (rs) } end randomWordTest diff --git a/src/main/scala/scalation/random/VariateTen.scala b/src/main/scala/scalation/random/VariateTen.scala index 2a203f492..741a426ab 100644 --- a/src/main/scala/scalation/random/VariateTen.scala +++ b/src/main/scala/scalation/random/VariateTen.scala @@ -140,7 +140,7 @@ case class RandomTenD (dim: Int = 5, dim2: Int = 4, dim3: Int = 3, max: Double = def pf (z: TensorD): Double = 1.0 / (max - min) ~^ (dim + dim2 + dim3) - def gen: TensorD = TensorD (for i <- 0 until dim yield rmat.gen) + def gen: TensorD = TensorD (for _ <- 0 until dim yield rmat.gen) def igen: TensorD = gen.toInt @@ -159,12 +159,12 @@ end RandomTenD banner ("Test: NormalTen random tensor generation") rvt = NormalTen (5, 4, 3, 0.0, 0.01) // random normal tensor generator println ("mean = " + rvt.mean) // with mean 0 and variance 0.01 - for k <- 0 until 10 do println (rvt.gen) + cfor (0, 10) { _ => println (rvt.gen) } banner ("Test: RandomTenD random tensor generation") rvt = RandomTenD () // random tensor generator println ("mean = " + rvt.mean) - for k <- 0 until 10 do println (rvt.gen) + cfor (0, 10) { _ => println (rvt.gen) } /* FIX import VariateTen.corTransform diff --git a/src/main/scala/scalation/random/VariateVec.scala b/src/main/scala/scalation/random/VariateVec.scala index 1752bca5d..58f959227 100644 --- a/src/main/scala/scalation/random/VariateVec.scala +++ b/src/main/scala/scalation/random/VariateVec.scala @@ -312,7 +312,7 @@ end 
PermutedVecI //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RandomVecSample` class generates random sample from a population. +/** The `RandomVecSample` class generates random samples from a population. * @param pop the size of the population (0, 1, ... pop-1) * @param samp the size of the random samples * @param stream the random number stream @@ -324,8 +324,7 @@ case class RandomVecSample (pop: Int, samp: Int, stream: Int = 0) if samp >= pop then flaw ("int", "requires samp < pop") - throw new IllegalArgumentException ("RandomVecSample: samp too large") - end if + throw new IllegalArgumentException (s"RandomVecSample: samp = $samp is too large") private val mu = pop / 2.0 // mean private val rng = Randi0 (pop-1, stream) // random integer generator @@ -336,15 +335,24 @@ case class RandomVecSample (pop: Int, samp: Int, stream: Int = 0) def gen: VectorD = igen.toDouble - def igen: VectorI = + def igen: VectorI = // randomly generate indices of size samp from a vector of size pop val y = VectorI.range (0, pop) // generate vector containing 0, 1, ... pop-1 for i <- 0 until samp do val j = rng.igen // random integer 0 to pop-1 val t = y(i); y(i) = y(j); y(j) = t // swap y(i) and y(j) end for - y(0 until samp) // take the first sampSize elements + y(0 until samp) // take the first samp elements end igen + def isplit: (VectorI, VectorI) = // randomly split the indices of a vector of size pop + val y = VectorI.range (0, pop) // generate vector containing 0, 1, ... pop-1 + for i <- 0 until samp do + val j = rng.igen // random integer 0 to pop-1 + val t = y(i); y(i) = y(j); y(j) = t // swap y(i) and y(j) + end for + (y(0 until samp), y(samp+1 until pop)) // take the first samp elements and the remaining elements + end isplit + end RandomVecSample @@ -352,10 +360,10 @@ end RandomVecSample /** The `RandomVecD` class generates a random vector of doubles. * Ex: (3.0, 2.0, 0.0, 4.0, 1.0) has dim = 5 and max = 4. 
* @param dim the dimension/size of the vector (number of elements) - * @param max generate doubles in the range min to max + * @param max generate doubles in the range min to max (@note: max arg first) * @param min generate doubles in the range min to max * @param density sparsity basis = 1 - density - * @param runLength the maximum run length + * @param runLength the maximum run length (for `repgen`) * @param stream the random number stream */ case class RandomVecD (dim: Int = 10, max: Double = 20.0, min: Double = 0.0, @@ -374,16 +382,16 @@ case class RandomVecD (dim: Int = 10, max: Double = 20.0, min: Double = 0.0, def igen: VectorI = gen.toInt def gen: VectorD = - VectorD (for i <- 0 until dim yield if rn.gen < density then rng.gen else 0.0) + VectorD (for _ <- 0 until dim yield if rn.gen < density then rng.gen else 0.0) end gen def repgen: VectorD = val v = new VectorD (dim) var cnt = 0 while cnt < dim do - val x = rng.gen // value - val rep = ri.igen // repetition - for j <- 0 until rep if cnt < dim do { v(cnt) = x; cnt += 1} + val x = rng.gen // value + val rep = ri.igen // repetition + cfor (0, rep) { _ => if cnt < dim then { v(cnt) = x; cnt += 1 } } end while v end repgen @@ -396,7 +404,7 @@ end RandomVecD * Ex: (3.0, 2.0, 0.0, 4.0, 1.0) has dim = 5. * This version does not consider density or runLength. * @param dim the dimension/size of the vector (number of elements) - * @param max generate doubles in the range min to max + * @param max generate doubles in the range min to max (@note: max arg first) * @param min generate doubles in the range min to max * @param stream the random number stream */ @@ -425,7 +433,7 @@ end RandomVecD_ /** The `RandomVecI` class generates a random vector of integers. * Ex: (3, 2, 0, 4, 1) has dim = 5 and max = 4. 
* @param dim the dimension/size of the vector (number of elements) - * @param max generate integers in the range min (inclusive) to max (inclusive) + * @param max generate integers in the range min (inclusive) to max (inclusive) (@note: max arg first) * @param min generate integers in the range min (inclusive) to max (inclusive) * @param skip skip this number, i.e, do not use it * @param unique whether the integers must be unique @@ -440,7 +448,6 @@ case class RandomVecI (dim: Int = 10, max: Int = 20, min: Int = 10, skip: Int = if unique && (max-min) < dim-1 then flaw ("init", "requires range max-min = ${max-min) >= dim-1 = ${dim-1}") throw new IllegalArgumentException ("RandomVecI: range max-min is too small for unique") - end if private val mu = (max - min) / 2.0 // mean private val rng = Randi (min, max, stream) // random integer generator @@ -467,6 +474,38 @@ case class RandomVecI (dim: Int = 10, max: Int = 20, min: Int = 10, skip: Int = end RandomVecI +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `RandomVecIR` class generates a random vector of integers with REPLACEMENT. + * @see `mathstat.StatBootstrap` + * Ex: (3, 2, 0, 4, 3) has dim = 5 and max = 4. 
+ * @param dim the dimension/size of the vector (number of elements) + * @param max generate integers in the range min (inclusive) to max (inclusive) (@note: max arg first) + * @param min generate integers in the range min (inclusive) to max (inclusive) + * @param stream the random number stream + */ +case class RandomVecIR (dim: Int = 10, max: Int = 20, min: Int = 0, stream: Int = 0) + extends VariateVec (stream): + + _discrete = true + + private val mu = (max - min) / 2.0 // mean + private val rng = Randi (min, max, stream) // random integer generator + + def mean: VectorD = VectorD.fill (dim)(mu) + + def pf (z: VectorD): Double = 1.0 / (max - min) ~^ dim + + def gen: VectorD = igen.toDouble + + def igen: VectorI = + val y = new VectorI (dim) + for i <- 0 until dim do y(i) = rng.igen + y + end igen + +end RandomVecIR + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `RandomVecS` class generates a random vector of strings. * Ex: ("3", "2", "0", "4", "1") has dim = 5 and max = 4. 
@@ -540,7 +579,7 @@ case class Multinomial (p: Array [Double] = Array (.4, .7, 1.0), n: Int = 5, str def gen: VectorD = igen.toDouble - def igen: VectorI = VectorI (for i <- p.indices yield dice.igen) + def igen: VectorI = VectorI (for _ <- p.indices yield dice.igen) end Multinomial @@ -581,7 +620,7 @@ end RandomVecTrend banner ("Test: ProbabilityVec random vector generation ----------------") rvv = ProbabilityVec (10) println ("mean = " + rvv.mean) // probability vector generator - for k <- 0 until 30 do println (rvv.gen) + cfor (0, 30) { _ => println (rvv.gen) } banner ("Test: NormalVec random vector generation ---------------------") val mu = VectorD (5.0, 5.0) @@ -589,60 +628,60 @@ end RandomVecTrend 1.0, 2.0) rvv = NormalVec (mu, cov) // multivariate normal generator println ("mean = " + rvv.mean) - for k <- 0 until 30 do println (rvv.gen) + cfor (0, 30) { _ => println (rvv.gen) } banner ("Test: NormalVec_ random vector generation --------------------") val sig = VectorD (2.0, 1.0) rvv = NormalVec_ (mu, sig) // ind. 
multivariate normal generator println ("mean = " + rvv.mean) - for k <- 0 until 30 do println (rvv.gen) + cfor (0, 30) { _ => println (rvv.gen) } banner ("Test: PermutedVecD random vector generation ------------------") val x = VectorD (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0) rvv = PermutedVecD (x) // random permutation generator println ("mean = " + rvv.mean) - for k <- 0 until 30 do println (rvv.gen) + cfor (0, 30) { _ => println (rvv.gen) } banner ("Test: PermutedVecI random vector generation ------------------") val y = VectorI (1, 2, 3, 4, 5, 6, 7, 8, 9) rvv = PermutedVecI (y) // random permutation generator println ("mean = " + rvv.mean) - for k <- 0 until 30 do println (rvv.igen) + cfor (0, 30) { _ => println (rvv.igen) } banner ("Test: RandomVecSample random vector generation ---------------") rvv = RandomVecSample (10, 5) // random permutation generator println ("mean = " + rvv.mean) - for k <- 0 until 30 do println (rvv.igen) + cfor (0, 30) { _ => println (rvv.igen) } banner ("Test: RandomVecD random vector generation --------------------") rvv = RandomVecD () // random vector generator doubles println ("mean = " + rvv.mean) - for k <- 0 until 30 do println (rvv.gen) + cfor (0, 30) { _ => println (rvv.gen) } banner ("Test: RandomVecD_ random vector generation -------------------") rvv = RandomVecD_ (2, VectorD (10, 8), VectorD (0, 0)) // random vector generator doubles println ("mean = " + rvv.mean) - for k <- 0 until 30 do println (rvv.gen) + cfor (0, 30) { _ => println (rvv.gen) } banner ("Test: RandomVecI random vector generation --------------------") rvv = RandomVecI () // random vector generator ints println ("mean = " + rvv.mean) - for k <- 0 until 30 do println (rvv.igen) + cfor (0, 30) { _ => println (rvv.igen) } banner ("Test: RandomVecS random vector generation --------------------") rvv = RandomVecS () // random vector generator strings println ("mean = " + rvv.mean) - for k <- 0 until 30 do println (rvv.asInstanceOf [RandomVecS].sgen) + 
cfor (0, 30) { _ => println (rvv.asInstanceOf [RandomVecS].sgen) } banner ("Test: Multinomial random vector generation --------------------") rvv = Multinomial () // random multinomial generator println ("mean = " + rvv.mean) - for k <- 0 until 30 do println (rvv.igen) + cfor (0, 30) { _ => println (rvv.igen) } banner ("Test: RandomVecTrend random vector generation -----------------") rvv = RandomVecTrend () // time-series vector generator println ("mean = " + rvv.mean) - for k <- 0 until 30 do println (rvv.gen) + cfor (0, 30) { _ => println (rvv.gen) } end variateVecTest diff --git a/src/main/scala/scalation/scala2d/CurvilinearShape.scala b/src/main/scala/scalation/scala2d/CurvilinearShape.scala index 39b0b12a9..acd831635 100644 --- a/src/main/scala/scalation/scala2d/CurvilinearShape.scala +++ b/src/main/scala/scalation/scala2d/CurvilinearShape.scala @@ -67,7 +67,7 @@ trait CurvilinearShape extends Shape: * @param p2 the ending point */ def setLine (p1: R2, pc: R2, p2: R2): Unit = - flaw ("setFrame (p1, pc, p2)", "this method is not overridden by mixin class") + flaw (s"setLine (p1 = $p1, pc = $pc, p2 = $p2)", "method is not overridden by mixin class") end setLine def setLine (p1: VectorD, pc: VectorD, p2: VectorD): Unit = @@ -83,7 +83,7 @@ trait CurvilinearShape extends Shape: * @param height the height of object traversing the curve */ def next (width: Double, height: Double): R2 = - flaw ("next (width, height)", "this method is not overridden by mixin class") + flaw (s"next (width = $width, height = $height)", "method is not overridden by mixin class") null end next diff --git a/src/main/scala/scalation/scala2d/QCurve.scala b/src/main/scala/scalation/scala2d/QCurve.scala index f2f4bc18e..8e7296e0c 100644 --- a/src/main/scala/scalation/scala2d/QCurve.scala +++ b/src/main/scala/scalation/scala2d/QCurve.scala @@ -264,7 +264,6 @@ object QCurve: val dist = bend * distance (p1.x, p1.y, p2.x, p2.y) if m.isInfinity then new R2 (mid.x + dist, mid.y) else new R2 (mid.x + 
dist * m / sqrt (1.0 + pow (m, 2)), mid.y - dist / sqrt (1.0 + pow (m, 2))) - end if end if end calcControlPoint diff --git a/src/main/scala/scalation/scala2d/VizFrame.scala b/src/main/scala/scalation/scala2d/VizFrame.scala index 98a5d85af..0cec570a9 100644 --- a/src/main/scala/scalation/scala2d/VizFrame.scala +++ b/src/main/scala/scalation/scala2d/VizFrame.scala @@ -22,14 +22,13 @@ package scala2d class VizFrame (title: String, canvas: Panel, w: Int = 800, h: Int = 700, o: Int = 100) extends Frame (title): - println ("Run + title") + println (s"VizFrame: run $title") setLocation (o, o) setSize (new Dimension (w, h)) if canvas != null then // may need to set these later getContentPane ().add (canvas) setVisible (true) - end if //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::s /** Get the width of the frame. diff --git a/src/main/scala/scalation/scala2d/ZoomablePanel.scala b/src/main/scala/scalation/scala2d/ZoomablePanel.scala index 42ed70b35..d0ef187b1 100644 --- a/src/main/scala/scalation/scala2d/ZoomablePanel.scala +++ b/src/main/scala/scalation/scala2d/ZoomablePanel.scala @@ -35,8 +35,8 @@ trait ZoomablePanel private var scale = 1.0 private var basex = 0.0 private var basey = 0.0 - private var originx = 0.0 - private var originy = 0.0 +// private var originx = 0.0 +// private var originy = 0.0 addMouseWheelListener (this) addMouseMotionListener (this) @@ -59,7 +59,7 @@ trait ZoomablePanel try at.inverseTransform (new Point2D.Double (x, y), p) catch - case ee: Exception => {} + case _ : Exception => {} end try x = p.getX () y = p.getY () @@ -83,8 +83,8 @@ trait ZoomablePanel override def mouseDragged (e: MouseEvent): Unit = val dx = (e.getX () - basex) / scale val dy = (e.getY () - basey) / scale - originx += dx * scale - originy += dy * scale +// originx += dx * scale +// originy += dy * scale at.translate (dx, dy) basex = e.getX () basey = e.getY () diff --git a/src/main/scala/scalation/scala3d/Clock.scala 
b/src/main/scala/scalation/scala3d/Clock.scalaa similarity index 100% rename from src/main/scala/scalation/scala3d/Clock.scala rename to src/main/scala/scalation/scala3d/Clock.scalaa diff --git a/src/main/scala/scalation/scala3d/Gfx3DExample.scalaa b/src/main/scala/scalation/scala3d/Gfx3DExample.scalaa index b7c57db44..5c0c45072 100644 --- a/src/main/scala/scalation/scala3d/Gfx3DExample.scalaa +++ b/src/main/scala/scalation/scala3d/Gfx3DExample.scalaa @@ -47,7 +47,6 @@ class Gfx3DExample extends Application: if ! is3DSupported then println ("Sorry, 3D is not supported in JavaFX on this platform.") return - end if val box = new Box (100, 100, 100) box.setCullFace (CullFace.NONE) diff --git a/src/main/scala/scalation/scala3d/Road3d.scala b/src/main/scala/scalation/scala3d/Road3d.scala deleted file mode 100644 index d60d34e91..000000000 --- a/src/main/scala/scalation/scala3d/Road3d.scala +++ /dev/null @@ -1,227 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Jacobi Coleman - * @version 2.0 - * @date Wed May 1 01:19:46 EDT 2024 - * @see LICENSE (MIT style license file). 
- * - * @note Example Animation of a Road - */ - -package scalation -package scala3d - -import javafx.scene.input.{MouseEvent, ScrollEvent} - -import scala.collection.mutable.ArrayBuffer - -import scalafx.application.JFXApp3 -import scalafx.application.JFXApp3.PrimaryStage -import scalafx.animation.{KeyFrame, Timeline} -import scalafx.geometry.Point3D -import scalafx.scene.{Group, PerspectiveCamera, Scene} -import scalafx.scene.paint.{Color, PhongMaterial} -import scalafx.scene.paint.Color.sfxColor2jfx -import scalafx.scene.shape.{Box, Cylinder} -import scalafx.scene.shape.Cylinder.sfxCylinder2jfx -import scalafx.scene.shape.Box.sfxBox2jfx -import scalafx.scene.shape.Shape3D.sfxShape3D2jfx -import scalafx.util.Duration - -import scalation.mathstat.Statistic - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Road3d` object implements a simple road animation. - * > runMain scalation.scala3d.Road3d - */ -object Road3d extends JFXApp3: - - println ("Running Road3d Animation") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `Direction` enumeration indicates the directions that cars can move. - */ - enum Direction: - case South - end Direction - - private val clock = new Clock () - private val roadLength = 400 - private val roadColor = Color.LightGrey - private val lightColor = Color.Red - private val lightColor2 = Color.Green - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a 3D scene and start the animation. 
- */ - override def start (): Unit = - val root = new Group () // root node for scene - - // create sources and add to root - val source = List ( new Source (15, "northSpawn", Point3D (390, 0, -25)) {}) - source.foreach { s => root.children.add (s.entry) } - - // create sinks and add to root - val sink = List ( new Sink (15, "fromNorth", Point3D (390, 840, -25)) {}) - sink.foreach { s => root.children.add (s.exit) } - - // create roads and add to root - val roads = List ( - new Box (15, roadLength, 5) { // from north-in position - translateX = 390; translateY = 200 }, - new Box (15, roadLength, 5) { // from south-out position - translateX = 390; translateY = 640 }) - - roads.foreach { road => - val roadMaterial = new PhongMaterial () - roadMaterial.diffuseColor = roadColor - road.material = roadMaterial - root.children.add (road) } - - // create traffic lights - for pedestrian crossing - val nslights = List ( - new Cylinder (8, 4) { - translateX = 390; translateY = 405; translateZ = -25 }) - - nslights.foreach { light => - light.material = new PhongMaterial (lightColor) - root.children.add (light) } - - // create a timeline for changing light colors - val lightsTimeline = new Timeline { - // Initialize light colors - var stopColor = lightColor - var goColor = lightColor2 - - // create key frames - keyFrames = Seq ( - KeyFrame (Duration (2000), onFinished = _ => { - // Toggle light colors - nslights.foreach { light => - val lightMaterial = new PhongMaterial () - lightMaterial.diffuseColor = stopColor - light.material = lightMaterial } - val tempColor = stopColor - stopColor = goColor - goColor = tempColor }), - - KeyFrame (Duration (8000)) // add a keyframe with no action, just to create a loop - ) // end keyFrames - - cycleCount = Timeline.Indefinite - - val northcars = ArrayBuffer [Vehicle] () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The cars to add to the animation scene. 
- * @param cars the cars to be added - * @param direction direction of motion - */ - def addCarToList (cars: ArrayBuffer [Vehicle], direction: Direction): Unit = - val carColor = Color.Blue - direction match - case Direction.South => - val car = new Vehicle (clock, carColor) - car.body.translateX = (source(0).entry.translateX.value); - car.body.translateY = (source(0).entry.translateY.value) - northcars += car - root.children.add (car.body) - end addCarToList - - // add more cars as needed - var car = new Vehicle (clock) - car.body.translateX = (source(0).entry.translateX.value); - car.body.translateY = (source(0).entry.translateY.value) - northcars += car - - northcars.foreach { car => root.children.add (car.body)} - - val addCarsTimeline = new Timeline { - keyFrames = Seq ( - KeyFrame (Duration (3000), onFinished = _ => { - addCarToList (northcars, Direction.South) })) - cycleCount = Timeline.Indefinite } - - addCarsTimeline.play () - - val nmaterial = nslights(0).material.value.asInstanceOf [javafx.scene.paint.PhongMaterial] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether the given car should stop at the traffic light. 
- * @param car the car that may need to stop - * @param direction direction of motion - */ - def shouldStop (car: Vehicle, direction: Direction): Boolean = - direction match - case Direction.South => nmaterial.getDiffuseColor == sfxColor2jfx (goColor) - && car.body.translateY.value + 2 >= nslights(0).translateY.value -15 - && car.body.translateY.value + 2 <= nslights(0).translateY.value - 10 - end shouldStop - - // create a timeline for car animation - val timeline = new Timeline { - keyFrames = Seq ( - KeyFrame (Duration (30), onFinished = _ => { - northcars.foreach { car => - if car.body.translateY.value >= sink(0).exit.translateY.value then - car.body.translateY = sink(0).exit.translateY.value // car reached the sink box, stop - sink(0).terminate (car, clock) - else if shouldStop (car, Direction.South) then - car.body.translateY = car.body.translateY.value - else - car.body.translateY = car.body.translateY.value + 2 - end if - } // end northcars - }) // end KeyFrame - ) // end keyFrames - cycleCount = Timeline.Indefinite - } // end timeline - timeline.play () - - } // end lightsTimeline - lightsTimeline.play () - - println (Statistic.labels) - for s <- sink do println (s.showStats ()) - - // create and position the camera - val camera = new PerspectiveCamera (false) - camera.translateZ = -400 - camera.translateY = 50 - camera.translateX = -300 - camera.rotationAxis = new Point3D (400, 0, 0) - camera.rotate = 67 - - // camera movement variables - var anchorX, anchorY, anchorPosX, anchorPosY = 0.0 - - // mouse drag event handler for camera rotation - root.onMousePressed = (event: MouseEvent) => { - anchorX = event.getSceneX - anchorY = event.getSceneY - anchorPosX = camera.translateX.value - anchorPosY = camera.translateY.value - println ("mouse is pressed") } - - // mouse dragged event handler - root.onMouseDragged = (event: MouseEvent) => { - camera.translateX.value = anchorPosX - (anchorY - event.getSceneY) / 5.0 - camera.translateY.value = anchorPosY + 
(anchorX - event.getSceneX) / 5.0 - println ("mouse is pressed and dragged") } - - // mouse scroll event handler for camera zooming - root.onScroll = (event: ScrollEvent) => { - val delta = event.getDeltaY - camera.translateZ.value -= delta - println ("scroll wheel is used") } - - // create the main scene - val mainScene = new Scene (root, 1000, 800, Color.White) - mainScene.camera = camera - - // set the stage - stage = new PrimaryStage { title = "Traffic 3D"; scene = mainScene } - end start - -end Road3d - diff --git a/src/main/scala/scalation/scala3d/Road3d.scalaa b/src/main/scala/scalation/scala3d/Road3d.scalaa new file mode 100644 index 000000000..43f280770 --- /dev/null +++ b/src/main/scala/scalation/scala3d/Road3d.scalaa @@ -0,0 +1,226 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Jacobi Coleman + * @version 2.0 + * @date Wed May 1 01:19:46 EDT 2024 + * @see LICENSE (MIT style license file). + * + * @note Example Animation of a Road + */ + +package scalation +package scala3d + +import javafx.scene.input.{MouseEvent, ScrollEvent} + +import scala.collection.mutable.ArrayBuffer + +import scalafx.application.JFXApp3 +import scalafx.application.JFXApp3.PrimaryStage +import scalafx.animation.{KeyFrame, Timeline} +import scalafx.geometry.Point3D +import scalafx.scene.{Group, PerspectiveCamera, Scene} +import scalafx.scene.paint.{Color, PhongMaterial} +import scalafx.scene.paint.Color.sfxColor2jfx +import scalafx.scene.shape.{Box, Cylinder} +import scalafx.scene.shape.Cylinder.sfxCylinder2jfx +import scalafx.scene.shape.Box.sfxBox2jfx +import scalafx.scene.shape.Shape3D.sfxShape3D2jfx +import scalafx.util.Duration + +import scalation.mathstat.Statistic + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Road3d` object implements a simple road animation. 
+ * > runMain scalation.scala3d.Road3d + */ +object Road3d extends JFXApp3: + + println ("Running Road3d Animation") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `Direction` enumeration indicates the directions that cars can move. + */ + enum Direction: + case South + end Direction + + private val clock = new Clock () + private val roadLength = 400 + private val roadColor = Color.LightGrey + private val lightColor = Color.Red + private val lightColor2 = Color.Green + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Create a 3D scene and start the animation. + */ + override def start (): Unit = + val root = new Group () // root node for scene + + // create sources and add to root + val source = List ( new Source (15, "northSpawn", Point3D (390, 0, -25)) {}) + source.foreach { s => root.children.add (s.entry) } + + // create sinks and add to root + val sink = List ( new Sink (15, "fromNorth", Point3D (390, 840, -25)) {}) + sink.foreach { s => root.children.add (s.exit) } + + // create roads and add to root + val roads = List ( + new Box (15, roadLength, 5) { // from north-in position + translateX = 390; translateY = 200 }, + new Box (15, roadLength, 5) { // from south-out position + translateX = 390; translateY = 640 }) + + roads.foreach { road => + val roadMaterial = new PhongMaterial () + roadMaterial.diffuseColor = roadColor + road.material = roadMaterial + root.children.add (road) } + + // create traffic lights - for pedestrian crossing + val nslights = List ( + new Cylinder (8, 4) { + translateX = 390; translateY = 405; translateZ = -25 }) + + nslights.foreach { light => + light.material = new PhongMaterial (lightColor) + root.children.add (light) } + + // create a timeline for changing light colors + val lightsTimeline = new Timeline { + // Initialize light colors + var stopColor = lightColor + var goColor = lightColor2 + + // create key frames + keyFrames = Seq ( + KeyFrame 
(Duration (2000), onFinished = _ => { + // Toggle light colors + nslights.foreach { light => + val lightMaterial = new PhongMaterial () + lightMaterial.diffuseColor = stopColor + light.material = lightMaterial } + val tempColor = stopColor + stopColor = goColor + goColor = tempColor }), + + KeyFrame (Duration (8000)) // add a keyframe with no action, just to create a loop + ) // end keyFrames + + cycleCount = Timeline.Indefinite + + val northcars = ArrayBuffer [Vehicle] () + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The cars to add to the animation scene. + * @param cars the cars to be added + * @param direction direction of motion + */ + def addCarToList (cars: ArrayBuffer [Vehicle], direction: Direction): Unit = + val carColor = Color.Blue + direction match + case Direction.South => + val car = new Vehicle (clock, carColor) + car.body.translateX = (source(0).entry.translateX.value); + car.body.translateY = (source(0).entry.translateY.value) + northcars += car + root.children.add (car.body) + end addCarToList + + // add more cars as needed + var car = new Vehicle (clock) + car.body.translateX = (source(0).entry.translateX.value); + car.body.translateY = (source(0).entry.translateY.value) + northcars += car + + northcars.foreach { car => root.children.add (car.body)} + + val addCarsTimeline = new Timeline { + keyFrames = Seq ( + KeyFrame (Duration (3000), onFinished = _ => { + addCarToList (northcars, Direction.South) })) + cycleCount = Timeline.Indefinite } + + addCarsTimeline.play () + + val nmaterial = nslights(0).material.value.asInstanceOf [javafx.scene.paint.PhongMaterial] + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Determine whether the given car should stop at the traffic light. 
+ * @param car the car that may need to stop + * @param direction direction of motion + */ + def shouldStop (car: Vehicle, direction: Direction): Boolean = + direction match + case Direction.South => nmaterial.getDiffuseColor == sfxColor2jfx (goColor) + && car.body.translateY.value + 2 >= nslights(0).translateY.value -15 + && car.body.translateY.value + 2 <= nslights(0).translateY.value - 10 + end shouldStop + + // create a timeline for car animation + val timeline = new Timeline { + keyFrames = Seq ( + KeyFrame (Duration (30), onFinished = _ => { + northcars.foreach { car => + if car.body.translateY.value >= sink(0).exit.translateY.value then + car.body.translateY = sink(0).exit.translateY.value // car reached the sink box, stop + sink(0).terminate (car, clock) + else if shouldStop (car, Direction.South) then + car.body.translateY = car.body.translateY.value + else + car.body.translateY = car.body.translateY.value + 2 + } // end northcars + }) // end KeyFrame + ) // end keyFrames + cycleCount = Timeline.Indefinite + } // end timeline + timeline.play () + + } // end lightsTimeline + lightsTimeline.play () + + println (Statistic.labels) + for s <- sink do println (s.showStats ()) + + // create and position the camera + val camera = new PerspectiveCamera (false) + camera.translateZ = -400 + camera.translateY = 50 + camera.translateX = -300 + camera.rotationAxis = new Point3D (400, 0, 0) + camera.rotate = 67 + + // camera movement variables + var anchorX, anchorY, anchorPosX, anchorPosY = 0.0 + + // mouse drag event handler for camera rotation + root.onMousePressed = (event: MouseEvent) => { + anchorX = event.getSceneX + anchorY = event.getSceneY + anchorPosX = camera.translateX.value + anchorPosY = camera.translateY.value + println ("mouse is pressed") } + + // mouse dragged event handler + root.onMouseDragged = (event: MouseEvent) => { + camera.translateX.value = anchorPosX - (anchorY - event.getSceneY) / 5.0 + camera.translateY.value = anchorPosY + (anchorX - 
event.getSceneX) / 5.0 + println ("mouse is pressed and dragged") } + + // mouse scroll event handler for camera zooming + root.onScroll = (event: ScrollEvent) => { + val delta = event.getDeltaY + camera.translateZ.value -= delta + println ("scroll wheel is used") } + + // create the main scene + val mainScene = new Scene (root, 1000, 800, Color.White) + mainScene.camera = camera + + // set the stage + stage = new PrimaryStage { title = "Traffic 3D"; scene = mainScene } + end start + +end Road3d + diff --git a/src/main/scala/scalation/scala3d/Sink.scala b/src/main/scala/scalation/scala3d/Sink.scalaa similarity index 100% rename from src/main/scala/scalation/scala3d/Sink.scala rename to src/main/scala/scalation/scala3d/Sink.scalaa diff --git a/src/main/scala/scalation/scala3d/Source.scala b/src/main/scala/scalation/scala3d/Source.scalaa similarity index 100% rename from src/main/scala/scalation/scala3d/Source.scala rename to src/main/scala/scalation/scala3d/Source.scalaa diff --git a/src/main/scala/scalation/scala3d/Vehicle.scala b/src/main/scala/scalation/scala3d/Vehicle.scalaa similarity index 100% rename from src/main/scala/scalation/scala3d/Vehicle.scala rename to src/main/scala/scalation/scala3d/Vehicle.scalaa diff --git a/src/main/scala/scalation/simulation/Coroutine.scala b/src/main/scala/scalation/simulation/Coroutine.scala index 24f8f77e8..98c11729b 100644 --- a/src/main/scala/scalation/simulation/Coroutine.scala +++ b/src/main/scala/scalation/simulation/Coroutine.scala @@ -51,7 +51,7 @@ abstract class Coroutine (label: String = "cor") nStarted += 1 try act () - catch case ex: InterruptedException => + catch case _ : InterruptedException => debug ("run", s"INTERRUPTED coroutine $cor_id") end try nTerminated +=1 @@ -87,7 +87,6 @@ abstract class Coroutine (label: String = "cor") else debug ("yyield", s"$cor_id STARTs that new coroutine ${that.cor_id}") that.start () - end if end if if quit then @@ -96,7 +95,6 @@ abstract class Coroutine (label: String = 
"cor") else debug ("yyield", s"$cor_id WAITs on semaphore") _sema.acquire () // wait until resumed - end if end yyield //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: @@ -114,7 +112,6 @@ abstract class Coroutine (label: String = "cor") vt.start () else pool.submit (this) - end if end start @@ -142,7 +139,7 @@ end Coroutine */ object Coroutine: - private [simulation] var useVirtualThread = false // whether to use regular or virtual threads + private [simulation] var useVirtualThread = true // whether to use regular or virtual threads private val debug = debugf ("Coroutine", false) // debug function private val flaw = flawf ("Coroutine") // flaw function @@ -177,7 +174,6 @@ object Coroutine: if nCoreThreads != CORE_THREADS then threadPoolExecutor.setCorePoolSize (nCoreThreads) else flaw ("startup", "coroutine system is already started") - end if end startup //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/simulation/ErlangProcess.scala b/src/main/scala/scalation/simulation/ErlangProcess.scala new file mode 100644 index 000000000..c3ae3f996 --- /dev/null +++ b/src/main/scala/scalation/simulation/ErlangProcess.scala @@ -0,0 +1,112 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Sun Apr 6 17:25:30 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Erlang Process (Counting Process with Erlang Inter-arrivale Times) + */ + +package scalation +package simulation + +import scala.collection.mutable.ArrayBuffer + +import scalation.mathstat._ +import scalation.random.{Erlang, VariateVec} + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `ErlangProcess` class generates data following a Erlng Process. 
+ * @param t the terminal time + * @param lambda the arrival rate + * @param stream the random number stream to use + */ +class ErlangProcess (t: Double, lambda: Double = 1.0, stream: Int = 0) + extends VariateVec (stream): + + private val mu = 1.0 / lambda // mean interarrival time + private val k = 2 // number of stages/level of averaging + protected val t_ia = Erlang (mu/k, k, stream) // interarrival time distribution + protected var t_a = VectorD.nullv // arrival time vector + + def mean: VectorD = VectorD.fill (1)(lambda * t) // mean of N(t) + + def pf (z: VectorD): Double = ??? + + def igen: VectorI = gen.toInt + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Generate the arrival times in the time interval [0, t], returning them + * as a vector. + */ + def gen: VectorD = + val atime = ArrayBuffer [Double] () + var now = 0.0 + while now <= t do + now += t_ia.gen + atime += now + end while + t_a = VectorD (atime) + t_a + end gen + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the number of arrivals by time tt, i.e., N(tt) + * @param tt the inquiry time (how many arrivals by time tt) + */ + def num (tt: Double): Int = + if t_a == null then gen + val i = t_a.indexWhere (_ > tt) + if i < 0 then t_a.dim else i + end num + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the arrivals/events occurring during each time interval of length t_span. 
+ * @param t_span the time span for an interval (e.g., 5 minute time span) + */ + def flow (t_span: Double): VectorI = + if t_a == null then gen + val flw = ArrayBuffer [Int] () + var now = 0.0 + var n1 = 0 + while now <= t do + val n2 = num (now) + flw += n2 - n1 + now += t_span + n1 = n2 + end while + VectorI (flw) + end flow + +end ErlangProcess + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `erlangProcessTest` main function is used to test the `ErlangProcess` class. + * Example of car arrivals and determination of traffic flow (car per 5-minutes + * passing by a sensor). + * > runMain scalation.simulation.erlangProcessTest + */ +@main def erlangProcessTest (): Unit = + + banner ("running erlangProcessTest") + val t_end = 50.0 // simulate for 50 minutes + val lambda = 1.0 // arrival rate 2 cars per minute + val pp = new ErlangProcess (t_end, lambda) + println (s"pp.gen = ${pp.gen}") + println (s"pp.num (5) = ${pp.num (5)}") + + banner ("Plot the Erlng Process: total cars") + val t = VectorD.range (0, 501) / 10.0 + val nt = new VectorI (t.dim) + for i <- t.indices do nt(i) = pp.num (t(i)) + new Plot (t, nt.toDouble, null, "ErlangProcess total cars", lines = true) + + banner ("Plot the flow of cars per 5 min.") + val flw = pp.flow (5.0) + val tflw = VectorD.range (0, 11) * 5.0 + new Plot (tflw, flw.toDouble, null, "ErlangProcess cars per 5 min.", lines = true) + +end erlangProcessTest + diff --git a/src/main/scala/scalation/simulation/Monitor.scala b/src/main/scala/scalation/simulation/Monitor.scala index 2f592e860..f65179fe0 100644 --- a/src/main/scala/scalation/simulation/Monitor.scala +++ b/src/main/scala/scalation/simulation/Monitor.scala @@ -53,7 +53,6 @@ case class Monitor (project: String = "simulation"): ew.println (s"${who.me} \t $what \t at time $when.") else ew.println (s"${who.me} \t $what \t ${whom.me} \t at time $when.") - end if end if end trace diff --git 
a/src/main/scala/scalation/simulation/NH_ErlangProcess.scala b/src/main/scala/scalation/simulation/NH_ErlangProcess.scala new file mode 100644 index 000000000..e3f60f5a9 --- /dev/null +++ b/src/main/scala/scalation/simulation/NH_ErlangProcess.scala @@ -0,0 +1,116 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Sun Apr 6 18:21:33 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Non-Homogeneous (changing arrival rate) Erlang Process (NHEP) + */ + +package scalation +package simulation + +import scala.collection.mutable.ArrayBuffer + +import scalation.mathstat._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `NH_ErlangProcess` class generates data following a Non-Homogeneous Erlang + * Process. + * @param t the terminal time + * @param lambdaf the arrival rate function, lambda(t) + * @param stream the random number stream to use + */ +class NH_ErlangProcess (t: Double, lambdaf: FunctionS2S, stream: Int = 0) + extends ErlangProcess (t, 1.0, stream): // use rate = 1 as it will be adjusted + + private val lambdaBar = func2vector (lambdaf, (0, t)).mean + + override def mean: VectorD = VectorD.fill (1)(lambdaBar * t) // mean of N(t) + + override def pf (z: VectorD): Double = ??? + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Generate all arrival times in the time interval [0, t], returning them + * as a vector. 
+ */ + override def gen: VectorD = + val atime = ArrayBuffer [Double] () + var now = 0.0 + while now <= t do + val lamb = lambdaf (now) // current value of the lambda function + println (s"lamb = $lamb") + now += t_ia.gen / lamb // adjust by dividing current lambda + atime += now + end while + t_a = VectorD (atime) + t_a + end gen + +end NH_ErlangProcess + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `nH_ErlangProcessTest` main function is used to test the `NH_ErlangProcess` class. + * Example of car arrivals and determination of traffic flow (car per 5-minutes + * passing by a sensor). + * > runMain scalation.simulation.nH_ErlangProcessTest + */ +@main def nH_ErlangProcessTest (): Unit = + + val t_end = 50.0 // simulate for 50 minutes + val tl = VectorD.range (0, 101) / 2.0 + def lambdaf (t: Double): Double = 1.5 - 0.001 * (t - 25.0)~^2 + new Plot (tl, func2vector (lambdaf, (0, t_end)), null, "Arrival Rate Function: lambdaf", lines = true) + + val pp = new NH_ErlangProcess (t_end, lambdaf) + println (s"pp.gen = ${pp.gen}") + println (s"pp.num (5) = ${pp.num (5)}") + + val t = VectorD.range (0, 501) / 10.0 + val nt = new VectorI (t.dim) + for i <- t.indices do nt(i) = pp.num (t(i)) + new Plot (t, nt.toDouble, null, "NH_ErlangProcess total cars", lines = true) + + val flw = pp.flow (5.0) + val tflw = VectorD.range (0, 11) * 5.0 + new Plot (tflw, flw.toDouble, null, "NH_ErlangProcess cars per 5 min.", lines = true) + +end nH_ErlangProcessTest + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `nH_ErlangProcessTest2` main function is used to test the `NH_ErlangProcess` class. + * Example showing how to use the `PolyRegression` class to create a lambda function + * based on traffic data. 
+ * > runMain scalation.simulation.nH_ErlangProcessTest2 + */ +@main def nH_ErlangProcessTest2 (): Unit = + + import scalation.modeling._ + + val fileName = "travelTime.csv" + val data = MatrixD.load (fileName) + val ord = 19 + + val (t, y) = (data(?, 0) * 60.0, data(?, 1)) // (time, vehicle count) + new Plot (t, y, null, "traffic data") + val mod = PolyRegression (t, y, ord, null, Regression.hp) + mod.train () + val (yp, qof) = mod.test () + println (mod.report (qof)) + new Plot (t, y, yp, "traffic: actual vs. predicted") + + def lambdaf (tt: Double): Double = mod.predict (tt) + + val pp = new NH_ErlangProcess (t.dim-1, lambdaf) + val flw = pp.flow (1.0).toDouble + new Plot (t, y, flw, "NH_ErlangProcess cars per 1 min.") + + val ft = new TestFit (y.dim) + ft.diagnose (y, flw) + println (FitM.fitMap (ft.fit, QoF.values.map (_.toString))) + +end nH_ErlangProcessTest2 + diff --git a/src/main/scala/scalation/simulation/NH_PoissonProcess.scala b/src/main/scala/scalation/simulation/NH_PoissonProcess.scala index 83701fb1e..1284184f0 100644 --- a/src/main/scala/scalation/simulation/NH_PoissonProcess.scala +++ b/src/main/scala/scalation/simulation/NH_PoissonProcess.scala @@ -5,7 +5,7 @@ * @date Wed Aug 25 15:38:28 EDT 2021 * @see LICENSE (MIT style license file). 
* - * @note Non-Homogeneous Process Process (NHPP) + * @note Non-Homogeneous (changing arrival rate) Poisson Process (NHPP) */ package scalation @@ -23,7 +23,7 @@ import scalation.mathstat._ * @param stream the random number stream to use */ class NH_PoissonProcess (t: Double, lambdaf: FunctionS2S, stream: Int = 0) - extends PoissonProcess (t, 1.0, stream): + extends PoissonProcess (t, 1.0, stream): // use rate = 1 as it will be adjusted private val lambdaBar = func2vector (lambdaf, (0, t)).mean @@ -39,9 +39,9 @@ class NH_PoissonProcess (t: Double, lambdaf: FunctionS2S, stream: Int = 0) val atime = ArrayBuffer [Double] () var now = 0.0 while now <= t do - val lamb = lambdaf (now) // current value of the lambda function + val lamb = lambdaf (now) // current value of the lambda function println (s"lamb = $lamb") - now += t_ia.gen / lamb // adjust by dividing current lambda + now += t_ia.gen / lamb // adjust by dividing current lambda atime += now end while t_a = VectorD (atime) @@ -59,7 +59,7 @@ end NH_PoissonProcess */ @main def nH_PoissonProcessTest (): Unit = - val t_end = 50.0 // simulate for 50 minutes + val t_end = 50.0 // simulate for 50 minutes val tl = VectorD.range (0, 101) / 2.0 def lambdaf (t: Double): Double = 1.5 - 0.001 * (t - 25.0)~^2 new Plot (tl, func2vector (lambdaf, (0, t_end)), null, "Arrival Rate Function: lambdaf", lines = true) @@ -94,7 +94,7 @@ end nH_PoissonProcessTest val data = MatrixD.load (fileName) val ord = 19 - val (t, y) = (data(?, 0) * 60.0, data(?, 1)) // (time, vehicle count) + val (t, y) = (data(?, 0) * 60.0, data(?, 1)) // (time, vehicle count) new Plot (t, y, null, "traffic data") val mod = PolyRegression (t, y, ord, null, Regression.hp) mod.train () diff --git a/src/main/scala/scalation/simulation/PoissonProcess.scala b/src/main/scala/scalation/simulation/PoissonProcess.scala index 6d390f1fc..5b12d674a 100644 --- a/src/main/scala/scalation/simulation/PoissonProcess.scala +++ 
b/src/main/scala/scalation/simulation/PoissonProcess.scala @@ -5,7 +5,7 @@ * @date Wed Aug 25 15:38:28 EDT 2021 * @see LICENSE (MIT style license file). * - * @note Poisson Process + * @note Poisson Process (Counting Process with Exponential Inter-arrival Times) */ package scalation diff --git a/src/main/scala/scalation/simulation/activity/PetriNet.scala b/src/main/scala/scalation/simulation/activity/PetriNet.scala index 91c2541f9..3028547da 100644 --- a/src/main/scala/scalation/simulation/activity/PetriNet.scala +++ b/src/main/scala/scalation/simulation/activity/PetriNet.scala @@ -14,7 +14,8 @@ package activity import java.util.concurrent.ConcurrentLinkedQueue -import collection.mutable.PriorityQueue +import scala.annotation.unused +import scala.collection.mutable.PriorityQueue import scalation.animation.{AnimateCommand, DgAnimator} import scalation.animation.CommandType._ @@ -381,7 +382,7 @@ class ArcI (val place: PlaceI, val transition: Transition, incoming: Boolean, va * @param time the current time * @param firingDelay the time it takes for the transition to fire */ - def _tokenFlow (tokens: VectorI, time: Double, firingDelay: Double): VectorI = + def _tokenFlow (tokens: VectorI, @unused time: Double, firingDelay: Double): VectorI = tokenFlow (tokens, minTokens, rates, firingDelay / scaleFactor) end _tokenFlow @@ -546,11 +547,12 @@ class PetriNet (colors: Array [Color], placeI: Array [PlaceI], placeD: Array [Pl val tokens = pI.tokens for i <- 0 until tokens.dim do // number of tokens by color at this place - for j <- 0 until tokens(i) do + cfor (0, tokens(i)) { _ => val tk_id = Counter.next () println ("PetriNet.initAnimation: token " + tk_id + " for place " + pI.id) cqueue.add (AnimateCommand (CreateToken, tk_id, Ellipse (), "tk" + tk_id, false, colors(i), null, 0, pI.id)) + } // cfor end for end for diff --git a/src/main/scala/scalation/simulation/activity/PetriNetRules.scala b/src/main/scala/scalation/simulation/activity/PetriNetRules.scala index 
c4d4f529d..6f53794ed 100644 --- a/src/main/scala/scalation/simulation/activity/PetriNetRules.scala +++ b/src/main/scala/scalation/simulation/activity/PetriNetRules.scala @@ -12,6 +12,8 @@ package scalation package simulation package activity +import scala.annotation.unused + import scalation.mathstat._ import scalation.random.{Sharp, Variate} @@ -121,10 +123,13 @@ end PetriNetRules //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `PetriNetRulesTest` object is used to test the `PetriNetRules` trait. - * > runMain scalation.simulation.activity.PetriNetRulesTest +/** The `petriNetRulesTest` main function is used to test the `PetriNetRules` trait. + * > runMain scalation.simulation.activity.petriNetRulesTest */ -object PetriNetRulesTest extends App with PetriNetRules: +@main def petriNetRulesTest (): Unit = + + object PetriNetRules_ extends PetriNetRules + import PetriNetRules_._ //:: Set the initial time. @@ -181,10 +186,10 @@ object PetriNetRulesTest extends App with PetriNetRules: println ("\n *** Test fluid flows: differential flow model integral derv\n") - def derv1 (t: Double, y: Double) = y - def derv2 (t: Double, y: Double) = 2.0 * y + def derv1 (@unused t: Double, y: Double) = y + def derv2 (@unused t: Double, y: Double) = 2.0 * y val dervs = Array [Derivative] (derv1, derv2) println ("Fluid flow: place to transition: " + fluidFlow (f, dervs, t0, d)) -end PetriNetRulesTest +end petriNetRulesTest diff --git a/src/main/scala/scalation/simulation/agent/Bus.scalaa b/src/main/scala/scalation/simulation/agent/Bus.scalaa index ed737aac0..f525c8c79 100644 --- a/src/main/scala/scalation/simulation/agent/Bus.scalaa +++ b/src/main/scala/scalation/simulation/agent/Bus.scalaa @@ -58,7 +58,6 @@ abstract class Bus (name: String, director: Model, lTime: Variate, cap: Int, pos director.schedule (r_i, i) // FIX - use longer delay rider(i) = null // open seat i nRiders -= 1 // decrement the number of riders - end if end for end unload diff 
--git a/src/main/scala/scalation/simulation/agent/EdgeAgents.scala b/src/main/scala/scalation/simulation/agent/EdgeAgents.scalaa similarity index 100% rename from src/main/scala/scalation/simulation/agent/EdgeAgents.scala rename to src/main/scala/scalation/simulation/agent/EdgeAgents.scalaa diff --git a/src/main/scala/scalation/simulation/agent/Gate.scala b/src/main/scala/scalation/simulation/agent/Gate.scalaa similarity index 100% rename from src/main/scala/scalation/simulation/agent/Gate.scala rename to src/main/scala/scalation/simulation/agent/Gate.scalaa diff --git a/src/main/scala/scalation/simulation/agent/Junction.scala b/src/main/scala/scalation/simulation/agent/Junction.scalaa similarity index 100% rename from src/main/scala/scalation/simulation/agent/Junction.scala rename to src/main/scala/scalation/simulation/agent/Junction.scalaa diff --git a/src/main/scala/scalation/simulation/agent/Link.scala b/src/main/scala/scalation/simulation/agent/Link.scalaa similarity index 100% rename from src/main/scala/scalation/simulation/agent/Link.scala rename to src/main/scala/scalation/simulation/agent/Link.scalaa diff --git a/src/main/scala/scalation/simulation/agent/Model.scala b/src/main/scala/scalation/simulation/agent/Model.scala deleted file mode 100644 index df06331a8..000000000 --- a/src/main/scala/scalation/simulation/agent/Model.scala +++ /dev/null @@ -1,401 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Yulong Wang, John Miller - * @version 2.0 - * @date Tue Oct 3 19:40:51 EDT 2023 - * @see LICENSE (MIT style license file). 
- * - * @note Base Model Class for Agent-Based Simulation - */ - -package scalation -package simulation.agent - -import java.util.concurrent.Semaphore - -import scala.collection.mutable -import scala.collection.mutable.{PriorityQueue, ArrayBuffer as VEC} - -import scalation.animation.{AnimateCommand, CommandType} -import scalation.animation.CommandType.MoveToken -import scalation.database.Identifiable -import scalation.database.graph.{EdgeType, PGraph, VertexType} -import scalation.mathstat.{StatTable, Statistic, VectorD} -import scalation.simulation.{Completion, Coroutine} -import scalation.scala2d.Colors._ -import scalation.scala2d.Shape - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Model` class maintains a property graph making up the model and - * controls the flow of entities (`SimAgent`s) through the model, following the - * agent-based simulation world-view. It maintains a time-ordered priority queue - * to activate/re-activate each of the entities. Each entity (`SimAgent`) is - * implemented as a `Coroutine` and may be thought of as running in its own thread. - * @param _name the name of this simulation model - * @param reps the number of independent replications - * @param startSim the start time of this simulation - * @param animating whether to animate the model - * @param aniRatio the ratio of simulation speed vs. 
animation speed - */ -class Model (_name: String,val reps: Int = 1, startSim: Double = 0.0, - animating: Boolean = true, aniRatio: Double = 10.0, - width: Int = 800, height: Int = 600) - extends Identifiable (_name) - with Completion: - - protected val graphMod = PGraph (name, Model.vertexTypes, Model.edgeTypes, - animating, aniRatio, width, height) // the graph model - - private val debug = debugf ("Model", false) // debug function - private val flaw = flawf ("Model") // flaw function - private val agenda = PriorityQueue.empty [SimAgent] // time-ordered activation list - private val sema = new Semaphore(0) - - private [agent] var clock = startSim // the simulation clock - private [agent] var simulating = false // the simulation clock - private [agent] val log = Monitor ("simulation") // log for model execution - private [agent] var nAgents = 0 // current number of live agents - private [agent] val statList = VEC[Statistical]() - - var rep = 1 // which rep currently is - - val director = this - - debug ("init", s"name = $name, startSim = $startSim") - - /** The map of statistics vectors records the means of each replication - */ - val statV = mutable.LinkedHashMap [String, VectorD] () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reset the agenda and stateful components for next replication. - */ - def reset(): Unit = - while !agenda.isEmpty do agenda.dequeue () // clean out actors from agenda - // reset stateful components - for waitQueue <- WaitQueue.verts do waitQueue.asInstanceOf [WaitQueue].clear () - for waitQueue <- WaitQueue_LCFS.verts do waitQueue.asInstanceOf [WaitQueue_LCFS].clear () - end reset - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reset and aggregate all statistics. - * @param rep the current replication (1, ... 
reps) - * @param rmax the maximum number of replications/batches - */ - def resetStats(rep: Int, rmax: Int = reps): Unit = - if rep == 1 then - for stat <- getStatistics do statV += stat.name -> new VectorD (rmax) - end if - for stat <- getStatistics do - statV(stat.name)(rep - 1) = stat.mean - stat.reset () - end for - end resetStats - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Execute the simulation (includes scheduling all Sources) returning summary - * statistics. - */ - def simulate (): Unit = - banner (s"start simulation $name at $startSim") - if rep == 1 then graphMod.print () // to be tuned - if rep == 1 && animating then graphMod.display (100000) // FIX - should be adaptive -// return // end before simulating to only examine initial graph - log.trace (this, "starts", this, clock) - for source <- Source.sources do - source.time = clock - schedule (source) // put all sources on agenda - end for - while rep <= reps do - simulating = true - yield2Next (null) - sema.acquire () // waits for all the virtual thread finish - fini (rep) - if rep < reps then reset () // reset for next replication - resetStats (rep) // reset and aggregate statistics - rep += 1 - end while - if reps > 1 then reportV () - end simulate - - private val timeDelayR = 900 // 15 minutes * 60 FIX - generalize - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `Reporter` inner class observes the statistic at a specific time so schedule this reporter. - * @param actTime the activation for the reporter. 
- */ - class Reporter (actTime: Double = clock) extends SimAgent ("reporter", actTime, this): - def act (): Unit = - customReport (statList) - director.yield2Next (this, true) - end act - end Reporter - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Re-simulate over the same components multiple times with different input(source); - * this is not the same as replication, replication input(source) is the same - * @param firstTime whether it is the first time - * @param simStart the time of simulation start - */ - def resimulate (firstTime: Boolean, simStart: Double = 0.0): Unit = - clock = simStart - val myreport = Reporter (clock + timeDelayR) - schedule (myreport) -// if firstTime && rep == 1 then graphMod.print () // to be tuned - if firstTime && rep == 1 && animating then graphMod.display (100000) - log.trace (this, "starts", this, clock) - - if firstTime then - for source <- Source.sources do - source.time = clock - schedule (source) // put all sources on agenda - end for - else - for source <- Source.sources do - source.resetStart () - source.time = simStart - schedule (source) -// reschedule (source) - end for - end if - - while rep <= reps do - simulating = true - yield2Next (null) - sema.acquire () - //fini (rep) // no need to print for now - //if rep < reps then reset () // reset for next replication - reset () // for re-simulating - Coroutine.waitThreadFinish () // clean the buffer - resetStats (rep) // reset and aggregate statistics - rep += 1 - end while - if reps > 1 then reportV () - rep = 1 // reset - end resimulate - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Finish by producing statistical reports and optionally animation. - * Typically animation and reports in pop up window turned off for high - * replications and/or simulation optimization. - * @param rep the replication number (1, ... 
reps) - */ - protected def fini (rep: Int): Unit = - report () // report in terminal -// if reps> 1 then reportV () - if animating then - reportF () // report in new window/frame -// if rep == 1 then dgAni.animate (0, 100000) // only animate first rep -// dgAni.saveImage (DATA_DIR + name + ".png") - end if - end fini - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Report on the statistical results of the simulation in a new GUI window/frame. - */ - protected def reportF (): Unit = new StatTable (s"$name statistics", getStatistics) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Schedule the agent to act (be activated) at agent.time (optionally delayed). - * @param agent the agent to be scheduled - * @param delay the amount of time to delay the agent's activation time - */ - def schedule (agent: SimAgent, delay: Double = 0.0): Unit = - if delay < 0.0 then - flaw ("schedule", s"agent $agent delay time is negative: $delay") - banner ("WARN") - end if - agent.time += delay -// agent.time = clock + delay - if agent.time < clock then // out of order scheduling => WARN - flaw ("schedule", s"agent $agent activation time < $clock") - banner ("WARN") - end if -// debug ("schedule", s"now = $clock: schedule agent $agent") - log.trace (this, "schedules agent", agent, clock) - agenda += agent - end schedule - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reschedule the agent to act (be activated) at agent.time (optionally delayed) - * for those agents ahead of director.clock. 
- * @param agent the agent to be rescheduled - * @param delay the amount of time to delay the agent's activation time - */ - def reschedule (agent: SimAgent, delay: Double = 0.0): Unit = - log.trace (this, "reschedules agent", agent, clock) - agent.time = clock + delay - agenda += agent - end reschedule - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Yield to the next agent, i.e., agent1 -- yield-to -> agent2. - * @param agent1 the currently executing agent - * @param quit whether agent1 wants to quit/terminate - */ - def yield2Next (agent1: SimAgent, quit: Boolean = false): Unit = - if agenda.nonEmpty then - val agent2 = agenda.dequeue () // get agent2 from the agenda - if agent2.time < clock then // out of order execution => QUIT - flaw ("y2n", s"agent $agent2 activation time < $clock") - println (s"this is ${agent2.time}, $clock") -// reschedule (agent2) - schedule (agent2) - banner (s" QUIT this is ${agent2.time} < $clock") - return - end if - - clock = agent2.time // advance the time -// debug ("yield2Next", s"${this.me} resumes ${agent2.me} at $clock") - - if agent1 == null then - log.trace (this, "starts", agent2, clock) - agent2.start () // source needs start first and start this, if there are multiple sources - else - log.trace (agent1, "resumes", agent2, clock) - if quit && !agent1.isInstanceOf [Gate] then nAgents -= 1 // decrement the number of live agents - agent1.yyield (agent2, quit) - - else // the last coroutine - log.trace (this, "stops", agent1, clock) - agent1.yyield (null,quit) - if rep < reps then - simulating = false - agenda.clear () - hasFinished () - else - finishedup () - end if - sema.release () // main thread continue - end if - end yield2Next - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Finish up the simulation. 
- */ - def finishedup (): Unit = - log.trace (this, s"ends", null, clock) -// cleanup () - reset () - log.trace (this, "terminates model", null, clock) - simulating = false - if animating then graphMod.setAniDone () - hasFinished () // signal via semaphore that simulation is finished -// yyield (null, true) // yield and terminate the director - end finishedup - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Put a token command (CreateToken, MoveToken or DestroyToken) on the animation queue. - * @param agent who is being animated - * @param what what animation command - * @param color the color the token - * @param shape the shape of the token - */ - def animate (agent: SimAgent, what: CommandType, color: Color = null, - shape: Shape = null): Unit = - var eid = agent.id - if agent.isInstanceOf [Gate] then eid += 1 // FIX - Gate's vertex is one more - val label = agent.name - val apos = if what == MoveToken then agent.pos(0 to 2) // agent's position (x, y) - else agent.pos // (x, y, w, h) -// debug ("animate", s">>> $label.$eid, $what, $color, $shape, $apos") - if animating then graphMod.add_aniQ (AnimateCommand (what, eid, shape, label, true, color, - apos.toArray, clock)) - end animate - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compare the order of agents based on their activation times. - * @param agent the first agent in comparison - private def orderedAgent (agent1: SimAgent): Ordered [SimAgent] = - new Ordered [SimAgent] - { def compare (agent2: SimAgent) = agent2.time compare agent1.time } - end orderedAgent - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the statistical results of the simulation (statistics for each vertex). - * Includes both sample and time-persistent statistics. 
- */ - def getStatistics: VEC [Statistic] = - val stats = VEC [Statistic] () - for stat <- statList do -// if reps == 1 && !stat.isInstanceOf [Transport] then - stat.addStats (stats) - stats - end getStatistics - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Report on the statistical results of a simulation run. - */ - private def report (): Unit = - println (Statistic.line) - println (Statistic.labels) - println (Statistic.line) - for stat <- statList do - println (stat.durationStat) - if ! stat.isInstanceOf [Source] && ! stat.isInstanceOf [Sink] && ! stat.isInstanceOf [Gate] then - println (stat.persistentStat) - end if - println (Statistic.line) -// customReport (statList) - end report - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Report (custom) on the statistical results of a simulation run. - * @param statList the list of statistics - */ - def customReport (statList: VEC [Statistical]): Unit = - println ("user definable custom report") - end customReport - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Report on the statistical results of the overall simulation as recorded - * in statV (may include multiple replications/batches). - * @param showMeans whether to show the individual run/batch means - */ - protected def reportV (showMeans: Boolean = false): Unit = - println (Statistic.line) - println (Statistic.labels) - println (Statistic.line) - for (k, v) <- statV do - val aStat = new Statistic(k) - aStat.tallyVec(v) - println(aStat) - end for - println (Statistic.line) - end reportV - -end Model - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Model` companion object provides a shutdown method and methods to add - * vertex/edge types to the model. 
- */ -object Model: - - private val vertexTypes = VEC [VertexType] () // collection of vertex types - private val edgeTypes = VEC [EdgeType] () // collection of edge types - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add vertex type vt to the collection of vertex types. - * @param vt the vertex type to add - */ - def add (vt: VertexType): Unit = vertexTypes += vt - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add edge type et to the collection of edge types. - * @param et the edge type to add - */ - def add (et: EdgeType): Unit = edgeTypes += et - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Shutdown the Model execution infrastructure (WARNING: this method should - * only be called right before program termination). Make sure all threads - * have finished (e.g., call `waitFinished`), not just the main thread. - * If `shutdown` is not called, the application may hang. - */ - def shutdown (): Unit = Coroutine.shutdown () - -end Model - diff --git a/src/main/scala/scalation/simulation/agent/Model.scalaa b/src/main/scala/scalation/simulation/agent/Model.scalaa new file mode 100644 index 000000000..950519adf --- /dev/null +++ b/src/main/scala/scalation/simulation/agent/Model.scalaa @@ -0,0 +1,395 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author Yulong Wang, John Miller + * @version 2.0 + * @date Tue Oct 3 19:40:51 EDT 2023 + * @see LICENSE (MIT style license file). 
+ * + * @note Base Model Class for Agent-Based Simulation + */ + +package scalation +package simulation.agent + +import java.util.concurrent.Semaphore + +import scala.collection.mutable +import scala.collection.mutable.{PriorityQueue, ArrayBuffer as VEC} + +import scalation.animation.{AnimateCommand, CommandType} +import scalation.animation.CommandType.MoveToken +import scalation.database.Identifiable +import scalation.database.graph.{EdgeType, PGraph, VertexType} +import scalation.mathstat.{StatTable, Statistic, VectorD} +import scalation.simulation.{Completion, Coroutine} +import scalation.scala2d.Colors._ +import scalation.scala2d.Shape + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Model` class maintains a property graph making up the model and + * controls the flow of entities (`SimAgent`s) through the model, following the + * agent-based simulation world-view. It maintains a time-ordered priority queue + * to activate/re-activate each of the entities. Each entity (`SimAgent`) is + * implemented as a `Coroutine` and may be thought of as running in its own thread. + * @param _name the name of this simulation model + * @param reps the number of independent replications + * @param startSim the start time of this simulation + * @param animating whether to animate the model + * @param aniRatio the ratio of simulation speed vs. 
animation speed + */ +class Model (_name: String,val reps: Int = 1, startSim: Double = 0.0, + animating: Boolean = true, aniRatio: Double = 10.0, + width: Int = 800, height: Int = 600) + extends Identifiable (_name) + with Completion: + + protected val graphMod = PGraph (name, Model.vertexTypes, Model.edgeTypes, + animating, aniRatio, width, height) // the graph model + + private val debug = debugf ("Model", false) // debug function + private val flaw = flawf ("Model") // flaw function + private val agenda = PriorityQueue.empty [SimAgent] // time-ordered activation list + private val sema = new Semaphore(0) + + private [agent] var clock = startSim // the simulation clock + private [agent] var simulating = false // the simulation clock + private [agent] val log = Monitor ("simulation") // log for model execution + private [agent] var nAgents = 0 // current number of live agents + private [agent] val statList = VEC[Statistical]() + + var rep = 1 // which rep currently is + + val director = this + + debug ("init", s"name = $name, startSim = $startSim") + + /** The map of statistics vectors records the means of each replication + */ + val statV = mutable.LinkedHashMap [String, VectorD] () + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Reset the agenda and stateful components for next replication. + */ + def reset(): Unit = + while !agenda.isEmpty do agenda.dequeue () // clean out actors from agenda + // reset stateful components + for waitQueue <- WaitQueue.verts do waitQueue.asInstanceOf [WaitQueue].clear () + for waitQueue <- WaitQueue_LCFS.verts do waitQueue.asInstanceOf [WaitQueue_LCFS].clear () + end reset + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Reset and aggregate all statistics. + * @param rep the current replication (1, ... 
reps) + * @param rmax the maximum number of replications/batches + */ + def resetStats(rep: Int, rmax: Int = reps): Unit = + if rep == 1 then + for stat <- getStatistics do statV += stat.name -> new VectorD (rmax) + for stat <- getStatistics do + statV(stat.name)(rep - 1) = stat.mean + stat.reset () + end for + end resetStats + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Execute the simulation (includes scheduling all Sources) returning summary + * statistics. + */ + def simulate (): Unit = + banner (s"start simulation $name at $startSim") + if rep == 1 then graphMod.print () // to be tuned + if rep == 1 && animating then graphMod.display (100000) // FIX - should be adaptive +// return // end before simulating to only examine initial graph + log.trace (this, "starts", this, clock) + for source <- Source.sources do + source.time = clock + schedule (source) // put all sources on agenda + end for + while rep <= reps do + simulating = true + yield2Next (null) + sema.acquire () // waits for all the virtual thread finish + fini (rep) + if rep < reps then reset () // reset for next replication + resetStats (rep) // reset and aggregate statistics + rep += 1 + end while + if reps > 1 then reportV () + end simulate + + private val timeDelayR = 900 // 15 minutes * 60 FIX - generalize + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The `Reporter` inner class observes the statistic at a specific time so schedule this reporter. + * @param actTime the activation for the reporter. 
+ */ + class Reporter (actTime: Double = clock) extends SimAgent ("reporter", actTime, this): + def act (): Unit = + customReport (statList) + director.yield2Next (this, true) + end act + end Reporter + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Re-simulate over the same components multiple times with different input(source); + * this is not the same as replication, replication input(source) is the same + * @param firstTime whether it is the first time + * @param simStart the time of simulation start + */ + def resimulate (firstTime: Boolean, simStart: Double = 0.0): Unit = + clock = simStart + val myreport = Reporter (clock + timeDelayR) + schedule (myreport) +// if firstTime && rep == 1 then graphMod.print () // to be tuned + if firstTime && rep == 1 && animating then graphMod.display (100000) + log.trace (this, "starts", this, clock) + + if firstTime then + for source <- Source.sources do + source.time = clock + schedule (source) // put all sources on agenda + end for + else + for source <- Source.sources do + source.resetStart () + source.time = simStart + schedule (source) +// reschedule (source) + end for + end if + + while rep <= reps do + simulating = true + yield2Next (null) + sema.acquire () + //fini (rep) // no need to print for now + //if rep < reps then reset () // reset for next replication + reset () // for re-simulating + Coroutine.waitThreadFinish () // clean the buffer + resetStats (rep) // reset and aggregate statistics + rep += 1 + end while + if reps > 1 then reportV () + rep = 1 // reset + end resimulate + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Finish by producing statistical reports and optionally animation. + * Typically animation and reports in pop up window turned off for high + * replications and/or simulation optimization. + * @param rep the replication number (1, ... 
reps) + */ + protected def fini (rep: Int): Unit = + report () // report in terminal +// if reps> 1 then reportV () + if animating then + reportF () // report in new window/frame +// if rep == 1 then dgAni.animate (0, 100000) // only animate first rep +// dgAni.saveImage (DATA_DIR + name + ".png") + end fini + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Report on the statistical results of the simulation in a new GUI window/frame. + */ + protected def reportF (): Unit = new StatTable (s"$name statistics", getStatistics) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Schedule the agent to act (be activated) at agent.time (optionally delayed). + * @param agent the agent to be scheduled + * @param delay the amount of time to delay the agent's activation time + */ + def schedule (agent: SimAgent, delay: Double = 0.0): Unit = + if delay < 0.0 then + flaw ("schedule", s"agent $agent delay time is negative: $delay") + banner ("WARN") + agent.time += delay +// agent.time = clock + delay + if agent.time < clock then // out of order scheduling => WARN + flaw ("schedule", s"agent $agent activation time < $clock") + banner ("WARN") +// debug ("schedule", s"now = $clock: schedule agent $agent") + log.trace (this, "schedules agent", agent, clock) + agenda += agent + end schedule + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Reschedule the agent to act (be activated) at agent.time (optionally delayed) + * for those agents ahead of director.clock. 
+ * @param agent the agent to be rescheduled + * @param delay the amount of time to delay the agent's activation time + */ + def reschedule (agent: SimAgent, delay: Double = 0.0): Unit = + log.trace (this, "reschedules agent", agent, clock) + agent.time = clock + delay + agenda += agent + end reschedule + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Yield to the next agent, i.e., agent1 -- yield-to -> agent2. + * @param agent1 the currently executing agent + * @param quit whether agent1 wants to quit/terminate + */ + def yield2Next (agent1: SimAgent, quit: Boolean = false): Unit = + if agenda.nonEmpty then + val agent2 = agenda.dequeue () // get agent2 from the agenda + if agent2.time < clock then // out of order execution => QUIT + flaw ("y2n", s"agent $agent2 activation time < $clock") + println (s"this is ${agent2.time}, $clock") +// reschedule (agent2) + schedule (agent2) + banner (s" QUIT this is ${agent2.time} < $clock") + return + end if + + clock = agent2.time // advance the time +// debug ("yield2Next", s"${this.me} resumes ${agent2.me} at $clock") + + if agent1 == null then + log.trace (this, "starts", agent2, clock) + agent2.start () // source needs start first and start this, if there are multiple sources + else + log.trace (agent1, "resumes", agent2, clock) + if quit && !agent1.isInstanceOf [Gate] then nAgents -= 1 // decrement the number of live agents + agent1.yyield (agent2, quit) + + else // the last coroutine + log.trace (this, "stops", agent1, clock) + agent1.yyield (null,quit) + if rep < reps then + simulating = false + agenda.clear () + hasFinished () + else + finishedup () + sema.release () // main thread continue + end if + end yield2Next + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Finish up the simulation. 
+ */ + def finishedup (): Unit = + log.trace (this, s"ends", null, clock) +// cleanup () + reset () + log.trace (this, "terminates model", null, clock) + simulating = false + if animating then graphMod.setAniDone () + hasFinished () // signal via semaphore that simulation is finished +// yyield (null, true) // yield and terminate the director + end finishedup + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Put a token command (CreateToken, MoveToken or DestroyToken) on the animation queue. + * @param agent who is being animated + * @param what what animation command + * @param color the color the token + * @param shape the shape of the token + */ + def animate (agent: SimAgent, what: CommandType, color: Color = null, + shape: Shape = null): Unit = + var eid = agent.id + if agent.isInstanceOf [Gate] then eid += 1 // FIX - Gate's vertex is one more + val label = agent.name + val apos = if what == MoveToken then agent.pos(0 to 2) // agent's position (x, y) + else agent.pos // (x, y, w, h) +// debug ("animate", s">>> $label.$eid, $what, $color, $shape, $apos") + if animating then graphMod.add_aniQ (AnimateCommand (what, eid, shape, label, true, color, + apos.toArray, clock)) + end animate + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compare the order of agents based on their activation times. + * @param agent the first agent in comparison + private def orderedAgent (agent1: SimAgent): Ordered [SimAgent] = + new Ordered [SimAgent] + { def compare (agent2: SimAgent) = agent2.time compare agent1.time } + end orderedAgent + */ + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the statistical results of the simulation (statistics for each vertex). + * Includes both sample and time-persistent statistics. 
+ */ + def getStatistics: VEC [Statistic] = + val stats = VEC [Statistic] () + for stat <- statList do +// if reps == 1 && !stat.isInstanceOf [Transport] then + stat.addStats (stats) + stats + end getStatistics + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Report on the statistical results of a simulation run. + */ + private def report (): Unit = + println (Statistic.line) + println (Statistic.labels) + println (Statistic.line) + for stat <- statList do + println (stat.durationStat) + if ! stat.isInstanceOf [Source] && ! stat.isInstanceOf [Sink] && ! stat.isInstanceOf [Gate] then + println (stat.persistentStat) + println (Statistic.line) +// customReport (statList) + end report + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Report (custom) on the statistical results of a simulation run. + * @param statList the list of statistics + */ + def customReport (statList: VEC [Statistical]): Unit = + println ("user definable custom report") + end customReport + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Report on the statistical results of the overall simulation as recorded + * in statV (may include multiple replications/batches). + * @param showMeans whether to show the individual run/batch means + */ + protected def reportV (showMeans: Boolean = false): Unit = + println (Statistic.line) + println (Statistic.labels) + println (Statistic.line) + for (k, v) <- statV do + val aStat = new Statistic(k) + aStat.tallyVec(v) + println(aStat) + end for + println (Statistic.line) + end reportV + +end Model + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Model` companion object provides a shutdown method and methods to add + * vertex/edge types to the model. 
+ */ +object Model: + + private val vertexTypes = VEC [VertexType] () // collection of vertex types + private val edgeTypes = VEC [EdgeType] () // collection of edge types + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Add vertex type vt to the collection of vertex types. + * @param vt the vertex type to add + */ + def add (vt: VertexType): Unit = vertexTypes += vt + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Add edge type et to the collection of edge types. + * @param et the edge type to add + */ + def add (et: EdgeType): Unit = edgeTypes += et + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Shutdown the Model execution infrastructure (WARNING: this method should + * only be called right before program termination). Make sure all threads + * have finished (e.g., call `waitFinished`), not just the main thread. + * If `shutdown` is not called, the application may hang. + */ + def shutdown (): Unit = Coroutine.shutdown () + +end Model + diff --git a/src/main/scala/scalation/simulation/agent/Monitor.scala b/src/main/scala/scalation/simulation/agent/Monitor.scala deleted file mode 100644 index 8df553678..000000000 --- a/src/main/scala/scalation/simulation/agent/Monitor.scala +++ /dev/null @@ -1,83 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Sep 7 15:05:06 EDT 2009 - * @see LICENSE (MIT style license file). - * - * @note Monitor is used for Tracing Action/Events - */ - -package scalation -package simulation.agent - -import scalation.database.Identifiable - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Monitor` class is used to trace the actions/events in the models. 
- * @param project the project to be monitored - */ -case class Monitor (project: String = "simulation"): - - /** Flag indicating whether tracing is on (initially on) - */ - private var tracing = true - - /** Use `EasyWriter` to make it easy to switch from standard out to a (log) file - */ - private val ew = new EasyWriter (project, "monitor.log") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Toggle output destination from default of (log) file to standard output. etc. - */ - def toggle (): Unit = ew.toggle () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Turn tracing off. - */ - def traceOff (): Unit = tracing = false - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Turn tracing back on. - */ - def traceOn (): Unit = tracing = true - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Trace an action/event. - * @param who who caused the action - * @param what what was the action - * @param whom whom did the action effect - * @param when when was the action taken - */ - def trace (who: Identifiable, what: String, whom: Identifiable, when: Double): Unit = - if tracing then - if whom == null then - ew.println (s"${who.me} \t $what \t at time $when.") - else - ew.println (s"${who.me} \t $what \t ${whom.me} \t at time $when.") - end if - end if - end trace - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Finish up by flushing and closing the log. - */ - def finish (): Unit = ew.finish () - -end Monitor - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `MonitorTest` function is used to test the `Monitor` class. 
- * > runMain scalation.simulation.agent.monitorTest - */ -@main def monitorTest (): Unit = - - object Mon extends Identifiable ("MonitorTest") - - val log = Monitor ("simulation") - log.trace (Mon, "writes log entry", null, 0.0) - log.finish () - -end monitorTest - diff --git a/src/main/scala/scalation/simulation/agent/Monitor.scalaa b/src/main/scala/scalation/simulation/agent/Monitor.scalaa new file mode 100644 index 000000000..66d477ce3 --- /dev/null +++ b/src/main/scala/scalation/simulation/agent/Monitor.scalaa @@ -0,0 +1,82 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Mon Sep 7 15:05:06 EDT 2009 + * @see LICENSE (MIT style license file). + * + * @note Monitor is used for Tracing Action/Events + */ + +package scalation +package simulation.agent + +import scalation.database.Identifiable + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Monitor` class is used to trace the actions/events in the models. + * @param project the project to be monitored + */ +case class Monitor (project: String = "simulation"): + + /** Flag indicating whether tracing is on (initially on) + */ + private var tracing = true + + /** Use `EasyWriter` to make it easy to switch from standard out to a (log) file + */ + private val ew = new EasyWriter (project, "monitor.log") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Toggle output destination from default of (log) file to standard output. etc. + */ + def toggle (): Unit = ew.toggle () + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Turn tracing off. + */ + def traceOff (): Unit = tracing = false + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Turn tracing back on. 
+ */ + def traceOn (): Unit = tracing = true + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Trace an action/event. + * @param who who caused the action + * @param what what was the action + * @param whom whom did the action effect + * @param when when was the action taken + */ + def trace (who: Identifiable, what: String, whom: Identifiable, when: Double): Unit = + if tracing then + if whom == null then + ew.println (s"${who.me} \t $what \t at time $when.") + else + ew.println (s"${who.me} \t $what \t ${whom.me} \t at time $when.") + end if + end trace + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Finish up by flushing and closing the log. + */ + def finish (): Unit = ew.finish () + +end Monitor + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `MonitorTest` function is used to test the `Monitor` class. + * > runMain scalation.simulation.agent.monitorTest + */ +@main def monitorTest (): Unit = + + object Mon extends Identifiable ("MonitorTest") + + val log = Monitor ("simulation") + log.trace (Mon, "writes log entry", null, 0.0) + log.finish () + +end monitorTest + diff --git a/src/main/scala/scalation/simulation/agent/QueueOps.scala b/src/main/scala/scalation/simulation/agent/QueueOps.scalaa similarity index 100% rename from src/main/scala/scalation/simulation/agent/QueueOps.scala rename to src/main/scala/scalation/simulation/agent/QueueOps.scalaa diff --git a/src/main/scala/scalation/simulation/agent/Resource.scala b/src/main/scala/scalation/simulation/agent/Resource.scalaa similarity index 100% rename from src/main/scala/scalation/simulation/agent/Resource.scala rename to src/main/scala/scalation/simulation/agent/Resource.scalaa diff --git a/src/main/scala/scalation/simulation/agent/Route.scala b/src/main/scala/scalation/simulation/agent/Route.scalaa similarity index 100% rename from src/main/scala/scalation/simulation/agent/Route.scala 
rename to src/main/scala/scalation/simulation/agent/Route.scalaa diff --git a/src/main/scala/scalation/simulation/agent/SimAgent.scala b/src/main/scala/scalation/simulation/agent/SimAgent.scala deleted file mode 100644 index 1d5dd5452..000000000 --- a/src/main/scala/scalation/simulation/agent/SimAgent.scala +++ /dev/null @@ -1,213 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Sep 20 15:47:16 EDT 2021 - * @see LICENSE (MIT style license file). - * - * @note Active Entity/Simulation Agent - */ - -package scalation -package simulation.agent - -import scala.math.{cos, sin} - -import scalation.database.{Identifiable, Spatial, Temporal} -import scalation.database.graph.{Edge, Element, Topological} -import scalation.mathstat.VectorD - -import simulation.Coroutine - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `SimAgent` companion object provides static information for the class. - * The director keeps track of the current number of live agents (nAgents). - * `Gate`s are not considered live, since they simply cycle until the simulation ends. - * `Source`s are considered live, since they produce application agents and terminate - * when their production finishes. 
- */ -object SimAgent: - - private val wh = (10.0, 10.0) // default display size - - private var THROUGH = false // yield THROUGH (true) or TO director (false) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** By default each yield is TO an active director (has an act method), but a - * faster option is yield THROUGH a passive director to the first `SimAgent` in - * the director's agenda, e.g., `Model2` calls `setTHROUGH` to set THROUGH to true - */ - def setTHROUGH (): Unit = THROUGH = true - -end SimAgent - -import SimAgent._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `SimAgent` abstract class represents entities that are active in the model. - * The act abstract method, which specifies entity behavior, must be defined - * for each subclass. Each `SimAgent` extends extends `Coroutine` and may be - * roughly thought of as running in its own thread/virtual-thread. - * SimAgent adds knowledge of its own properties, the agents it follows, and the - * component it is currently at. 
- * @param _name the name of this simulation agent (name from `Identifiable`) - * @param _time the activation time for this agent - * @param director the director controlling the model - * @param _pos the position (Euclidean coordinates) of this agent - * @param loc the location (graph coordinates) of this agent - * @param prop the properties (Map) for this agent, e.g., color, weight - */ -abstract class SimAgent (_name: String, _time: Double, director: Model, - _pos: VectorD = VectorD (0, 0, wh._1, wh._2), - var loc: (Element, Double) = (null, 0.0), - val prop: Map [String, ValueType] = null) - extends Coroutine (_name) - with Identifiable (_name) - with Temporal (_time) - with Spatial (_pos) - with Topological (loc._1, loc._2): // (element, distance) -// with Ordered [SimAgent]: // ordered in time -// with PartiallyOrdered [SimAgent]: // partially ordered in space - - var subtype = 0 // indicator of subtype of this agent - - private val debug = debugf ("SimAgent", true) // debug function - - private [agent] var fore: SimAgent = null // the SimAgent ahead - private [agent] var aft: SimAgent = null // the SimAgent behind - private [agent] val arrivalT = time // time agent started/arrived at the model - private [agent] var nextTransport: Transport = null // the next transport to move along - - if ! this.isInstanceOf [Gate] then director.nAgents += 1 // increment the number of live agents - - debug ("init", s" <-- SimAgent $me created at time $time = ${director.clock}") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compare the activation times of the two agents, this and other. - * Their activation times are used to order them in the director's agenda - * (a time-based HPF priority queue). 
- * @param other the other agent to compare with this - override def compare (other: SimAgent): Int = other.time compare time - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the initial position of this agent using the given x and y coordinates. - * Add half width and height to position center rather than top-left. - * @param x the x-coordinate - * @param y the y-coordinate - */ - def setPos (x: Double, y: Double): Unit = - pos(0) = x - wh._1 / 2.0 // x - half width - pos(1) = y - wh._2 / 2.0 // y - half height - end setPos - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the position of this agent by moving distance units in the direction - * determined by the angle. - * @param distance the incremental distance to move - * @param angle the angle/direction to move in (0 radians => move right) - */ - def updatePos (distance: Double, angle: Double): Unit = - pos(0) += cos (angle) * distance - pos(1) += sin (angle) * distance - end updatePos - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the location of this agent by moving distance units along the - * given edge. - * @param distance the incremental distance to move along the edge - * @param edge the current edge (typically the same one it was on) - */ - def updateLoc (distance: Double, edge: Edge = null): Unit = - loc = if edge == null then (loc._1, loc._2 + distance) // same edge - else (edge, distance) // new edge - end updateLoc - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The abstract method, act, is defined in each subclass to provide specific - * behavior. - */ - def act (): Unit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compare two spatial objects based on their space coordinates. 
- * @param other the other item to compare with this item - */ - override def tryCompareTo [B >: SimAgent: AsPartiallyOrdered] (other: B): Option [Int] = - val oth = other.asInstanceOf [SimAgent] - pos tryCompareTo oth.pos - end tryCompareTo - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find the `SimAgent` ahead (e.g., to follow). Must be nearest ahead in your element. - * @param d the maximum allowed distance to be considered in the neighborhood - */ - def findFore (d: Double): SimAgent = - var minDiff = Double.MaxValue - var minA: SimAgent = null - - val agents = if d > 0.0 then neighbors (d) else neighbors - for a <- agents do - val diff = a.dist - dist - if diff > 0 && diff < minDiff then - minDiff = diff - minA = a.asInstanceOf [SimAgent] - end if - end for - minA - end findFore - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reorient the simulation agent behind when making a change (e.g., exit, - * turn or lane change). - */ - def reorient (): Unit = aft.fore = fore - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Yield control TO the director so the director can take the next action. - * For efficiency, can yield THROUGH the director to the next agent, rather - * than TO the director itself. CURRENTLY ONLY THROUGH WORKS. - * @param quit the flag indicating whether this agent is done - */ - def yieldToDirector (quit: Boolean = false): Unit = - director.yield2Next (this, quit) // skips resuming director's act method -/* - if THROUGH then - director.yield2Next (this, quit) // skips resuming director's act method - else - director.log.trace (this, "resumes", director, director.clock) - if quit && ! 
this.isInstanceOf [Gate] then director.nAgents -= 1 // decrement the number of live agents - yyield (director, quit) // resumes the director's act method - end if -*/ - end yieldToDirector - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the simulation agent to a string. - */ - override def toString: String = s"SimAgent ($me, $time, $pos, $loc)" - -end SimAgent - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `simAgentTest` function tests the `SimAgent` class. - * > runMain scalation.simulation.agent.simAgentTest - */ -@main def simAgentTest (): Unit = - - val director = Model ("TestModel") - - case class TestAgent () extends SimAgent ("TestAgent", 0.0, director): - def act (): Unit = - println ("act") - yieldToDirector (true) - end act - end TestAgent - - val myAgent = TestAgent () - println (s"myAgent = $myAgent") - myAgent.start () - -end simAgentTest - diff --git a/src/main/scala/scalation/simulation/agent/SimAgent.scalaa b/src/main/scala/scalation/simulation/agent/SimAgent.scalaa new file mode 100644 index 000000000..13c084356 --- /dev/null +++ b/src/main/scala/scalation/simulation/agent/SimAgent.scalaa @@ -0,0 +1,211 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Mon Sep 20 15:47:16 EDT 2021 + * @see LICENSE (MIT style license file). + * + * @note Active Entity/Simulation Agent + */ + +package scalation +package simulation.agent + +import scala.math.{cos, sin} + +import scalation.database.{Identifiable, Spatial, Temporal} +import scalation.database.graph.{Edge, Element, Topological} +import scalation.mathstat.VectorD + +import simulation.Coroutine + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SimAgent` companion object provides static information for the class. 
+ * The director keeps track of the current number of live agents (nAgents). + * `Gate`s are not considered live, since they simply cycle until the simulation ends. + * `Source`s are considered live, since they produce application agents and terminate + * when their production finishes. + */ +object SimAgent: + + private val wh = (10.0, 10.0) // default display size + + private var THROUGH = false // yield THROUGH (true) or TO director (false) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** By default each yield is TO an active director (has an act method), but a + * faster option is yield THROUGH a passive director to the first `SimAgent` in + * the director's agenda, e.g., `Model2` calls `setTHROUGH` to set THROUGH to true + */ + def setTHROUGH (): Unit = THROUGH = true + +end SimAgent + +import SimAgent._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `SimAgent` abstract class represents entities that are active in the model. + * The act abstract method, which specifies entity behavior, must be defined + * for each subclass. Each `SimAgent` extends extends `Coroutine` and may be + * roughly thought of as running in its own thread/virtual-thread. + * SimAgent adds knowledge of its own properties, the agents it follows, and the + * component it is currently at. 
+ * @param _name the name of this simulation agent (name from `Identifiable`) + * @param _time the activation time for this agent + * @param director the director controlling the model + * @param _pos the position (Euclidean coordinates) of this agent + * @param loc the location (graph coordinates) of this agent + * @param prop the properties (Map) for this agent, e.g., color, weight + */ +abstract class SimAgent (_name: String, _time: Double, director: Model, + _pos: VectorD = VectorD (0, 0, wh._1, wh._2), + var loc: (Element, Double) = (null, 0.0), + val prop: Map [String, ValueType] = null) + extends Coroutine (_name) + with Identifiable (_name) + with Temporal (_time) + with Spatial (_pos) + with Topological (loc._1, loc._2): // (element, distance) +// with Ordered [SimAgent]: // ordered in time +// with PartiallyOrdered [SimAgent]: // partially ordered in space + + var subtype = 0 // indicator of subtype of this agent + + private val debug = debugf ("SimAgent", true) // debug function + + private [agent] var fore: SimAgent = null // the SimAgent ahead + private [agent] var aft: SimAgent = null // the SimAgent behind + private [agent] val arrivalT = time // time agent started/arrived at the model + private [agent] var nextTransport: Transport = null // the next transport to move along + + if ! this.isInstanceOf [Gate] then director.nAgents += 1 // increment the number of live agents + + debug ("init", s" <-- SimAgent $me created at time $time = ${director.clock}") + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compare the activation times of the two agents, this and other. + * Their activation times are used to order them in the director's agenda + * (a time-based HPF priority queue). 
+ * @param other the other agent to compare with this + override def compare (other: SimAgent): Int = other.time compare time + */ + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Set the initial position of this agent using the given x and y coordinates. + * Add half width and height to position center rather than top-left. + * @param x the x-coordinate + * @param y the y-coordinate + */ + def setPos (x: Double, y: Double): Unit = + pos(0) = x - wh._1 / 2.0 // x - half width + pos(1) = y - wh._2 / 2.0 // y - half height + end setPos + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Update the position of this agent by moving distance units in the direction + * determined by the angle. + * @param distance the incremental distance to move + * @param angle the angle/direction to move in (0 radians => move right) + */ + def updatePos (distance: Double, angle: Double): Unit = + pos(0) += cos (angle) * distance + pos(1) += sin (angle) * distance + end updatePos + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Update the location of this agent by moving distance units along the + * given edge. + * @param distance the incremental distance to move along the edge + * @param edge the current edge (typically the same one it was on) + */ + def updateLoc (distance: Double, edge: Edge = null): Unit = + loc = if edge == null then (loc._1, loc._2 + distance) // same edge + else (edge, distance) // new edge + end updateLoc + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** The abstract method, act, is defined in each subclass to provide specific + * behavior. + */ + def act (): Unit + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Compare two spatial objects based on their space coordinates. 
+ * @param other the other item to compare with this item + */ + override def tryCompareTo [B >: SimAgent: AsPartiallyOrdered] (other: B): Option [Int] = + val oth = other.asInstanceOf [SimAgent] + pos tryCompareTo oth.pos + end tryCompareTo + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Find the `SimAgent` ahead (e.g., to follow). Must be nearest ahead in your element. + * @param d the maximum allowed distance to be considered in the neighborhood + */ + def findFore (d: Double): SimAgent = + var minDiff = Double.MaxValue + var minA: SimAgent = null + + val agents = if d > 0.0 then neighbors (d) else neighbors + for a <- agents do + val diff = a.dist - dist + if diff > 0 && diff < minDiff then + minDiff = diff + minA = a.asInstanceOf [SimAgent] + end for + minA + end findFore + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Reorient the simulation agent behind when making a change (e.g., exit, + * turn or lane change). + */ + def reorient (): Unit = aft.fore = fore + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Yield control TO the director so the director can take the next action. + * For efficiency, can yield THROUGH the director to the next agent, rather + * than TO the director itself. CURRENTLY ONLY THROUGH WORKS. + * @param quit the flag indicating whether this agent is done + */ + def yieldToDirector (quit: Boolean = false): Unit = + director.yield2Next (this, quit) // skips resuming director's act method +/* + if THROUGH then + director.yield2Next (this, quit) // skips resuming director's act method + else + director.log.trace (this, "resumes", director, director.clock) + if quit && ! 
this.isInstanceOf [Gate] then director.nAgents -= 1 // decrement the number of live agents + yyield (director, quit) // resumes the director's act method +*/ + end yieldToDirector + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Convert the simulation agent to a string. + */ + override def toString: String = s"SimAgent ($me, $time, $pos, $loc)" + +end SimAgent + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `simAgentTest` function tests the `SimAgent` class. + * > runMain scalation.simulation.agent.simAgentTest + */ +@main def simAgentTest (): Unit = + + val director = Model ("TestModel") + + case class TestAgent () extends SimAgent ("TestAgent", 0.0, director): + def act (): Unit = + println ("act") + yieldToDirector (true) + end act + end TestAgent + + val myAgent = TestAgent () + println (s"myAgent = $myAgent") + myAgent.start () + +end simAgentTest + diff --git a/src/main/scala/scalation/simulation/agent/Sink.scala b/src/main/scala/scalation/simulation/agent/Sink.scalaa similarity index 100% rename from src/main/scala/scalation/simulation/agent/Sink.scala rename to src/main/scala/scalation/simulation/agent/Sink.scalaa diff --git a/src/main/scala/scalation/simulation/agent/Source.scala b/src/main/scala/scalation/simulation/agent/Source.scalaa similarity index 100% rename from src/main/scala/scalation/simulation/agent/Source.scala rename to src/main/scala/scalation/simulation/agent/Source.scalaa diff --git a/src/main/scala/scalation/simulation/agent/Statistical.scala b/src/main/scala/scalation/simulation/agent/Statistical.scalaa similarity index 100% rename from src/main/scala/scalation/simulation/agent/Statistical.scala rename to src/main/scala/scalation/simulation/agent/Statistical.scalaa diff --git a/src/main/scala/scalation/simulation/agent/Transport.scala b/src/main/scala/scalation/simulation/agent/Transport.scalaa similarity index 100% rename from 
src/main/scala/scalation/simulation/agent/Transport.scala rename to src/main/scala/scalation/simulation/agent/Transport.scalaa diff --git a/src/main/scala/scalation/simulation/agent/WaitQueue.scala b/src/main/scala/scalation/simulation/agent/WaitQueue.scalaa similarity index 100% rename from src/main/scala/scalation/simulation/agent/WaitQueue.scala rename to src/main/scala/scalation/simulation/agent/WaitQueue.scalaa diff --git a/src/main/scala/scalation/simulation/agent/WaitQueue_LCFS.scala b/src/main/scala/scalation/simulation/agent/WaitQueue_LCFS.scalaa similarity index 100% rename from src/main/scala/scalation/simulation/agent/WaitQueue_LCFS.scala rename to src/main/scala/scalation/simulation/agent/WaitQueue_LCFS.scalaa diff --git a/src/main/scala/scalation/simulation/agent/example_1/Bank.scala b/src/main/scala/scalation/simulation/agent/example_1/Bank.scalaa similarity index 100% rename from src/main/scala/scalation/simulation/agent/example_1/Bank.scala rename to src/main/scala/scalation/simulation/agent/example_1/Bank.scalaa diff --git a/src/main/scala/scalation/simulation/agent/example_1/CallCenter.scala b/src/main/scala/scalation/simulation/agent/example_1/CallCenter.scalaa similarity index 100% rename from src/main/scala/scalation/simulation/agent/example_1/CallCenter.scala rename to src/main/scala/scalation/simulation/agent/example_1/CallCenter.scalaa diff --git a/src/main/scala/scalation/simulation/agent/example_1/Traffic2L.scala b/src/main/scala/scalation/simulation/agent/example_1/Traffic2L.scalaa similarity index 100% rename from src/main/scala/scalation/simulation/agent/example_1/Traffic2L.scala rename to src/main/scala/scalation/simulation/agent/example_1/Traffic2L.scalaa diff --git a/src/main/scala/scalation/simulation/agent/example_1/Traffic4L.scala b/src/main/scala/scalation/simulation/agent/example_1/Traffic4L.scala deleted file mode 100644 index 6450fddab..000000000 --- a/src/main/scala/scalation/simulation/agent/example_1/Traffic4L.scala +++ 
/dev/null @@ -1,151 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Sep 20 15:47:16 EDT 2021 - * @see LICENSE (MIT style license file). - * - * @note Example Model: Traffic4L (Four-Lane) for Agent-Based Simulation - */ - -package scalation -package simulation.agent -package example_1 - -import scala.collection.mutable.{ArrayBuffer => VEC} - -import scalation.random.{Bernoulli, Sharp, Uniform} -import scalation.random.RandomSeeds.N_STREAMS - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `runTraffic4L` function is used to launch the `Traffic4LModel` class. - * > runMain scalation.simulation.agent.example_1.runTraffic4L - */ -@main def runTraffic4L (): Unit = new Traffic4LModel () - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Traffic4LModel` class simulates an intersection with four traffic lights - * `Gates` and four roads. Each road consists of two routes with one in each - * direction. Each `Route` has two lanes (`Transport`s). - * @param name the name of the simulation model - * @param reps the number of independent replications to run - * @param startSim the start time of the simulation - * @param animating whether to animate the model - * @param aniRatio the ratio of simulation speed vs. 
animation speed - * @param nStop the number arrivals before stopping - * @param stream the base random number stream (0 to 999) - */ -class Traffic4LModel (name: String = "Traffic4L", reps: Int = 1, startSim: Double = 0.0, - animating: Boolean = true, aniRatio: Double = 8.0, - nStop: Int = 1, stream: Int = 0) - extends Model (name, reps, startSim, animating, aniRatio): - - //-------------------------------------------------- - // Initialize Model Constants - - val iaTime = (4000.0, 6000.0) // (lower, upper) on inter-arrival time - val onTime = 8000.0 // on (green-light) time for North-South traffic - val offTime = 6000.0 // off (red-light) time for North-South traffic - val mvTime = (2900.0, 3100.0) // (lower, upper) on move time - - //-------------------------------------------------- - // Create Random Variates (RVs) - - val iArrivalRV = Uniform (iaTime, stream) - val onTimeRV = Sharp (onTime, (stream + 1) % N_STREAMS) - val offTimeRV = Sharp (offTime, (stream + 2) % N_STREAMS) - val moveRV = Uniform (mvTime, (stream + 3) % N_STREAMS) - val laneRV = Bernoulli (stream = (stream + 4) % N_STREAMS) - - //-------------------------------------------------- - // Create the Graph Model: Vertices and Edges - - val base = (800.0, 400.0) - - val source = Source.group (this, 0.0, () => Car (), nStop, null, base, - ("s1N", iArrivalRV, 0, (-16, -250)), // from North - ("s1E", iArrivalRV, 1, (250, -16)), - ("s1S", iArrivalRV, 2, (16, 250)), - ("s1W", iArrivalRV, 3, (-250, 16))) - - val queue = WaitQueue.group (this, Int.MaxValue, null, base, - ("q1N", (-16, -40)), // before North light - ("q1E", (40, -16)), - ("q1S", (16, 40)), - ("q1W", (-40, 16))) - - val light = Gate.group (this, 0.0, onTimeRV, offTimeRV, 15, null, base, - ("l1N", queue(0), (-16, 16)), // traffic from North - ("l1E", queue(1), (-16, -16)), - ("l1S", queue(2), (16, -16)), - ("l1W", queue(3), (16, 16))) - - val sink = Sink.group (this, null, base, - ("k1N", (16, -250)), - ("k1E", (250, 16)), - ("k1S", (-16, 
250)), // end for North traffic - ("k1W", (-250, -16))) - - val road = VEC [Route] () - for i <- source.indices do - road += Route (s"ra$i", this, 2, source(i).vert, queue(i), moveRV) - end for - for i <- source.indices do - road += Route (s"rb$i", this, 2, light(i).vert, sink((i + 2) % 4), moveRV) - end for - - //-------------------------------------------------- - // Specify Scripts for each Type of Simulation Agent - - case class Car () extends SimAgent ("c", director.clock, this): - - def act (): Unit = - println (s"Car $me running") - val i = subtype // from North (0), East (1), South (2), West (3) - val l = laneRV.igen // randomly select lane l - println (s"Car $me move on road $i lane $l") - road(i).lane(l).move (this) // move half way down lane l - if light(i).open (this) then - println (s"Car $me skips queue $i") - queue(i).noWait (this) // skip the queue - else - println (s"Car $me wait in queue $i") - queue(i).waitIn (this) // stop and wait for red light - end if - println (s"Car $me move on road ${i+4} lane $l") - road(i + 4).lane(l).move (this) // add 4 for next segment - sink((i + 2) % 4).leave (this) // end at this sink - end act - - end Car - -/* - road(i).lane(l).move (this, 0.5) // move half way down lane l - val l2 = (l + 1) % 2 // index of other lane - road(i).changeLane (this, l, l2) // change to lane l2 - road(i).lane(l2).move (this, 0.5) // move the rest of the way down lane l2 - if light(i).shut (this) then queue(i).waitIn (this) // stop and wait for red light - road(i + 4).lane(l2).move (this) // add 4 for next segment - sink((i + 2) % 4).leave (this) // end at this sink - - case class Car (prop: Map [String, ValueType]) extends SimAgent ("c", this, prop): - val turnRV = Randi (-1, 1, (stream + 5) % N_STREAMS) - def prop_gen = Map ("turn" -> turnRV.gen) - val source = { val p = prop_gen; Source.group (this, () => Car (p), nStop, (800, 250), - prop("turn") match - case -1 => road(i + 3).lane(l).move () // left turn - sink((i + 1) % 4).leave 
() - case 0 => road(i + 4).lane(l).move () // straight - sink((i + 2) % 4).leave () - case _ => road((i + 5) % 8).lane(l).move () // right turn - sink((i + 3) % 4).leave () - end match -*/ - - simulate () - waitFinished () - Model.shutdown () - -end Traffic4LModel - diff --git a/src/main/scala/scalation/simulation/agent/example_1/Traffic4L.scalaa b/src/main/scala/scalation/simulation/agent/example_1/Traffic4L.scalaa new file mode 100644 index 000000000..882ce1278 --- /dev/null +++ b/src/main/scala/scalation/simulation/agent/example_1/Traffic4L.scalaa @@ -0,0 +1,150 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 2.0 + * @date Mon Sep 20 15:47:16 EDT 2021 + * @see LICENSE (MIT style license file). + * + * @note Example Model: Traffic4L (Four-Lane) for Agent-Based Simulation + */ + +package scalation +package simulation.agent +package example_1 + +import scala.collection.mutable.{ArrayBuffer => VEC} + +import scalation.random.{Bernoulli, Sharp, Uniform} +import scalation.random.RandomSeeds.N_STREAMS + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `runTraffic4L` function is used to launch the `Traffic4LModel` class. + * > runMain scalation.simulation.agent.example_1.runTraffic4L + */ +@main def runTraffic4L (): Unit = new Traffic4LModel () + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Traffic4LModel` class simulates an intersection with four traffic lights + * `Gates` and four roads. Each road consists of two routes with one in each + * direction. Each `Route` has two lanes (`Transport`s). + * @param name the name of the simulation model + * @param reps the number of independent replications to run + * @param startSim the start time of the simulation + * @param animating whether to animate the model + * @param aniRatio the ratio of simulation speed vs. 
animation speed + * @param nStop the number arrivals before stopping + * @param stream the base random number stream (0 to 999) + */ +class Traffic4LModel (name: String = "Traffic4L", reps: Int = 1, startSim: Double = 0.0, + animating: Boolean = true, aniRatio: Double = 8.0, + nStop: Int = 1, stream: Int = 0) + extends Model (name, reps, startSim, animating, aniRatio): + + //-------------------------------------------------- + // Initialize Model Constants + + val iaTime = (4000.0, 6000.0) // (lower, upper) on inter-arrival time + val onTime = 8000.0 // on (green-light) time for North-South traffic + val offTime = 6000.0 // off (red-light) time for North-South traffic + val mvTime = (2900.0, 3100.0) // (lower, upper) on move time + + //-------------------------------------------------- + // Create Random Variates (RVs) + + val iArrivalRV = Uniform (iaTime, stream) + val onTimeRV = Sharp (onTime, (stream + 1) % N_STREAMS) + val offTimeRV = Sharp (offTime, (stream + 2) % N_STREAMS) + val moveRV = Uniform (mvTime, (stream + 3) % N_STREAMS) + val laneRV = Bernoulli (stream = (stream + 4) % N_STREAMS) + + //-------------------------------------------------- + // Create the Graph Model: Vertices and Edges + + val base = (800.0, 400.0) + + val source = Source.group (this, 0.0, () => Car (), nStop, null, base, + ("s1N", iArrivalRV, 0, (-16, -250)), // from North + ("s1E", iArrivalRV, 1, (250, -16)), + ("s1S", iArrivalRV, 2, (16, 250)), + ("s1W", iArrivalRV, 3, (-250, 16))) + + val queue = WaitQueue.group (this, Int.MaxValue, null, base, + ("q1N", (-16, -40)), // before North light + ("q1E", (40, -16)), + ("q1S", (16, 40)), + ("q1W", (-40, 16))) + + val light = Gate.group (this, 0.0, onTimeRV, offTimeRV, 15, null, base, + ("l1N", queue(0), (-16, 16)), // traffic from North + ("l1E", queue(1), (-16, -16)), + ("l1S", queue(2), (16, -16)), + ("l1W", queue(3), (16, 16))) + + val sink = Sink.group (this, null, base, + ("k1N", (16, -250)), + ("k1E", (250, 16)), + ("k1S", (-16, 
250)), // end for North traffic + ("k1W", (-250, -16))) + + val road = VEC [Route] () + for i <- source.indices do + road += Route (s"ra$i", this, 2, source(i).vert, queue(i), moveRV) + end for + for i <- source.indices do + road += Route (s"rb$i", this, 2, light(i).vert, sink((i + 2) % 4), moveRV) + end for + + //-------------------------------------------------- + // Specify Scripts for each Type of Simulation Agent + + case class Car () extends SimAgent ("c", director.clock, this): + + def act (): Unit = + println (s"Car $me running") + val i = subtype // from North (0), East (1), South (2), West (3) + val l = laneRV.igen // randomly select lane l + println (s"Car $me move on road $i lane $l") + road(i).lane(l).move (this) // move half way down lane l + if light(i).open (this) then + println (s"Car $me skips queue $i") + queue(i).noWait (this) // skip the queue + else + println (s"Car $me wait in queue $i") + queue(i).waitIn (this) // stop and wait for red light + println (s"Car $me move on road ${i+4} lane $l") + road(i + 4).lane(l).move (this) // add 4 for next segment + sink((i + 2) % 4).leave (this) // end at this sink + end act + + end Car + +/* + road(i).lane(l).move (this, 0.5) // move half way down lane l + val l2 = (l + 1) % 2 // index of other lane + road(i).changeLane (this, l, l2) // change to lane l2 + road(i).lane(l2).move (this, 0.5) // move the rest of the way down lane l2 + if light(i).shut (this) then queue(i).waitIn (this) // stop and wait for red light + road(i + 4).lane(l2).move (this) // add 4 for next segment + sink((i + 2) % 4).leave (this) // end at this sink + + case class Car (prop: Map [String, ValueType]) extends SimAgent ("c", this, prop): + val turnRV = Randi (-1, 1, (stream + 5) % N_STREAMS) + def prop_gen = Map ("turn" -> turnRV.gen) + val source = { val p = prop_gen; Source.group (this, () => Car (p), nStop, (800, 250), + prop("turn") match + case -1 => road(i + 3).lane(l).move () // left turn + sink((i + 1) % 4).leave () + case 
0 => road(i + 4).lane(l).move () // straight + sink((i + 2) % 4).leave () + case _ => road((i + 5) % 8).lane(l).move () // right turn + sink((i + 3) % 4).leave () + end match +*/ + + simulate () + waitFinished () + Model.shutdown () + +end Traffic4LModel + diff --git a/src/main/scala/scalation/simulation/agent/example_1/UGABusRoutes.scala b/src/main/scala/scalation/simulation/agent/example_1/UGABusRoutes.scalaa similarity index 100% rename from src/main/scala/scalation/simulation/agent/example_1/UGABusRoutes.scala rename to src/main/scala/scalation/simulation/agent/example_1/UGABusRoutes.scalaa diff --git a/src/main/scala/scalation/simulation/event/CausalLink.scala b/src/main/scala/scalation/simulation/event/CausalLink.scala index 19264d778..d3bb0a903 100644 --- a/src/main/scala/scalation/simulation/event/CausalLink.scala +++ b/src/main/scala/scalation/simulation/event/CausalLink.scala @@ -45,7 +45,6 @@ case class CausalLink (label: String, director: Model, val condition: () => Bool director.animate (this, CreateEdge, green, new QCurve (), from, to, Array (16.0 * bend)) else director.animate (this, CreateEdge, green, new QCurve (), from, to, Array (bend)) - end if end display //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/simulation/event/Ex_Template.scala b/src/main/scala/scalation/simulation/event/Ex_Template.scala index fd528b597..75811eff6 100644 --- a/src/main/scala/scalation/simulation/event/Ex_Template.scala +++ b/src/main/scala/scalation/simulation/event/Ex_Template.scala @@ -79,7 +79,6 @@ class SOMEModel (name: String = "SOME", reps: Int = 1, nStop: Int = 100, stream: if nArr < nStop - 1 then val toArrive = Entity (iArrivalRV.gen, 0.0, SOMEModel.this) schedule (Arrival (toArrive, toArrive.iArrivalT)) - end if nArr += 1 // update the current state end occur diff --git a/src/main/scala/scalation/simulation/monte_carlo/Cards.scala b/src/main/scala/scalation/simulation/monte_carlo/Cards.scala 
index cf6b1135c..8ab495350 100644 --- a/src/main/scala/scalation/simulation/monte_carlo/Cards.scala +++ b/src/main/scala/scalation/simulation/monte_carlo/Cards.scala @@ -64,6 +64,9 @@ object Cards: val htype = Array ("high-card", "one-pair", "two-pair", "3-of-a-kind", "straight", "flush", "full-house", "4-of-a-kind", "straight-flush") + given intOrd: Ordering [Int] = new Ordering [Int]: + def compare (s: Int, t: Int) = s compare t + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Convert the integer card number c (0 to 51) to the * face value: 1(A), 2, 3, ..., 10, 11(J), 12(Q), 13(K) and @@ -108,7 +111,8 @@ object Cards: def classify (hand: IndexedSeq [Int]): Int = val flush = isFlush (hand) val hmap = handMap (hand) - val freq = hmap.values.toIndexedSeq.sorted ((x, y) => y.compare (x)) +// val freq = hmap.values.toIndexedSeq.sorted ((x, y) => y.compare (x)) + val freq = hmap.values.toIndexedSeq.sorted (using intOrd) freq(0) match case 4 => 7 @@ -139,7 +143,7 @@ end Cards println ("\nShuffled deck of cards:") println (deck) - val hand = for i <- 1 to 5 yield deck.draw () + val hand = for _ <- 1 to 5 yield deck.draw () val cards = hand.map (value (_)) val hmap = handMap (hand) println ("\n hand = " + hand) @@ -164,7 +168,7 @@ end cardsTest println (deck) for h <- 1 to 1000 do - val hand = for i <- 1 to 5 yield deck.draw () + val hand = for _ <- 1 to 5 yield deck.draw () val cards = hand.map (value (_)) val kind = classify (hand) if kind > 1 then // skip common hands @@ -196,11 +200,11 @@ end cardsTest2 val iter = 30000000 val count = new VectorD (htype.length) - for h <- 1 to iter do - val hand = for i <- 1 to 5 yield deck.draw () + cfor (0, iter) { _ => + val hand = for _ <- 1 to 5 yield deck.draw () count(classify (hand)) += 1 deck.shuffle () - end for + } // cfor banner ("Monte Carlo Simulation Poker Hand Precentages") val mul = 100.0 / iter diff --git a/src/main/scala/scalation/simulation/monte_carlo/GrainDropping.scala 
b/src/main/scala/scalation/simulation/monte_carlo/GrainDropping.scala index 5b4b43542..d6b0d11c9 100644 --- a/src/main/scala/scalation/simulation/monte_carlo/GrainDropping.scala +++ b/src/main/scala/scalation/simulation/monte_carlo/GrainDropping.scala @@ -30,7 +30,7 @@ class GrainDropping (stream: Int): */ def fraction (n: Int): Double = var count = 0 - for i <- 0 until n do if grain.gen.normSq <= 1.0 then count += 1 + cfor (0, n) { _ => if grain.gen.normSq <= 1.0 then count += 1 } count / n.toDouble end fraction diff --git a/src/main/scala/scalation/simulation/monte_carlo/MonteCarloIntegration.scala b/src/main/scala/scalation/simulation/monte_carlo/MonteCarloIntegration.scala index 167c14f60..2699c433b 100644 --- a/src/main/scala/scalation/simulation/monte_carlo/MonteCarloIntegration.scala +++ b/src/main/scala/scalation/simulation/monte_carlo/MonteCarloIntegration.scala @@ -34,7 +34,7 @@ object MonteCarloIntegration: val length = b - a val x = Uniform (a, b, s) var sum = 0.0 - for it <- 0 until m do sum += f(x.gen) + cfor (0, m) { _ => sum += f(x.gen) } sum * length / m end integrate diff --git a/src/main/scala/scalation/simulation/monte_carlo/MontyHall.scala b/src/main/scala/scalation/simulation/monte_carlo/MontyHall.scala index 296f39d88..1ff97f3d0 100644 --- a/src/main/scala/scalation/simulation/monte_carlo/MontyHall.scala +++ b/src/main/scala/scalation/simulation/monte_carlo/MontyHall.scala @@ -33,7 +33,7 @@ import scalation.random.{Bernoulli, Randi} var winStay = 0 // count wins with stay stategy var winSwitch = 0 // count wins with switch strategy - for it <- 1 to limit do + cfor (0, limit) { _ => val car = rg.igen // car randomly placed behind this door val pick = rg.igen // contestant randomly picks a door val show = (car, pick) match // Monty Hall show other non-car door @@ -46,7 +46,7 @@ import scalation.random.{Bernoulli, Randi} if pick == car then winStay += 1 // stay with initial pick else winSwitch += 1 // switch to the other door - end for + } // 
cfor println (s"winStay = $winStay") println (s"winSwitch = $winSwitch") diff --git a/src/main/scala/scalation/simulation/monte_carlo/RollDice.scala b/src/main/scala/scalation/simulation/monte_carlo/RollDice.scala index 5520cb557..7877efb7a 100644 --- a/src/main/scala/scalation/simulation/monte_carlo/RollDice.scala +++ b/src/main/scala/scalation/simulation/monte_carlo/RollDice.scala @@ -35,7 +35,7 @@ class RollDice (nDice: Int): //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Roll nDice dice, returning a vector of three integers. */ - def roll: Int = (for i <- 0 until nDice yield dice.igen).sum + def roll: Int = (for _ <- 0 until nDice yield dice.igen).sum //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Collect the result of of rolling nDice dice into the counter vector. @@ -98,7 +98,6 @@ object RollDice: var cnt = 0 for j <- 1 to min (6, k) do cnt += n_ways (n_d - 1, k - j) cnt - end if end n_ways end RollDice @@ -118,22 +117,22 @@ import RollDice._ val count_3 = for k <- 3 to 18 yield coeff_3 (k) banner ("Monte Carlo: number of ways for 1 Dice") - val monte_1 = new RollDice (1) - for i <- 0 until samples do monte_1.collect (monte_1.roll) + val monte_1 = new RollDice (1) + cfor (0, samples) { _ => monte_1.collect (monte_1.roll) } val result_1 = monte_1.counts * 6 println (s"count_2 = $count_1") println (s"result_1 = $result_1") banner ("Monte Carlo: number of ways for 2 Dice") - val monte_2 = new RollDice (2) - for i <- 0 until samples do monte_2.collect (monte_2.roll) + val monte_2 = new RollDice (2) + cfor (0, samples) { _ => monte_2.collect (monte_2.roll) } val result_2 = monte_2.counts * 6~^2 println (s"count_2 = $count_2") println (s"result_2 = $result_2") banner ("Monte Carlo: number of ways for 3 Dice") - val monte_3 = new RollDice (3) - for i <- 0 until samples do monte_3.collect (monte_3.roll) + val monte_3 = new RollDice (3) + cfor (0, samples) { _ => monte_3.collect (monte_3.roll) } val 
result_3 = monte_3.counts * 6~^3 println (s"count_3 = $count_3") println (s"result_3 = $result_3") @@ -163,10 +162,10 @@ import scalation.mathstat.Plot val dice = Randi (1, 6) val x = VectorD.range (3, 19) val freq = new VectorD (16) - for i <- 0 until 1000000 do + cfor (0, 1000000) { _ => val sum = dice.igen + dice.igen + dice.igen freq(sum-3) += 1 - end for + } // cfor new Plot (x, freq) end rollDiceTest3 diff --git a/src/main/scala/scalation/simulation/monte_carlo/SphereVolume.scala b/src/main/scala/scalation/simulation/monte_carlo/SphereVolume.scala index 30ed5254f..9571ae1a6 100644 --- a/src/main/scala/scalation/simulation/monte_carlo/SphereVolume.scala +++ b/src/main/scala/scalation/simulation/monte_carlo/SphereVolume.scala @@ -35,7 +35,7 @@ import scalation.random.Random val r = Random () var count = 0 - for i <- 1 to n if inSphere (r.gen, r.gen, r.gen) do count += 1 + cfor (0, n) { _ => if inSphere (r.gen, r.gen, r.gen) then count += 1 } println (s"Sphere Volume = ${(8.0 * count) / n.toDouble}") end sphereVolumeTest diff --git a/src/main/scala/scalation/simulation/process/Component.scala b/src/main/scala/scalation/simulation/process/Component.scala index 33d64382a..83c269189 100644 --- a/src/main/scala/scalation/simulation/process/Component.scala +++ b/src/main/scala/scalation/simulation/process/Component.scala @@ -77,9 +77,9 @@ trait Component * @param label the name of this component */ protected def initStats (label: String): Unit = - _durationStat = new Statistic (name) + _durationStat = new Statistic (label) if ! this.isInstanceOf [Source] && ! this.isInstanceOf [Sink] && ! 
this.isInstanceOf [Gate] then - _persistentStat = new TimeStatistic ("p-" + name) + _persistentStat = new TimeStatistic ("p-" + label) end initStats //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/simulation/process/Dynamics.scala b/src/main/scala/scalation/simulation/process/Dynamics.scala index c5181f04d..f3b30b833 100644 --- a/src/main/scala/scalation/simulation/process/Dynamics.scala +++ b/src/main/scala/scalation/simulation/process/Dynamics.scala @@ -54,7 +54,8 @@ object GippsDynamics extends Dynamics: private val debug = debugf ("GippsDynamics", true) // debug function - private val EPSILON = 1.0 // FIX - hack - minimum velocity + private val flaw = flawf ("GippsDynamics") // flaw function + private val EPSILON = 0.1 // FIX - hack - minimum velocity //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Update the vehicle's velocity and position using Gipps' Model (located in `Motion`) @@ -63,8 +64,8 @@ object GippsDynamics * @param length the length of the road (`VTransport`) */ def updateM (car: Vehicle, length: Double): Unit = - debug ("updateM", s"car = $car with car.myNode = ${car.myNode}") - val ref = car.myNode.prev + debug ("updateM", s"car = $car with car.myNode = ${car.myNode}") // may switch to myPathNode + val ref = car.myNode.ahead val car_ahead = if ref == null then null else ref.elem.asInstanceOf [Vehicle] debug ("updateM", s"car = $car (velocity and position) based on car_ahead = $car_ahead") @@ -101,22 +102,34 @@ object GippsDynamics end gipps //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the velocity of the vehicle based on Gipps' model. + /** Return the new velocity v_n(t+rt) of the vehicle n based on Gipps' model. 
+ * @seee en.wikipedia.org/wiki/Gipps%27_model * @param an the max acceleration of drivers * @param bn the max deceleration of drivers (negative #) * @param sp the size of vehicles * @param Vn the desired velocity of driver n * @param xn the current position of driver n * @param vn the current velocity of driver n - * @param xp the current position of the predecessor - * @param vp the current velocity of the predecessor + * @param xp the current position of the predecessor (car ahead) + * @param vp the current velocity of the predecessor (car ahead) * @param rt the reaction time of drivers */ private def gipps (an: Double, bn: Double, sp: Double, Vn: Double, xn: Double, vn: Double, xp: Double, vp: Double, rt: Double): Double = - val free = vn * 2.5 * an * rt * (1.0 - vn / Vn) * sqrt (0.025 + vn / Vn) - val cong = bn * rt + sqrt (bn * bn * rt * rt - bn * (2 * (xp - sp - xn) - vn * rt - vp * vp / bn)) - min (free, cong) + + // when the car ahead is not close, approach desired speed + val free = vn + 2.5 * an * rt * (1.0 - vn / Vn) * sqrt (0.025 + vn / Vn) + + // when close to car ahead, hard braking (negative) is reduced based on car gap + val brake = bn * rt // hard braking + var red_sq = bn~^2 * rt~^2 - bn * (2 * (xp - sp - xn) - vn * rt - vp~^2 / bn) + if red_sq < 0.0 then + flaw ("gipps", s"braking reduction squared can't be negative red_sq = $red_sq") + red_sq = 0.0 + val reduce = sqrt (red_sq) + val cong = brake + reduce + + min (free, cong) // take the minimum end gipps end GippsDynamics @@ -142,8 +155,9 @@ object IDMDynamics * @param length the length of the road (`VTransport`) */ def updateM (car: Vehicle, length: Double): Unit = - debug ("updateM", s"car = $car") - var a = iDM (car, car.myNode.prev.asInstanceOf [Vehicle], del) + debug ("updateM", s"car = $car, length = $length") +// var a = iDM (car, car.myNode.prev.asInstanceOf [Vehicle], del) + var a = iDM (car, car.myNode.ahead.asInstanceOf [Vehicle], del) debug ("updateM", s"car = $car \t the new 
ACCELERATION is: $a") if a.isNaN then a = 0.0 if a.isNegInfinity then a = bmax // max braking acceleration @@ -214,9 +228,9 @@ object IDMDynamics * @param an the max acceleration of drivers * @param vn the current velocity of driver n * @param Vn the desired velocity of driver n - * @param del the acceleration exponent (defaults to 4) + * @param del the acceleration exponent (commonly to 4) */ - private def iDMFree (an: Double, vn: Double, Vn: Double, del: Double = 4.0): Double = + private def iDMFree (an: Double, vn: Double, Vn: Double, del: Double): Double = an * (1.0 - (vn / Vn) ~^ del) end iDMFree diff --git a/src/main/scala/scalation/simulation/process/Dynamics.scala.bak b/src/main/scala/scalation/simulation/process/Dynamics.scala.bak new file mode 100644 index 000000000..f22e130f0 --- /dev/null +++ b/src/main/scala/scalation/simulation/process/Dynamics.scala.bak @@ -0,0 +1,226 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller, Casey Bowman + * @version 2.0 + * @date Tue Feb 4 14:56:34 EST 2020 + * @see LICENSE (MIT style license file). + * + * @note Supports Physics Models for Motion of Vehicles + */ + +package scalation +package simulation +package process + +import scala.math.{log, min, sqrt} + +import Vehicle._ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Dynamics` trait supports physics models for the motion of vehicles, e.g., + * car-following models. 
+ */ +trait Dynamics: + + private [process] var disp = 0.0 // set initial current displacement to 0 + private [process] var t_disp = 0.0 // set initial total displacement to 0 + private [process] var velocity = v0 // set initial velocity to v0 + private [process] var o_t_disp = t_disp // set initial old total displacement t_disp + private [process] var o_velocity = velocity // set initial old velocity to velocity + private [process] var acc = 0.0 // set initial acceleration to 0 + private [process] var o_acc = acc // set initial old acceleration acc + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Update the values of the vehicle: velocity, and displacement according to + * to the car-following model being used. + * @param car the vehicle to move + * @param length the length of the road (`VTransport`) + */ + def updateV (car: Vehicle, length: Double): Unit = + print (s"Dynamics.updateV: called ") + this match + case GippsDynamics => { println ("Gipps"); GippsDynamics.updateM (car, length) } + case _ => { println ("IDM"); IDMDynamics.updateM (car, length) } + end updateV + +end Dynamics + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `GippsDynamics` object provides equations for the Gipps car-following model. + * @see https://en.wikipedia.org/wiki/Gipps%27_model + */ +object GippsDynamics + extends Dynamics: + + private val debug = debugf ("GippsDynamics", true) // debug function + private val EPSILON = 1.0 // FIX - hack - minimum velocity + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Update the vehicle's velocity and position using Gipps' Model (located in `Motion`) + * and Butcher's method for solving ordinary differential equations. 
+ * @param car the car/vehicle whose velocity and position are to be updated + * @param length the length of the road (`VTransport`) + */ + def updateM (car: Vehicle, length: Double): Unit = + debug ("updateM", s"car = $car with car.myNode = ${car.myNode}") +// val ref = car.myNode.prev + val ref = car.myNode.ahead + val car_ahead = if ref == null then null else ref.elem.asInstanceOf [Vehicle] + debug ("updateM", s"car = $car (velocity and position) based on car_ahead = $car_ahead") + + val v = gipps (car, car_ahead) + EPSILON // determine new velocity + debug ("updateM", s"car = $car \t the new VELOCITY is: $v") + + val x = butcher (car.t_disp, v, car.velocity, rt) // new proposed position for car + debug ("updateM", s"car = $car \t the new POSITION is: $x") + + car.o_velocity = car.velocity // save the old velocity + car.velocity = v // assign new velocity + + car.o_t_disp = car.t_disp // save old car position + val dx = x - car.t_disp // change in car's position + val new_disp = if car.disp + dx <= length then car.disp + dx // new car displacement on road + else length + + car.t_disp += new_disp - car.disp // new car position + car.disp = new_disp // displacement on road + debug ("updateM", s"car.disp = ${car.disp}, car.t_disp = ${car.t_disp}") + end updateM + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the velocity of the vehicle based on Gipps' model for a vehicle and its predecessor. 
+ * @param cn the current vehicle + * @param cp the predecessor of the current vehicle (car ahead) + */ + def gipps (cn: Vehicle, cp: Vehicle): Double = + if cp == null then + // when there is no car ahead, just tell Gipps' model it is well ahead, e.g., 1000 meters + gipps (amax, bmax, len, vmax, cn.t_disp, cn.velocity, cn.t_disp + 1000, vmax, rt) + else + gipps (amax, bmax, len, vmax, cn.t_disp, cn.velocity, cp.t_disp, cp.velocity, rt) + end gipps + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the velocity of the vehicle based on Gipps' model. + * @param an the max acceleration of drivers + * @param bn the max deceleration of drivers (negative #) + * @param sp the size of vehicles + * @param Vn the desired velocity of driver n + * @param xn the current position of driver n + * @param vn the current velocity of driver n + * @param xp the current position of the predecessor + * @param vp the current velocity of the predecessor + * @param rt the reaction time of drivers + */ + private def gipps (an: Double, bn: Double, sp: Double, Vn: Double, xn: Double, + vn: Double, xp: Double, vp: Double, rt: Double): Double = + val free = vn * 2.5 * an * rt * (1.0 - vn / Vn) * sqrt (0.025 + vn / Vn) + val cong = bn * rt + sqrt (bn * bn * rt * rt - bn * (2 * (xp - sp - xn) - vn * rt - vp * vp / bn)) + min (free, cong) + end gipps + +end GippsDynamics + + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `IDMDynamics` object provides equations for the Intelligent Driver Model (IDM) + * car-following model. 
+ * @see https://en.wikipedia.org/wiki/Intelligent_driver_model + */ +object IDMDynamics + extends Dynamics: + + private val debug = debugf ("IDMDynamics", true) // debug function + + private val FREERANGE = 50.0 + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Update the vehicle's acceleration, velocity, and position using the + * Intelligent Driver Model (located in `Motion`) and Butcher's method + * for solving ordinary differential equations. + * @param car the car/vehicle whose acceleration, velocity, and position is being updated + * @param length the length of the road (`VTransport`) + */ + def updateM (car: Vehicle, length: Double): Unit = + debug ("updateM", s"car = $car, length = $length") +// var a = iDM (car, car.myNode.prev.asInstanceOf [Vehicle], del) + var a = iDM (car, car.myNode.ahead.asInstanceOf [Vehicle], del) + debug ("updateM", s"car = $car \t the new ACCELERATION is: $a") + if a.isNaN then a = 0.0 + if a.isNegInfinity then a = bmax // max braking acceleration + if a.isPosInfinity then a = amax // max forward acceleration + if a < 0.0 && a < bmax then + val r = log(a) / log (bmax) + a = if r > 5.0 then 3.0 * bmax else bmax // FIX - unclear + if a > 0.0 && a > amax then a = amax + + var v = butcher (car.velocity, a, car.acc, rt) + debug ("updateM", s"car = $car \t the new VELOCITY is: $v") + if v < 0.0 then v = 1.0 // move slowly, not stopped + + val x = butcher (car.t_disp, v, car.velocity, rt) + debug ("updateM", s"car = $car \t the new POSITION is: $x") + + car.o_acc = car.acc + car.acc = a + car.o_velocity = car.velocity + car.velocity = v + val dx = x - car.t_disp + car.disp += dx + car.o_t_disp = car.t_disp + car.t_disp = x + end updateM + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the acceleration of the vehicle based on the Intelligent Driver Model + * for a vehicle and its predecessor. 
+ * @param cn the current vehicle + * @param cp the predecessor of the current vehicle + * @param del the acceleration exponent (defaults to 4) + */ + def iDM (cn: Vehicle, cp: Vehicle, del: Double = 4.0): Double = + if cp == null then + iDMFree (amax, cn.velocity, vmax, del) + else if cp.t_disp - cn.t_disp > FREERANGE then + iDMFree (amax, cn.velocity, vmax, del) + else + iDM (amax, -bmax, len, vmax, cn.t_disp, cn.velocity, cp.t_disp, cp.velocity, T, s, del) + end iDM + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the acceleration of the vehicle based on the Intelligent Driver Model. + * @param an the max acceleration of drivers + * @param bn the max deceleration of drivers (negative #) + * @param sp the size of vehicles + * @param Vn the desired velocity of driver n + * @param xn the current position of driver n + * @param vn the current velocity of driver n + * @param xp the current position of the predecessor + * @param vp the current velocity of the predecessor + * @param T the safe min time headway + * @param s0 the safe min distance headway + * @param del the acceleration exponent (defaults to 4) + */ + private def iDM (an: Double, bn: Double, sp: Double, Vn: Double, xn: Double, vn: Double, + xp: Double, vp: Double, T: Double, s0: Double, del: Double): Double = + val Δx = xp - xn - sp + val Δv = vn - vp + val ss = s0 + vn * T + (vn * Δv) / (2.0 * sqrt (an * bn)) + an * (1.0 - (vn / Vn) ~^ del - (ss / Δx) ~^ 2.0) + end iDM + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the acceleration of the vehicle based on the Intelligent Driver Model + * when there is no predecessor. 
+ * @param an the max acceleration of drivers + * @param vn the current velocity of driver n + * @param Vn the desired velocity of driver n + * @param del the acceleration exponent (commonly to 4) + */ + private def iDMFree (an: Double, vn: Double, Vn: Double, del: Double): Double = + an * (1.0 - (vn / Vn) ~^ del) + end iDMFree + +end IDMDynamics + diff --git a/src/main/scala/scalation/simulation/process/Ex_Template.scsla b/src/main/scala/scalation/simulation/process/Ex_Template.scala similarity index 100% rename from src/main/scala/scalation/simulation/process/Ex_Template.scsla rename to src/main/scala/scalation/simulation/process/Ex_Template.scala diff --git a/src/main/scala/scalation/simulation/process/Gate.scala b/src/main/scala/scalation/simulation/process/Gate.scala index 93c71c35f..d3d75ab92 100644 --- a/src/main/scala/scalation/simulation/process/Gate.scala +++ b/src/main/scala/scalation/simulation/process/Gate.scala @@ -107,7 +107,7 @@ class Gate (name: String, director: Model, line: WaitQueue, units: Int, /** Specifies how the gate is controlled. */ override def act (): Unit = - for i <- 1 to units do + cfor (0, units) { _ => flip () if ! 
_shut then release () director.animate (this, SetPaintNode, gateColor, Rectangle (), at) @@ -115,7 +115,7 @@ class Gate (name: String, director: Model, line: WaitQueue, units: Int, tally (dur) schedule (dur) yieldToDirector () - end for + } // cfor yieldToDirector (true) end act diff --git a/src/main/scala/scalation/simulation/process/Model.scala b/src/main/scala/scalation/simulation/process/Model.scala index 9b2e6e017..0294f10d5 100644 --- a/src/main/scala/scalation/simulation/process/Model.scala +++ b/src/main/scala/scalation/simulation/process/Model.scala @@ -281,7 +281,6 @@ class Model (name: String, val reps: Int = 1, animating: Boolean = true, aniRati reportF () // report in new window/frame if rep == 1 then dgAni.animate (0, 100000) // only animate first rep // dgAni.saveImage (DATA_DIR + name + ".png") - end if end fini //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: diff --git a/src/main/scala/scalation/simulation/process/Path.scala b/src/main/scala/scalation/simulation/process/Path.scala index 3f440fda1..f17e98a7f 100644 --- a/src/main/scala/scalation/simulation/process/Path.scala +++ b/src/main/scala/scalation/simulation/process/Path.scala @@ -72,7 +72,7 @@ class Path (name: String, k: Int, val from: Component, val to: Component, if abs (l1 - l2) > 2 then flaw ("changeLane", s"UNSAFE to cross multiple lanes at once $l1 to #l2") val actor = director.theActor.asInstanceOf [Vehicle] - val (open, p, s) = laneOpenAt (l2, actor.disp) + val (open, _, _) = laneOpenAt (l2, actor.disp) // (open, p, s) if open then debug ("changeLane", s"from lane $l1 to lane $l2") director.log.trace (this, s"change lane from $l1 to $l2", actor, director.clock) @@ -90,6 +90,7 @@ class Path (name: String, k: Int, val from: Component, val to: Component, * @param displacement the displacement (distance from start of the new lane) */ def laneOpenAt (newLane: Int, displacement: Double): (Boolean, Vehicle, Vehicle) = + println (s"laneOpenAt: newLane = 
$newLane, displacement = $displacement") (false, null, null) // FIX -- use B+Tree to see if there is a car in the way end laneOpenAt diff --git a/src/main/scala/scalation/simulation/process/Pathway.scala b/src/main/scala/scalation/simulation/process/Pathway.scala new file mode 100644 index 000000000..f8e9ff11d --- /dev/null +++ b/src/main/scala/scalation/simulation/process/Pathway.scala @@ -0,0 +1,133 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller, Korede Bishi + * @version 2.0 + * @date Sat Jan 25 19:44:16 EST 2014 + * @see LICENSE (MIT style license file). + * + * @note Pathway for Modeling a Lane Consisting of Multiple Segments + */ + +package scalation +package simulation +package process + +import scalation.animation.CommandType._ +import scalation.mathstat.VectorD +import scalation.random.Variate +import scalation.scala2d.Colors._ + +//import scala.math.hypot + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Pathway` class defines a single lane with multiple segments, connected by + * shared junctions. 
+ * pathway: VSource --- Segment0 --- Junction0 --- Segment1 --- Junction1 --- Segment2 --- Sink + * @param name the name of the pathway + * @param junc the array of junctions connecting the segments + * @param from the starting component (e.g., `VSource`) + * @param to the ending component (e.g., `Sink`) + * @param motion the variate or dynamics model + * @param isSpeed whether speed or trip-time is used + * @param bend curvature of the lane + */ +class Pathway (name: String, val junc: Array [Junction], val from: Component, val to: Component, + motion: Dynamics, isSpeed: Boolean = false, bend: Double = 0.0) + extends Component: + + private val debug = debugf ("Pathway", true) // debug function +// private val GAP = 30.0 // gap between lanes/pathways +// private val delta = calcShift + private val delta = VectorD (0.0, 0.0) // no need for calcShift since this is a single pathway (Single lane) + val vList = DoublyLinkedList [Vehicle] // one lane = one doubly linked list + val seg = Array.ofDim [VTransport] (junc.length + 1) // single pathway (lane) with numJunc+1 segments + + val points = from +: junc.toList :+ to + for i <- 0 until points.length - 1 do + val p1 = points(i) + val p2 = points(i + 1) + val shift = delta + seg(i) = new VTransport (s"seg${i + 1}", p1, p2, motion, isSpeed, bend, shift, shift) + subpart += seg(i) // add to the subpart + end for + initComponent (name, Array ()) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Add a vehicle to the correct pathway's doubly linked list. 
+ * @param actor the vehicle to add + * @param other the other vehicle (the one ahead, null if none) + */ + def addToAlist (actor: Vehicle, other: Vehicle): Unit = + val otherNode = if other != null then other.myPathNode.asInstanceOf [vList.Node] + else null + debug ("addToList", s"actor = $actor follows otherNode = $otherNode") + actor.myPathNode = vList.add (actor, otherNode) + end addToAlist + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Remove a vehicle from the correct pathway's doubly linked list. + * @param actor the vehicle to remove + */ + def removeFromAlist (actor: Vehicle): Unit = + vList.remove (actor.myPathNode.asInstanceOf [vList.Node]) + end removeFromAlist + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Get the first vehicle in this pathway. + */ + def getFirst: Vehicle = + if vList.isEmpty then null else vList.head // return first vehicle in this doubly linked list + end getFirst + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Get the last vehicle in this pathway. + */ + def getLast: Vehicle = + if vList.isEmpty then null else vList.last // return last vehicle in this doubly linked list + end getLast + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Calculate the shift for this lane for animation. + * + private def calcShift: VectorD = + val xdist = from.at(0) - to.at(0) + val ydist = from.at(1) - to.at(1) + val hyp = hypot(xdist, ydist) + VectorD ((ydist / hyp) * GAP, -(xdist / hyp) * GAP) + end calcShift + */ + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Return the location of the first curve to be the pathway starting point. + */ + override def at: Array [Double] = seg(0).at + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Get the direction/turn random variate to determine next the direction. 
+ * This allows an application model to select the next component. + * FIX - this won't work in general - seg(0) will only allow turns from first segment + */ + def selector: Variate = seg(0).selector + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Set the direction/turn random variate for this pathway. + * FIX - this won't work in general + * @param selectorRV the random variate used to select the direction + */ + def selector_= (selectorRV: Variate): Unit = seg(0).selector = selectorRV + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Display this pathway. + */ + override def display (): Unit = + for s <- seg.indices do + val segment = seg(s) + director.animate (segment, CreateEdge, blue, segment.curve, segment.from, segment.to, + Array (segment.p1(0), segment.p1(1), + segment.pc(0), segment.pc(1), + segment.p2(0), segment.p2(1))) + end for + end display + +end Pathway + diff --git a/src/main/scala/scalation/simulation/process/Pathway.scala.bak b/src/main/scala/scalation/simulation/process/Pathway.scala.bak new file mode 100644 index 000000000..96be07a9e --- /dev/null +++ b/src/main/scala/scalation/simulation/process/Pathway.scala.bak @@ -0,0 +1,236 @@ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller, Korede Bishe + * @version 2.0 + * @date Thu May 15 18:19:39 EDT 2025 + * @see LICENSE (MIT style license file). 
+ * + * @note Path for Modeling Multi-Lane Pathway + */ + +package scalation +package simulation +package process + +import scala.math.{abs, hypot, min} + +import scalation.animation.CommandType._ +import scalation.mathstat.VectorD +import scalation.random.{Bernoulli, Variate} +import scalation.scala2d.Colors._ + +/* + Example of four pathways (e.g., four lanes of an interstate highway (one direction): + pathway1: VSource-----------------path-----------------Junction-----------------path-----------------Sink + pathway2: VSource-----------------path-----------------Junction-----------------path-----------------Sink + pathway3: VSource-----------------path-----------------Junction-----------------path-----------------Sink + pathway4: VSource-----------------path-----------------Junction-----------------path-----------------Sink +*/ + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Pathway` class represents a single lane that consists of `VTransport`s and `Junction`s. + * As lanes come and go away, their endpoints may be a `Junction`, `VSource`, or `Sink`. + * Each pathway has a single `DoublyLinkedList` to keep track of cars on the pathway. + * On a pathway, 'your' car will follow the 'car-ahead' unless that car exits (`Ramp` or `Sink`) or changes lanes. + * Also, 'your' car may take an exit `Ramp` or change lanes, in which case it is no longer in the same pathway. + * Finally, a third car may enter your lane between 'your' car and the 'car-ahead' via an entrance `Ramp` or lane change. + * @param name the name of the pathway + * @param k the ? + * @param j the ? 
+ * @param from the starting component + * @param to the ending component + * @param junc the array of junctions in the middle of the pathway + * @param motion the variate or dynamics model for the speed/trip-time for motion down the `Path` + * @param isSpeed whether speed or trip-time is used for motion + * @param bend the bend or curvature of the `Path` (0 => line) + */ +class Pathway (name: String, k: Int, j: Int, from: Component, val srcSensor: Junction, + val junc: Array [Junction], val exitSensor: Junction, val to: Component, + motion: Dynamics, isSpeed: Boolean = false, bend: Double = 0.0) + extends Component: + + private val debug = debugf ("Pathway", true) // debug function + private val flaw = flawf ("Pathway") // flaw function + + private val GAP = 30.0 // ? + private val delta = calcShift // ? + private val coin = Bernoulli (0.9) // ? + + // Create pathway structure: k pathways, each with j+1 segments (including exit) + + val paths = Array.ofDim [VTransport] (k, j+1) // # junctions is j, so segments must be j+1 + val vtrees = Array.fill(k)(DoublyLinkedList [Vehicle] ()) + + debug("init", s"name = $name, k = $k, from = ${from.name}, junc =, to = ${to.name}") + + for i <- 0 until k do + val shift = VectorD((i - (k - 1) / 2.0) * delta(0), (i - (k - 1) / 2.0) * delta(1)) + + // Hidden segment: from : srcSensor (Not displayed) + val entrySegment = new VTransport(s"${name}l${i}_entry", from, srcSensor, motion, isSpeed, 0.0, shift, shift) + + // First visible segment: srcSensor : junc(0) + pathways(i)(0) = new VTransport(s"${name}l${i}_seg0", srcSensor, junc(0), motion, isSpeed, 0.0, shift, shift) + + // Mid-segments: Junction connections + for s <- 0 until j - 1 do + pathways(i)(s + 1) = new VTransport(s"${name}l${i}_seg${s + 1}", junc(s), junc(s + 1), motion, isSpeed, 0.0, shift, shift) + + // Last visible segment: junc(j - 1) : exitSensor + pathways(i)(j) = new VTransport(s"${name}l${i}_seg${j}", junc(j - 1), exitSensor, motion, isSpeed, 0.0, shift, shift) 
+ + // Hidden segment: exitSensor : to (Not displayed) + val exitSegment = new VTransport(s"${name}l${i}_exit", exitSensor, to, motion, isSpeed, 0.0, shift, shift) + + // Add segments to the pathway + subpart ++= (entrySegment :: pathways(i).toList) :+ exitSegment + end for + + + initComponent(name, Array()) + + /** + * Attempt to change pathways for a vehicle. + * + * @param i The current pathway index + * @param j The target pathway index + * @param k The segment index + * @param actor The vehicle attempting to change pathways + * @return True if lane change was successful, False otherwise + */ + def changeLane(i: Int, j: Int, actor: Vehicle, k:Int): Boolean = + if abs(i - j) > 1 then + flaw("changePathway", s"Car: $actor UNSAFE to change multiple pathways at once: lane:$i to lane:$j") + return false + println(s"Vehicle $actor needs to change lane from lane$i to lane$j with seg:$k") + val safeDisp = pathways(i)(k).safetydist + val vBehind = pathways(j)(k).getFirst + val vAhead = if vBehind != null && vBehind.myPathNode.ahead != null then vBehind.myPathNode.ahead.elem else null + + println(s"changeLane $actor in lane $i: vAhead: $vAhead and vBehind: $vBehind @ lane$j") + + //suppose car4 in pathway(i) needs to change lane to pathway(j): + val gapBehind = if vBehind != null then pathways(j)(k).length - vBehind.disp else safeDisp // use gap as displacement between length and car or make it 20 if otherwise + val gapVahead = if vAhead != null then abs(vAhead.t_disp - pathways(j)(k+1).length) else safeDisp // do same as above here too + + val gap = min(gapBehind, gapVahead) // return the minimum gap between ahead and behind, this allows for a safe lane change. 
+ if gap >= safeDisp + then + removeFromAlist(actor, i) // remove the car from it's doublylinkedlist + actor.laneID = j // update the car lane id to reflect the new lane info + actor.pathInfo = pathways(j)(k).name // update the pathway info before adding to the dll for consistency of information + addToAlist(actor, vAhead, j) // add the car to the new doublylinkedlist + true + else + println(s"Unsafe to change lane from $i to $j because the gap is small") + false // gap too small, no lane change allowed. + end changeLane + + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Add a vehicle to the correct pathway's doubly linked list. + * @param actor the vehicle to add + * @param other the other vehicle (the one ahead, null if none) + * @param j the pathway index (0 to k-1), where k is the number of pathways + * */ + def addToAlist(actor: Vehicle, other: Vehicle, j: Int): Unit = + actor.myPathway = this //set the actor's pathway to this pathway + val thisVtree = vtrees(j) //extract the correct vtree first + val otherNode = if other != null then other.myPathNode.asInstanceOf[thisVtree.Node] else null//thisVtree.headNode //maybe null //get the other actor's node + if otherNode != null then + println(s"the if part(addBefore method) is used otherNode: $otherNode") + actor.myPathNode = thisVtree.addBefore(actor, otherNode) + else + println(s"the else part (add method) was used otherNode: $otherNode") + actor.myPathNode = thisVtree.add(actor, otherNode) //add the actor to the vtree + end addToAlist + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Remove a vehicle from the correct pathway's doubly linked list. 
+ * @param actor the vehicle to remove + * @param i the pathway index (0 to k-1), where k is the number of pathways + * */ + def removeFromAlist(actor: Vehicle, i: Int): Unit = + val thisVtree = vtrees(i) + thisVtree.remove(actor.myPathNode.asInstanceOf[thisVtree.Node]) + end removeFromAlist + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Get the first vehicle in a specific pathway. + * @param i the pathway index + * */ + def getFirst(i: Int): Vehicle = + if vtrees(i).isEmpty then null else vtrees(i).head + end getFirst + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Get the last vehicle in a specific pathway. + * @param i the pathway index + * */ + def getLast(i: Int): Vehicle = + val thisVtree = vtrees(i) + println(vtrees(i)) + if thisVtree.isEmpty then + {println(s"the vtree is empty"); null} + else + println(s"the vtree is not empty ${vtrees(i)}") + thisVtree.last + end getLast + + /** Calculate spacing adjustment. + * + * */ + private def calcShift: VectorD = + val xdist = from.at(0) - to.at(0) + val ydist = from.at(1) - to.at(1) + val hyp = hypot(xdist, ydist) + VectorD((ydist / hyp) * GAP, -(xdist / hyp) * GAP) + end calcShift + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + /** + * Give the location of the curve to be its starting point. + */ + override def at: Array[Double] = pathways(0)(0).at + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + /** + * Get the selector random variate (delegated to the first transport segment). + */ + def selector: Variate = pathways(0)(0).selector + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + /** + * Set the selector random variate in the first transport segment. 
+ */ + def selector_=(selectorRV: Variate): Unit = pathways(0)(0).selector = selectorRV + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + /** + * Display all pathways. + * Ensures correct animation of all segments from Source → Junctions → Sink. + */ + override def display(): Unit = + for i <- 0 until k do + for j <- pathways(i).indices do // Iterate through all pathway segments + val segment = pathways(i)(j) + director.animate(segment, CreateEdge, blue, segment.curve, + segment.from, segment.to, + Array(segment.p1(0), segment.p1(1), + segment.pc(0), segment.pc(1), + segment.p2(0), segment.p2(1))) + end for + end for + end display + +end Pathway + + + + + + diff --git a/src/main/scala/scalation/simulation/process/Recorder.scala b/src/main/scala/scalation/simulation/process/Recorder.scala index 262343016..5b681fb82 100644 --- a/src/main/scala/scalation/simulation/process/Recorder.scala +++ b/src/main/scala/scalation/simulation/process/Recorder.scala @@ -15,31 +15,127 @@ package process import scala.math.floor +import scalation.mathstat.{MatrixD, Statistic} + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Recorder` trait allows Nodes to easily record the flow of actors/entities - * (e.g., vehicles) in terms of counts and optionally average speed. - * @param nt the number of time intervals +/** The `Recorder` trait allows Components/Nodes to easily record the flow of actors/entities + * (e.g., vehicles) in terms of counts and optionally average speed (or other property + * of interest). 
+ * @param nt the number of time intervals (defaults to 60) + * 15-minute or 900-second intervals over 6:00 AM to 9:00 PM + * @param nLanes the number of lanes */ -trait Recorder (nt: Int = 200): +trait Recorder (nt: Int = 60, nLanes: Int = 4): + + protected val r_counts = new MatrixD (nt, nLanes) // record counts in time interval + protected val r_speeds = new MatrixD (nt, nLanes) // record average speed in time interval - private val timeConv = 86400.0 / nt // 50 * 60 * 24 = 86400 seconds per day + private val timeConv = 54000.0 / nt // 60 * 60 * 15 = 54000 seconds per busy part of the day +// private val timeConv = 86400.0 / nt // 60 * 60 * 24 = 86400 seconds per day + private var i_pre = 0 // the current and previous time intervals + private val lane_stat = Array.fill (nLanes) (new Statistic ("lane")) // array of `Statistic` - protected val r_counts = Array.ofDim [Int] (nt) // record counts in time interval - protected val r_speeds = Array.ofDim [Double] (nt) // record average speed in time interval + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Get the recorder matrices. + */ + def getRecorderMat: (MatrixD, MatrixD) = (r_counts, r_speeds) + + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Record the entity and optionally its speed (or other property of interest). 
+ * @param ctime the clock time the entity entered the component (e.g., Junction, Sink) + * @param speed the speed at which entity entered the component (e.g., Junction, Sink) + * @param lane the lane the vehicle is in + */ + def record (ctime: Double, speed: Double, lane: Int): Unit = + val i_cur = floor (ctime / timeConv).toInt // determine the current time interval + if i_cur > i_pre then // detected start of new time interval + recordInMatrix (i_pre) // put stats in recorder matrices + i_pre = i_cur // update i_pre + lane_stat(lane).tally (speed) // record the speed/property of interest + end record //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Record the entity and optionally its speed. - * @param ctime the clock time the entity entered the component (e.g., Sink) - * @param speed the speed at which entity entered the component (e.g., Sink) + /** Record the vehicle entity and optionally its speed (or other property of interest). + * @param actor the actor/vehicle being recorded + * @param ctime the clock time the entity entered the component (e.g., Junction, Sink) */ - def record (actor: SimActor, ctime: Double): Unit = - val i = floor (ctime / timeConv).toInt - val cnt = r_counts(i) + 1 - r_counts(i) = cnt + inline def record (actor: SimActor, ctime: Double): Unit = if actor.isInstanceOf [Vehicle] then - val speed = actor.asInstanceOf [Vehicle].velocity - r_speeds(i) = (r_speeds(i) * (cnt - 1) + speed) / cnt + val car = actor.asInstanceOf [Vehicle] + record (ctime, car.velocity, car.subtype) + else + if actor.prop != null then + record (ctime, actor.prop.head._2, actor.subtype) // record value of first property + else + record (ctime, -0.0, actor.subtype) // no property to record end record + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Put the lane statistics in recorder matrices and reset the statistical counters + * at the end of each (ii-th) observation/time interval. 
+ * @param ii the relevant observation/time interval + */ + private def recordInMatrix (ii: Int): Unit = + for l <- r_counts.indices2 do // for each lane + r_counts(ii, l) = lane_stat(l).num // vehicles counted during the time interval + r_speeds(ii, l) = lane_stat(l).mean // average speed during the time interval + lane_stat(l).reset () // reset statistical counters + end recordInMatrix + end Recorder + +//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `recorderTest` main function tests the `Recorder` trait. + * Creates fake simulated and actual values to test `Recorder` and `Fit` methods. + * > runMain scalation.simulation.process.recorderTest + */ +@main def recorderTest (): Unit = + + import scalation.modeling.Fit + import scalation.random.{Normal, Randi, Uniform} + + val nparams = 5 // number of parameters in fake simulation model + + val rlane = Randi (0, 3, 1) // stream 1 + val rspeed = Uniform (22.0, 34.0, 2) // stream 2 + val noise = Normal (0.0, 2.0, 3) // stream 3 (use independent streams) + + object TestRec extends Recorder () // create a `Recorder` object + + var ctime = 0.0 // initialize simulated time to zero + for _ <- 0 until 10800 do // for each of 10800 fake cars + ctime += 5.0 // time between cars = 5 (should use Erlang process) + val lane = rlane.igen // randomly pick lane + val speed = lane + rspeed.gen // randomly set speed in m/s + TestRec.record (ctime, speed, lane) // record information about this fake car + end for + + val (cmat, smat) = TestRec.getRecorderMat // get fake simulated values + banner ("Recorder Matrix for counts") + println (s"cmat = $cmat") + banner ("Recorder Matrix for speeds") + println (s"smat = $smat") + + val cmat_ = new MatrixD (cmat.dim, cmat.dim2) // make fake actual values + val smat_ = new MatrixD (smat.dim, smat.dim2) + + for i <- cmat.indices; j <- cmat.indices2 do + cmat_(i, j) = 45.0 + noise.gen + smat_(i, j) = 29.5 + noise.gen + + object TestFit extends Fit (dfr = 
nparams , df = cmat.dim - nparams) // create a `Fit` object + + val cqof = TestFit.diagnose_mat (cmat_, cmat) // diagnostics for counts + val sqof = TestFit.diagnose_mat (smat_, smat) // diagnostics for speeds + +// println (cqof) +// println (sqof) + + banner ("Quality of Fit (QoF) for counts") + println (Fit.showFitMap (cqof)) + banner ("Quality of Fit (QoF) for speeds") + println (Fit.showFitMap (sqof)) + +end recorderTest + diff --git a/src/main/scala/scalation/simulation/process/Transport.scala b/src/main/scala/scalation/simulation/process/Transport.scala index 06f5e82ec..d1924384c 100644 --- a/src/main/scala/scalation/simulation/process/Transport.scala +++ b/src/main/scala/scalation/simulation/process/Transport.scala @@ -64,7 +64,6 @@ class Transport (name: String, val from: Component, val to: Component, curve.setLine (p1, p2, bend) // curve.setLine (p1, pc, p2) // println ("loc = " + curve.getFirst) - end if //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** Give the location of the curve to be its starting point. 
@@ -149,7 +148,7 @@ class Transport (name: String, val from: Component, val to: Component, var loc = curve.next (DIAM, DIAM) // get the starting position for the entity/token actor.trajectory = curve.traj - for i <- 1 to steps do + cfor (0, steps) { _ => if loc != null then director.animate (actor, MoveToken, null, null, Array (loc.x, loc.y)) actor.schedule (duration / steps.toDouble) @@ -160,7 +159,7 @@ class Transport (name: String, val from: Component, val to: Component, actor.trajectory = curve.traj // println ("Transport.move: -- after loc = " + loc) end if - end for + } // cfor accum (onTransport) onTransport -= 1 diff --git a/src/main/scala/scalation/simulation/process/Vehicle.scala b/src/main/scala/scalation/simulation/process/Vehicle.scala index f1ba65535..06771ce12 100644 --- a/src/main/scala/scalation/simulation/process/Vehicle.scala +++ b/src/main/scala/scalation/simulation/process/Vehicle.scala @@ -16,23 +16,27 @@ import scala.collection.immutable.Map //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `Vehicle` object contains driver/vehicle characteristics/properties. 
+ * @see https://hypertextbook.com/facts/2001/MeredithBarricella.shtml */ object Vehicle: /** defaults values for driver/vehicle characteristics/properties (PUBLIC access required) + * units are meters and seconds * @see https://en.wikipedia.org/wiki/Intelligent_driver_model */ val def_prop = Map ("rt" -> 1.0, // driver reaction time - "amax" -> 2.0, // max acceleration - "bmax" -> -1.5, // max deceleration - "v0" -> 0.0, // starting velocity + "amax" -> 3.0, // max acceleration + "bmax" -> -3.5, // max deceleration (typically higher than acceleration) +// "v0" -> 0.0, // starting velocity, from a stopped position + "v0" -> 33.528, // starting velocity, sim segment of interstate "vmax" -> 33.528, // max velocity "T" -> 3.0, // safe min time headway "s" -> 5.0, // safe min distance headway "len" -> 4.0, // length of the vehicles "del" -> 4.0) // acceleration exponent (delta) - /** current values for driver/vehicle characteristics/properties + /** Current values for driver/vehicle characteristics/properties + * To change a property: Vehicle.prop("amax") = 4.0 */ private [process] var prop = def_prop @@ -50,7 +54,7 @@ object Vehicle: inline def del: Double = prop("del") // acceleration exponent (delta) //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the driver/vehicle characteristics/properties to the new property values. + /** Set all the driver/vehicle characteristics/properties to the new property values. 
* @param new_prop the new property values */ def setProps (new_prop: Map [String, Double]): Unit = prop = new_prop @@ -86,6 +90,10 @@ abstract class Vehicle (name_ : String, director: Model) extends SimActor (name_, director) with Dynamics: + // Each car needs to located itself in terms of what `Pathway` it is on and what node in that `Pathway` +// protected var myPathway: Pathway = null + private [process] var myPathNode: DoublyLinkedList [Vehicle]#Node = null + //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The abstract method, 'act', is defined in each subclass to provide specific * behavior. diff --git a/src/main/scala/scalation/simulation/process/example_1/TrafficTurn.scala b/src/main/scala/scalation/simulation/process/example_1/TrafficTurn.scala index cff36b433..bea9a2b82 100644 --- a/src/main/scala/scalation/simulation/process/example_1/TrafficTurn.scala +++ b/src/main/scala/scalation/simulation/process/example_1/TrafficTurn.scala @@ -110,7 +110,6 @@ class TrafficModelTurn (name: String = "TrafficTurn", reps: Int = 1, animating: else road(i + 4).lane(l).move () // add 4 for next segment (straight) sink((i + 2) % 4).leave () // add 2 - end if end act end Car diff --git a/src/main/scala/scalation/simulation/state/MarkovChain.scala b/src/main/scala/scalation/simulation/state/MarkovChain.scala index d29cd2d53..e7950e295 100644 --- a/src/main/scala/scalation/simulation/state/MarkovChain.scala +++ b/src/main/scala/scalation/simulation/state/MarkovChain.scala @@ -61,7 +61,7 @@ class MarkovChain (a: MatrixD): */ def next (π: VectorD, k: Int): VectorD = var p = π.copy - for i <- 1 to k do p = p *: a + cfor (0, k) { _ => p = p *: a } p end next @@ -151,7 +151,6 @@ class MarkovChain (a: MatrixD): else aniQ.add (AnimateCommand (CreateEdge, eid, shape, label, true, ecolor, Array (bend), 0.0, i, j)) - end if end for end if end animate @@ -168,7 +167,6 @@ class MarkovChain (a: MatrixD): if ! 
(a(i).sum =~ 1.0) then println (s"row $i sums to ${a(i).sum}") go = false - end if } // cfor go end isStochastic @@ -264,7 +262,7 @@ end markovChainTest2 var lose = 0 // number of games lost var win = 0 // number of games won - for it <- 1 to 10000 do // iterate playing the game + cfor (0, 10000) { _ => // iterate playing the game var j = 3 // enter game with j dollars var go = true // continue with the game while go do @@ -272,7 +270,7 @@ end markovChainTest2 if j == 0 then { println ("lose"); lose += 1; go = false } if j == 5 then { println ("win"); win += 1; go = false } end while - end for + } // cfor println (s"loses = $lose") println (s"wins = $win") diff --git a/src/main/scala/scalation/simulation/state/MarkovChainCT.scala b/src/main/scala/scalation/simulation/state/MarkovChainCT.scala index 155631ffa..1ef90a411 100644 --- a/src/main/scala/scalation/simulation/state/MarkovChainCT.scala +++ b/src/main/scala/scalation/simulation/state/MarkovChainCT.scala @@ -58,7 +58,6 @@ class MarkovCT (tr: MatrixD): jump(i, j) = if s =~ 0.0 then 0.0 else tr(i, j) / s else // on-diagonal jump(i, i) = if s =~ 0.0 then 1.0 else 0.0 - end if end for end for diff --git a/src/main/scala/scalation/simulation/tableau/Model.scala b/src/main/scala/scalation/simulation/tableau/Model.scala index eb7b81a20..f8f1c1fc6 100644 --- a/src/main/scala/scalation/simulation/tableau/Model.scala +++ b/src/main/scala/scalation/simulation/tableau/Model.scala @@ -171,7 +171,6 @@ class Model (name: String, m: Int, rv: Array [Variate], label_ : Array [String] n -= 1 et += dt; lt += n j += 1 - end if end while (VectorD (et), VectorD (lt)) end timeLine diff --git a/src/main/scala/scalation/theory/Theory.scala b/src/main/scala/scalation/theory/Theory.scala new file mode 100644 index 000000000..db792b428 --- /dev/null +++ b/src/main/scala/scalation/theory/Theory.scala @@ -0,0 +1,206 @@ + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** @author John Miller + * @version 
2.0 + * @date Sun Sep 28 18:02:55 EDT 2025 + * @see LICENSE (MIT style license file). + * + * @note Class Bringing Together the Elements of Modeling and Theory + */ + +package scalation +package theory + +import scala.collection.mutable.{LinkedHashSet => LSET} +import scala.runtime.ScalaRunTime.stringOf + +import scalation.database.table.LTable +import scalation.mathstat.{PlotM, VectorD} +import scalation.modeling.{FeatureSelection, Fit, FitM, Model, QoF, Predictor, Regression, SelectionTech, SimpleRegression, TaskType} +import scalation.modeling.forecasting.{Forecaster, Forecaster_Reg} + +import modeling.newFname + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Dataset` class supports loading data files (e.g., CSV files) and pre-processing + * them to create predictor/input matrices and response/output vectors. + * FIX -- extend to models that allow multiple responses/outputs (vector -> matrix). + * @param name the name of the dataset + * @param fileName the name of the file storing the dataset + * @parm ncols the number of columns in the dataset + * @param xcols the desired column numbers to take for predictors + * @param ycol the desired column number to take for the response + */ +case class Dataset (name: String, fileName: String, ncols: Int, xcols: Array [Int], ycol: Int): + + val rawData = LTable.load (fileName, name, ncols, null) // raw data table + val data = preProcess (rawData) // cleaned-up data table + val fname = xcols.map (data.schema (_)) // predictor feature/variable names + val ofname = "one" +: fname // names with intercept/one (1) term added + val rname = data.schema (ycol) // name of the response column + val (x, y) = data.toMatrixV (xcols, ycol) // (predictor matrix, response column) + val xy = x +^ y // combined xy matrix + val ox = VectorD.one (x.dim) +^: x // predictor matrix with one (1) term added + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** 
Pre-process the raw data that may contain missing values, strings, dates, + * id columns, zero-variance columns, and outliers. Return the cleaned-up data. + * @param tab the table/linked-relation containing the raw data + */ + def preProcess (tab: LTable): LTable = + tab // FIX -- to be implemented + end preProcess + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Show/print the table/linked-relation containing the data. + * @param tab the table/linked-relation containing the data + */ + def show (tab: LTable = data): Unit = tab.show () + +end Dataset + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `Theory` class provides a high-level unified way to run data science and + * machine learning models. + */ +case class Theory (): + + private val debug = debugf ("Theory", true) // debug function + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Perform Exploratory Data Analysis (EDA) using Simple Linear Regression y vs. x_j + * for each predictor variable x_j. + * FIX -- other more flexible options should be explored, exp. for time series + * @param dset the dataset to be explored + */ + def exploreData (dset: Dataset): Unit = + println (dset.xy.corr) + val (x, y, fname) = (dset.x, dset.y, dset.fname) + for j <- x.indices2 do + banner (s"Plot response y vs. predictor variable ${fname(j)}") + val mod = SimpleRegression (x(?, j), y, Array ("one", fname(j))) + mod.trainNtest ()() // train and test the model + end for + end exploreData + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Run all models in the given list. 
+ */ + def runModels (models: List [Model & Fit]): Unit = + for mod <- models do + banner (s"In-Sample Testing of model ${mod.modelName}") + mod.inSample_Test () // In-Sample Testing of full dataset + println (mod.summary ()) // FIX - only shows for last horizon + + banner (s"Validation Testing of model ${mod.modelName}") + mod.validate ()() // TnT: Train on training-set, Test on testing-set + println (mod.equation) // print the prediction equation using parameters from validate + // FIX -- override equation for each model + + banner (s"Cross-Validation Testing of model ${mod.modelName}") + if mod.taskType == TaskType.Predict then + val stats = mod.crossValidate () // Multiple TnT with Cross Validation + FitM.showQofStatTable (stats) + else if mod.taskType == TaskType.Forecast then + val fmod = mod.asInstanceOf [Forecaster & Fit] + fmod.setSkip (0) // can use values from training set to not skip any in test + fmod.rollValidate () // Multiple TnT with Rolling Validation + end for + end runModels + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Screen/reduce the features/variables based on model agnostic dependency/correlation + * analysis of the predictor and response variables. Performs a quick pre-screening. 
+ * @param mod the model to simplify by using fewer predictor variables + */ + def screenFeatures (mod: Model & Fit): Model & Fit = + val xy = mod.getXy + val (xcols, idx) = mod.screen (xy)() // x-columns and their indices + debug ("screenModel", s"for ${mod.modelName} selected column idx = $idx") + if mod.taskType == TaskType.Predict then + val pmod = mod.asInstanceOf [Predictor & Fit] + pmod.buildModel (xcols, newFname (mod.getFname, idx)) + else if mod.isInstanceOf [Forecaster_Reg & Fit] then + val fmod = mod.asInstanceOf [Forecaster_Reg & Fit] + println (s"fnames = ${stringOf (mod.getFname)}") + fmod.convertReg2Forc (LSET.from (idx.toArray)) + else + mod // model does not yet support screening + end screenFeatures + + //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + /** Select the features/variables based on model QoF metrics. Requires the model + * to be run multiple times. + * @param mod the model to simplify by using fewer predictor variables + * @param tech the feature selection technique to use (defaults to Backward) + */ + def selectFeatures (mod: Model & FeatureSelection & Fit, + tech: SelectionTech = SelectionTech.Backward): Model & Fit = + banner (s"Feature Selection Technique: $tech") + if mod.taskType == TaskType.Predict then + given qk: Int = QoF.rSqBar.ordinal + val rSq = mod.selectFeatures (tech, "many")._2 // R^2, R^2 bar, sMAPE, R^2 cv + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for Regression with $tech", lines = true) + mod.getBest.mod + else if mod.isInstanceOf [Forecaster_Reg & Fit] then + val fmod = mod.asInstanceOf [Forecaster_Reg & Fit] + given qk: Int = QoF.smapeC.ordinal + val rSq = mod.selectFeatures (tech, "none")._2 // R^2, R^2 bar, sMAPE, NO R^2 cv + new PlotM (null, rSq.transpose, Regression.metrics, s"R^2 vs n for Regression with $tech", lines = true) + fmod.convertReg2Forc (mod.getBest.mod_cols) + else + mod // model does not yet support feature selection + end selectFeatures + 
+end Theory + + +//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +/** The `theoryTest` main method test the `Theory` class. + * > runMain scalation.theory.theoryTest + */ +@main def theoryTest (): Unit = + +// import scalation.mathstat.Transform.RootForm + import scalation.modeling._ + import scalation.modeling.forecasting._ + + val datasets = Map ("Covid19" -> Dataset ("Covid19", "covid_19_weekly.csv", + 17, Array (1, 3, 4, 5, 6, 7, 8, 9, 10), 2), + "Influenza" -> Dataset ("Influenza", "national_illness_clip.csv", + 8, Array (1, 2, 3, 4, 6, 7), 5)) + + for (dname, dset) <- datasets do + dset.show () + + banner (s"Perform Exploratory Data Analysis (EDA) on dataset $dname") + val theory = Theory () + theory.exploreData (dset) + + val models = List (new Regression (dset.ox, dset.y, dset.ofname), +// new TranRegression (dset.ox, dset.y, dset.ofname, yℱ = RootForm ()), + SymbolicRegression.quadratic (dset.x, dset.y, dset.fname), + ARX (dset.x, dset.y, 6, dset.fname), + ARX_Quad (dset.x, dset.y, 6, dset.fname), + ARX_SR (dset.x, dset.y, 6, dset.fname)) + + banner (s"Apply the following models to dataset $dname") + for mod <- models do println (mod.modelName) + + banner (s"Analyze dataset $dname using full models") + theory.runModels (models) + + banner (s"Analyze dataset $dname using feature screened models") + val models2 = models.map (theory.screenFeatures (_)) + theory.runModels (models2) + + banner (s"Analyze dataset $dname using feature selected best models") + val models3 = models.map (theory.selectFeatures (_)) // from full models +// val models3 = models2.map (theory.selectFeatures (_)) // from screened models + theory.runModels (models3) +/* +*/ + +end theoryTest + diff --git a/src/main/scala/scalation/modeling/Variable.scala b/src/main/scala/scalation/theory/Variable.scala similarity index 94% rename from src/main/scala/scalation/modeling/Variable.scala rename to src/main/scala/scalation/theory/Variable.scala index 
f2287e178..214597b99 100644 --- a/src/main/scala/scalation/modeling/Variable.scala +++ b/src/main/scala/scalation/theory/Variable.scala @@ -5,20 +5,23 @@ * @date Sat May 18 14:57:50 EDT 2019 * @see LICENSE (MIT style license file). * - * @note Model Support: Meta-data about a Variable + * @note Theory/Model Support: Meta-data about a Variable */ package scalation -package modeling +package theory import scalation.mathstat._ //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `VariableKind` enumeration indicates the kind of variable. + * @param name the name of the variable kind */ -enum VariableKind: +enum VariableKind (val name: String): - case Categorical, Ordinal, Continuous + case Categorical extends VariableKind ("Categorical") + case Ordinal extends VariableKind ("Ordinal") + case Continuous extends VariableKind ("Continuous") end VariableKind diff --git a/target/.history3 b/target/.history3 deleted file mode 100644 index f10d1c862..000000000 --- a/target/.history3 +++ /dev/null @@ -1,2 +0,0 @@ -1743563818937:compile -1743564199509:doc diff --git a/target/global-logging/sbt-global-log2269071071250128186.log b/target/global-logging/sbt-global-log2269071071250128186.log deleted file mode 100644 index d71c4c5b4..000000000 --- a/target/global-logging/sbt-global-log2269071071250128186.log +++ /dev/null @@ -1,6 +0,0 @@ -[debug] > Exec(compile, Some(4e666a5a-0970-428a-ba72-539e51e84700), Some(CommandSource(console0))) -[debug] Evaluating tasks: Compile / compile -[debug] Running task... Cancel: Signal, check cycles: false, forcegc: true -[info] compiling 567 Scala sources to C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes ... 
-[success] Total time: 264 s (04:24), completed Apr 1, 2025, 11:21:23 PM -[debug] > Exec(shell, None, None) diff --git a/target/global-logging/sbt-global-log5252979687430671642.log b/target/global-logging/sbt-global-log5252979687430671642.log deleted file mode 100644 index 83d6f2d7f..000000000 --- a/target/global-logging/sbt-global-log5252979687430671642.log +++ /dev/null @@ -1,10 +0,0 @@ -[debug] > Exec(doc, Some(4368a4e6-06af-40e5-83bd-3da16a37c6ab), Some(CommandSource(console0))) -[debug] Evaluating tasks: Compile / doc -[debug] Running task... Cancel: Signal, check cycles: false, forcegc: true -[info] Main Scala API documentation to C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\api... -[info] Skipping unused scalacOptions: -Werror, -new-syntax -[warn] Flag -classpath set repeatedly -[warn] one warning found -[info] Main Scala API documentation successful. -[success] Total time: 136 s (02:16), completed Apr 1, 2025, 11:25:35 PM -[debug] > Exec(shell, None, None) diff --git a/target/global-logging/sbt-global-log8519816407780403268.log b/target/global-logging/sbt-global-log8519816407780403268.log deleted file mode 100644 index 2aeaac04c..000000000 --- a/target/global-logging/sbt-global-log8519816407780403268.log +++ /dev/null @@ -1,84 +0,0 @@ -[debug] > Exec(early(addDefaultCommands), None, None) -[debug] > Exec(addDefaultCommands, None, None) -[debug] > Exec(early(initialize), None, None) -[debug] > Exec(initialize, None, None) -[debug] > Exec(boot, None, None) -[debug] > Exec(writeSbtVersion, None, None) -[debug] > Exec(reload, None, None) -[debug] > Exec(sbtStashOnFailure, None, None) -[debug] > Exec(onFailure loadFailed, None, None) -[debug] > Exec(loadp, None, None) -[info] welcome to sbt 1.10.1 (Oracle Corporation Java 21) -[debug] Load.loadUnit: plugins took 1473.1282ms -[debug] Load.loadUnit: defsScala took 0.7578ms -[debug] [Loading] Scanning directory C:\Users\youse\.sbt\1.0\plugins -[debug] [Loading] Found non-root 
projects -[debug] [Loading] Done in C:\Users\youse\.sbt\1.0\plugins, returning: () -[debug] deducing auto plugins based on known facts Set(Atom(sbt.plugins.CorePlugin)) and clauses Clauses(Clause(Atom(sbt.plugins.JvmPlugin),Set(Atom(sbt.plugins.IvyPlugin))) -[debug] Clause(Atom(sbt.ScriptedPlugin),Set(Atom(sbt.plugins.JvmPlugin))) -[debug] Clause(Atom(sbt.plugins.SbtPlugin),Set(Atom(sbt.ScriptedPlugin))) -[debug] Clause(Atom(sbt.plugins.SemanticdbPlugin),Set(Atom(sbt.plugins.JvmPlugin))) -[debug] Clause(Atom(sbt.plugins.JUnitXmlReportPlugin),Set(Atom(sbt.plugins.JvmPlugin))) -[debug] Clause(Atom(sbt.plugins.MiniDependencyTreePlugin),Set(Atom(sbt.plugins.JvmPlugin))) -[debug] Clause(Atom(sbt.plugins.CorePlugin),Set(Atom(sbt.plugins.IvyPlugin))) -[debug] Clause(Atom(sbt.plugins.IvyPlugin),Set(Atom(sbt.plugins.JvmPlugin))) -[debug] Clause(Atom(sbt.plugins.JvmPlugin),Set(Atom(sbt.plugins.SemanticdbPlugin))) -[debug] Clause(Atom(sbt.plugins.JvmPlugin),Set(Atom(sbt.plugins.JUnitXmlReportPlugin))) -[debug] Clause(Atom(sbt.plugins.CorePlugin),Set(Atom(sbt.plugins.Giter8TemplatePlugin))) -[debug] Clause(Atom(sbt.plugins.JvmPlugin),Set(Atom(sbt.plugins.MiniDependencyTreePlugin)))) -[debug] :: deduced result: Matched(sbt.plugins.CorePlugin,sbt.plugins.Giter8TemplatePlugin,sbt.plugins.IvyPlugin,sbt.plugins.JvmPlugin,sbt.plugins.MiniDependencyTreePlugin,sbt.plugins.JUnitXmlReportPlugin,sbt.plugins.SemanticdbPlugin) -[debug] Plugins.deducer#function took 104.922 ms -[debug] Load.resolveProject(global-plugins) took 367.982ms -[debug] Load.loadTransitive: finalizeProject(Project(id global-plugins, base: C:\Users\youse\.sbt\1.0\plugins, plugins: List())) took 775.8179ms -[debug] [Loading] Done in C:\Users\youse\.sbt\1.0\plugins, returning: (global-plugins) -[debug] Load.loadUnit: loadedProjectsRaw took 1227.1973ms -[debug] Load.loadUnit: cleanEvalClasses took 0.4961ms -[debug] Load.loadUnit(file:/C:/Users/youse/.sbt/1.0/plugins/, ...) 
took 2855.3203ms -[debug] Load.apply: load took 3782.5726ms -[debug] Load.apply: resolveProjects took 37.1452ms -[debug] Load.apply: finalTransforms took 234.7698ms -[debug] Load.apply: config.delegates took 67.5104ms -[debug] Load.apply: Def.make(settings)... took 2900.0754ms -[debug] Load.apply: structureIndex took 667.9205ms -[debug] Load.apply: mkStreams took 13.1994ms -[info] loading global plugins from C:\Users\youse\.sbt\1.0\plugins -[debug] Running task... Cancel: Signal, check cycles: false, forcegc: true -[debug] Load.defaultLoad until apply took 15525.9811ms -[debug] Load.loadUnit: plugins took 67.9228ms -[debug] Load.loadUnit: defsScala took 0.0097ms -[debug] [Loading] Scanning directory C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\project -[debug] [Loading] Found non-root projects -[debug] [Loading] Done in C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\project, returning: () -[debug] deducing auto plugins based on known facts Set(Atom(sbt.plugins.CorePlugin)) and clauses Clauses(Clause(Atom(sbt.plugins.JvmPlugin),Set(Atom(sbt.plugins.IvyPlugin))) -[debug] Clause(Atom(sbt.ScriptedPlugin),Set(Atom(sbt.plugins.JvmPlugin))) -[debug] Clause(Atom(sbt.plugins.SbtPlugin),Set(Atom(sbt.ScriptedPlugin))) -[debug] Clause(Atom(sbt.plugins.SemanticdbPlugin),Set(Atom(sbt.plugins.JvmPlugin))) -[debug] Clause(Atom(sbt.plugins.JUnitXmlReportPlugin),Set(Atom(sbt.plugins.JvmPlugin))) -[debug] Clause(Atom(sbt.plugins.MiniDependencyTreePlugin),Set(Atom(sbt.plugins.JvmPlugin))) -[debug] Clause(Atom(sbt.plugins.CorePlugin),Set(Atom(sbt.plugins.IvyPlugin))) -[debug] Clause(Atom(sbt.plugins.IvyPlugin),Set(Atom(sbt.plugins.JvmPlugin))) -[debug] Clause(Atom(sbt.plugins.JvmPlugin),Set(Atom(sbt.plugins.SemanticdbPlugin))) -[debug] Clause(Atom(sbt.plugins.JvmPlugin),Set(Atom(sbt.plugins.JUnitXmlReportPlugin))) -[debug] Clause(Atom(sbt.plugins.CorePlugin),Set(Atom(sbt.plugins.Giter8TemplatePlugin))) -[debug] 
Clause(Atom(sbt.plugins.JvmPlugin),Set(Atom(sbt.plugins.MiniDependencyTreePlugin)))) -[debug] :: deduced result: Matched(sbt.plugins.CorePlugin,sbt.plugins.Giter8TemplatePlugin,sbt.plugins.IvyPlugin,sbt.plugins.JvmPlugin,sbt.plugins.MiniDependencyTreePlugin,sbt.plugins.JUnitXmlReportPlugin,sbt.plugins.SemanticdbPlugin) -[debug] Plugins.deducer#function took 5.4148 ms -[debug] Load.resolveProject(scalation_2-0-build) took 3.0296ms -[debug] Load.loadTransitive: finalizeProject(Project(id scalation_2-0-build, base: C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\project, plugins: List())) took 9.571ms -[debug] [Loading] Done in C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\project, returning: (scalation_2-0-build) -[debug] Load.loadUnit: loadedProjectsRaw took 14.362ms -[debug] Load.loadUnit: cleanEvalClasses took 0.0346ms -[debug] Load.loadUnit(file:/C:/Users/youse/OneDrive/Documents/New%20Scalation/scalation_2.0/project/, ...) took 85.0626ms -[debug] Load.apply: load took 90.8556ms -[debug] Load.apply: resolveProjects took 0.2315ms -[debug] Load.apply: finalTransforms took 24.729ms -[debug] Load.apply: config.delegates took 0.6802ms -[debug] Load.apply: Def.make(settings)... took 228.6623ms -[debug] Load.apply: structureIndex took 76.1481ms -[debug] Load.apply: mkStreams took 0.0036ms -[info] loading project definition from C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\project -[debug] Running task... Cancel: Signal, check cycles: false, forcegc: true -[debug] Load.loadUnit: plugins took 3995.2992ms -[debug] Load.loadUnit: defsScala took 0.0071ms -[debug] [Loading] Scanning directory C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0 -[debug] Load.loadUnit: mkEval took 15.863ms diff --git a/target/scala-3.6.4/classes/index.html b/target/scala-3.6.4/classes/index.html deleted file mode 100644 index a920d3e22..000000000 --- a/target/scala-3.6.4/classes/index.html +++ /dev/null @@ -1,8 +0,0 @@ - - -

    Source files in scala Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/BiMap$package$.class b/target/scala-3.6.4/classes/scalation/BiMap$package$.class deleted file mode 100644 index 8ede1bfc6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/BiMap$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/BiMap$package.class b/target/scala-3.6.4/classes/scalation/BiMap$package.class deleted file mode 100644 index 5a1436653..000000000 Binary files a/target/scala-3.6.4/classes/scalation/BiMap$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/BiMap$package.tasty b/target/scala-3.6.4/classes/scalation/BiMap$package.tasty deleted file mode 100644 index 8a84cb1f0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/BiMap$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/BiMap.class b/target/scala-3.6.4/classes/scalation/BiMap.class deleted file mode 100644 index 26c49e808..000000000 Binary files a/target/scala-3.6.4/classes/scalation/BiMap.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/BiMap.tasty b/target/scala-3.6.4/classes/scalation/BiMap.tasty deleted file mode 100644 index f02eca8ed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/BiMap.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Bool$package$.class b/target/scala-3.6.4/classes/scalation/Bool$package$.class deleted file mode 100644 index d94d8ba64..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Bool$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Bool$package.class b/target/scala-3.6.4/classes/scalation/Bool$package.class deleted file mode 100644 index 2cb238517..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Bool$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Bool$package.tasty 
b/target/scala-3.6.4/classes/scalation/Bool$package.tasty deleted file mode 100644 index 269e71672..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Bool$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Calc$.class b/target/scala-3.6.4/classes/scalation/Calc$.class deleted file mode 100644 index 4f8738ac1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Calc$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Calc$package$.class b/target/scala-3.6.4/classes/scalation/Calc$package$.class deleted file mode 100644 index b301c1f33..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Calc$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Calc$package.class b/target/scala-3.6.4/classes/scalation/Calc$package.class deleted file mode 100644 index 8a7af91e8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Calc$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Calc$package.tasty b/target/scala-3.6.4/classes/scalation/Calc$package.tasty deleted file mode 100644 index 9311b7f9a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Calc$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Calc.class b/target/scala-3.6.4/classes/scalation/Calc.class deleted file mode 100644 index b3b5e9b57..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Calc.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Calc.tasty b/target/scala-3.6.4/classes/scalation/Calc.tasty deleted file mode 100644 index 39be0efa0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Calc.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/CircularQueue$package$.class b/target/scala-3.6.4/classes/scalation/CircularQueue$package$.class deleted file mode 100644 index 2eace54a9..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/CircularQueue$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/CircularQueue$package.class b/target/scala-3.6.4/classes/scalation/CircularQueue$package.class deleted file mode 100644 index 6f5e9fd38..000000000 Binary files a/target/scala-3.6.4/classes/scalation/CircularQueue$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/CircularQueue$package.tasty b/target/scala-3.6.4/classes/scalation/CircularQueue$package.tasty deleted file mode 100644 index af7fe0532..000000000 Binary files a/target/scala-3.6.4/classes/scalation/CircularQueue$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/CircularQueue.class b/target/scala-3.6.4/classes/scalation/CircularQueue.class deleted file mode 100644 index 4b616659e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/CircularQueue.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/CircularQueue.tasty b/target/scala-3.6.4/classes/scalation/CircularQueue.tasty deleted file mode 100644 index 0f2e95bcf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/CircularQueue.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/CommonFunctions$package$.class b/target/scala-3.6.4/classes/scalation/CommonFunctions$package$.class deleted file mode 100644 index b7b4c0393..000000000 Binary files a/target/scala-3.6.4/classes/scalation/CommonFunctions$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/CommonFunctions$package.class b/target/scala-3.6.4/classes/scalation/CommonFunctions$package.class deleted file mode 100644 index ab44bc38c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/CommonFunctions$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/CommonFunctions$package.tasty b/target/scala-3.6.4/classes/scalation/CommonFunctions$package.tasty 
deleted file mode 100644 index 29d41b024..000000000 Binary files a/target/scala-3.6.4/classes/scalation/CommonFunctions$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Coordinates$package$.class b/target/scala-3.6.4/classes/scalation/Coordinates$package$.class deleted file mode 100644 index 5d7822d3c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Coordinates$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Coordinates$package.class b/target/scala-3.6.4/classes/scalation/Coordinates$package.class deleted file mode 100644 index 3f52b6733..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Coordinates$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Coordinates$package.tasty b/target/scala-3.6.4/classes/scalation/Coordinates$package.tasty deleted file mode 100644 index d764f1b27..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Coordinates$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Coordinates.class b/target/scala-3.6.4/classes/scalation/Coordinates.class deleted file mode 100644 index eb6fc26c0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Coordinates.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Coordinates.tasty b/target/scala-3.6.4/classes/scalation/Coordinates.tasty deleted file mode 100644 index 279c8573b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Coordinates.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Counter$.class b/target/scala-3.6.4/classes/scalation/Counter$.class deleted file mode 100644 index cb7e55a63..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Counter$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Counter$package$.class b/target/scala-3.6.4/classes/scalation/Counter$package$.class deleted file mode 100644 index 
fdb3c6c9d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Counter$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Counter$package.class b/target/scala-3.6.4/classes/scalation/Counter$package.class deleted file mode 100644 index 9910afcd0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Counter$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Counter$package.tasty b/target/scala-3.6.4/classes/scalation/Counter$package.tasty deleted file mode 100644 index 07462b9fb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Counter$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Counter.class b/target/scala-3.6.4/classes/scalation/Counter.class deleted file mode 100644 index 7b13c5548..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Counter.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Counter.tasty b/target/scala-3.6.4/classes/scalation/Counter.tasty deleted file mode 100644 index aa61b5f67..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Counter.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$ListIterator$.class b/target/scala-3.6.4/classes/scalation/DoublyLinkedList$ListIterator$.class deleted file mode 100644 index 2ab45ea4f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$ListIterator$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$ListIterator.class b/target/scala-3.6.4/classes/scalation/DoublyLinkedList$ListIterator.class deleted file mode 100644 index 6d7830410..000000000 Binary files a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$ListIterator.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$Node$.class b/target/scala-3.6.4/classes/scalation/DoublyLinkedList$Node$.class deleted file 
mode 100644 index 2283eaa6c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$Node$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$Node.class b/target/scala-3.6.4/classes/scalation/DoublyLinkedList$Node.class deleted file mode 100644 index 2ca95bf93..000000000 Binary files a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$Node.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$NodeIterator$.class b/target/scala-3.6.4/classes/scalation/DoublyLinkedList$NodeIterator$.class deleted file mode 100644 index 51bbd1aa0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$NodeIterator$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$NodeIterator.class b/target/scala-3.6.4/classes/scalation/DoublyLinkedList$NodeIterator.class deleted file mode 100644 index 9f6a28a2e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$NodeIterator.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$package$.class b/target/scala-3.6.4/classes/scalation/DoublyLinkedList$package$.class deleted file mode 100644 index 8424c38e8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$package.class b/target/scala-3.6.4/classes/scalation/DoublyLinkedList$package.class deleted file mode 100644 index c1272b769..000000000 Binary files a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$package.tasty b/target/scala-3.6.4/classes/scalation/DoublyLinkedList$package.tasty deleted file mode 100644 index bb2175912..000000000 Binary files a/target/scala-3.6.4/classes/scalation/DoublyLinkedList$package.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/DoublyLinkedList.class b/target/scala-3.6.4/classes/scalation/DoublyLinkedList.class deleted file mode 100644 index a2a4e064b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/DoublyLinkedList.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/DoublyLinkedList.scala.bak b/target/scala-3.6.4/classes/scalation/DoublyLinkedList.scala.bak deleted file mode 100644 index c4c843723..000000000 --- a/target/scala-3.6.4/classes/scalation/DoublyLinkedList.scala.bak +++ /dev/null @@ -1,211 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Feb 25 20:55:28 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Data Structure: Doubly Linked List - */ - -package scalation - -import scala.collection.mutable.AbstractIterable -import scala.reflect.ClassTag - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `DoubleLinked` class provides a data structure implementing mutable doubly-linked - * lists. - * succ --> --> - * tail (last car) --> [e1] [e2] [e3] <-- head (lead car) - * pred <-- <-- - * @tparam A the type of the elements/values in the list - */ -class DoublyLinkedList [A: ClassTag] - extends AbstractIterable [A] - with Serializable: - - private val debug = debugf ("DoublyLinkedList", true) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `Node` inner case class wraps elements in nodes for double linkage. 
- * @param elem the element - * @param pred the predecessor node - * @param succ the successor node - */ - case class Node (elem: A, var pred: Node, var succ: Node): - def next: Node = succ - def prev: Node = pred - override def toString: String = s"Node ($elem)" - end Node - - private var head_ : Node = null // head node (lead car) - private var tail_ : Node = null // tail node (last car) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `NodeIterator` inner class supports iterating over all the nodes in this list. - * @param ns the starting node (defaults to tail) - */ - class NodeIterator (ns: Node = tail_) extends Iterator [Node]: - var n = ns - def hasNext: Boolean = n != null - def next (): Node = { val n_ = n; n = n.succ; n_ } - end NodeIterator - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the nodes in this list. - * @see scala.collection.IterableOnce - */ - def nodeIterator: Iterator [Node] = new NodeIterator () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `ListIterator` inner class supports iterating over all the elements in this list. - * @param ns the starting leaf node (defaults to tail) - */ - class ListIterator (ns: Node = tail_) extends Iterator [A]: - var n = ns - def hasNext: Boolean = n != null - def next (): A = { val n_ = n; n = n.succ; n_.elem } - end ListIterator - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the elements in this list. - * @see scala.collection.IterableOnce - */ - def iterator: Iterator [A] = new ListIterator () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retreive the element in node nn (e.g., the current car). 
- * @param nn the node containing the sought element - */ - def elemAt (nn: Node): A = nn.elem - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the lead/first element/node in the list (e.g, node holding the lead car). - */ - override def head: A = head_.elem - def headNode: Node = head_ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the trail/last element/node in the list (e.g, node holding the trail car). - */ - override def last: A = tail_.elem - def lastNode: Node = tail_ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether the list is empty (head and tail are null). - */ - override def isEmpty: Boolean = head_ == null - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add the first element to an empty list and return the new node nn. - * @param elm the element to be added - */ - def addFirst (elm: A): Node = - val nn = Node (elm, null, null) // make a new node nn - head_ = nn - tail_ = nn - nn - end addFirst - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add the new element into the list AFTER the given predecessor node pn and - * return the new node nn. 
- * Relink: _pn_ <-> sn TO _pn_ <-> nn <-> sn - * @param elm the element to be added - * @param pn the predecessor node (use head if not given) - */ - def addAfter (elm: A, pn: Node = head_): Node = - if pn == null || isEmpty then - addFirst (elm) - else - val sn = pn.succ // successor node sn - val nn = Node (elm, pn, sn) // make a new node nn - pn.succ = nn // link forward - if sn != null then sn.pred = nn // link backward - - if pn == head_ then head_ = nn // if pn was head, reset to nn - debug ("addAfter", s"pn = $pn, nn = $nn, sn = $sn") - nn - end addAfter - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add the new element into the list BEFORE the given successor node sn and - * return the new node nn. - * Relink: pn <-> _sn_ TO pn <-> nn <-> _sn_ - * @param elm the element to be added - * @param sn the successor node (use tail if not given) - */ - def add (elm: A, sn: Node = tail_): Node = - if sn == null || isEmpty then - addFirst (elm) - else - val pn = sn.pred // predecessor node sn - val nn = Node (elm, pn, sn) // make a new node nn - sn.pred = nn // link backward - if pn != null then pn.succ = nn // link forward - - if sn == tail_ then tail_ = nn // if sn was tail, reset to nn - debug ("add", s"pn = $pn, nn = $nn, sn = $sn") - nn - end add - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove the node nn from the linked list. 
- * Relink: pn <-> nn <-> sn TO pn <-> sn - * @param nn the node to remove (unlink) - */ - def remove (nn: Node = head_): Unit = - val pn = nn.pred // nn's predecessor - val sn = nn.succ // nn's successor - if pn != null then pn.succ = sn // forward bypass of nn - if sn != null then sn.pred = pn // backward bypass of nn - - if nn == head_ then head_ = pn // if nn was head, reset to pn - if nn == tail_ then tail_ = sn // if nn was tail, reset to sn - - nn.pred = null // nn no longer links - nn.succ = null - debug ("remove", s"pn = $pn, sn = $sn") - end remove - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Clear the list of all nodes (and their elements). - */ - def clear (): Unit = { tail_ = null; head_ = null } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this doubly linked list to a string. - */ - override def toString (): String = - val sb = StringBuilder ("DoublyLinkedList (tail -") - for n <- nodeIterator do sb.append (s"> [ $n ] <-") - sb.append (" head)").mkString - end toString - -end DoublyLinkedList - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `doublyLinkedListTest` main function tests the `DoublyLinkedList` class. - * > runMain scalation.doublyLinkedListTest - */ -@main def doublyLinkedListTest (): Unit = - - banner ("Test the add method") - val dll = DoublyLinkedList [Int] () - for i <- 0 until 10 do dll.add (i) - println (dll) - - banner ("Test the addAfter method") - dll.clear () - for i <- 0 until 10 do dll.addAfter (i) - println (dll) - - banner ("Test the remove method") - while ! 
dll.isEmpty do - dll.remove () - println (dll) - -end doublyLinkedListTest - diff --git a/target/scala-3.6.4/classes/scalation/DoublyLinkedList.tasty b/target/scala-3.6.4/classes/scalation/DoublyLinkedList.tasty deleted file mode 100644 index 71466073f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/DoublyLinkedList.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Earth$.class b/target/scala-3.6.4/classes/scalation/Earth$.class deleted file mode 100644 index e7b0e81c8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Earth$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Earth.class b/target/scala-3.6.4/classes/scalation/Earth.class deleted file mode 100644 index 562a5e03b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Earth.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Earth.tasty b/target/scala-3.6.4/classes/scalation/Earth.tasty deleted file mode 100644 index c65d3d5ec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Earth.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/EasyWriter$.class b/target/scala-3.6.4/classes/scalation/EasyWriter$.class deleted file mode 100644 index 69533c3c0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/EasyWriter$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/EasyWriter$package$.class b/target/scala-3.6.4/classes/scalation/EasyWriter$package$.class deleted file mode 100644 index 53be468ae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/EasyWriter$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/EasyWriter$package.class b/target/scala-3.6.4/classes/scalation/EasyWriter$package.class deleted file mode 100644 index ef1021758..000000000 Binary files a/target/scala-3.6.4/classes/scalation/EasyWriter$package.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/EasyWriter$package.tasty b/target/scala-3.6.4/classes/scalation/EasyWriter$package.tasty deleted file mode 100644 index 815448c01..000000000 Binary files a/target/scala-3.6.4/classes/scalation/EasyWriter$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/EasyWriter.class b/target/scala-3.6.4/classes/scalation/EasyWriter.class deleted file mode 100644 index 97c824a18..000000000 Binary files a/target/scala-3.6.4/classes/scalation/EasyWriter.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/EasyWriter.tasty b/target/scala-3.6.4/classes/scalation/EasyWriter.tasty deleted file mode 100644 index e6d018331..000000000 Binary files a/target/scala-3.6.4/classes/scalation/EasyWriter.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Fib$.class b/target/scala-3.6.4/classes/scalation/Fib$.class deleted file mode 100644 index bf3c8c539..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Fib$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Fib$package$.class b/target/scala-3.6.4/classes/scalation/Fib$package$.class deleted file mode 100644 index 3f0ef2a26..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Fib$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Fib$package.class b/target/scala-3.6.4/classes/scalation/Fib$package.class deleted file mode 100644 index 748802a20..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Fib$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Fib$package.tasty b/target/scala-3.6.4/classes/scalation/Fib$package.tasty deleted file mode 100644 index a548036c7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Fib$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Fib.class b/target/scala-3.6.4/classes/scalation/Fib.class deleted file mode 100644 
index 2f7b3dc33..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Fib.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Fib.tasty b/target/scala-3.6.4/classes/scalation/Fib.tasty deleted file mode 100644 index 68a2d5966..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Fib.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/FileReader$package$.class b/target/scala-3.6.4/classes/scalation/FileReader$package$.class deleted file mode 100644 index a93835fc5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/FileReader$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/FileReader$package.class b/target/scala-3.6.4/classes/scalation/FileReader$package.class deleted file mode 100644 index 68afd4b47..000000000 Binary files a/target/scala-3.6.4/classes/scalation/FileReader$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/FileReader$package.tasty b/target/scala-3.6.4/classes/scalation/FileReader$package.tasty deleted file mode 100644 index b38a4d6ba..000000000 Binary files a/target/scala-3.6.4/classes/scalation/FileReader$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/GenIndexHtml$package$.class b/target/scala-3.6.4/classes/scalation/GenIndexHtml$package$.class deleted file mode 100644 index 35f7c670f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/GenIndexHtml$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/GenIndexHtml$package.class b/target/scala-3.6.4/classes/scalation/GenIndexHtml$package.class deleted file mode 100644 index 244146ca9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/GenIndexHtml$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/GenIndexHtml$package.tasty b/target/scala-3.6.4/classes/scalation/GenIndexHtml$package.tasty deleted file mode 100644 index 
1b87cae0b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/GenIndexHtml$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/HyperParameter$package$.class b/target/scala-3.6.4/classes/scalation/HyperParameter$package$.class deleted file mode 100644 index ff3201c6d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/HyperParameter$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/HyperParameter$package.class b/target/scala-3.6.4/classes/scalation/HyperParameter$package.class deleted file mode 100644 index c566ff904..000000000 Binary files a/target/scala-3.6.4/classes/scalation/HyperParameter$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/HyperParameter$package.tasty b/target/scala-3.6.4/classes/scalation/HyperParameter$package.tasty deleted file mode 100644 index 9ed9f4b95..000000000 Binary files a/target/scala-3.6.4/classes/scalation/HyperParameter$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/HyperParameter.class b/target/scala-3.6.4/classes/scalation/HyperParameter.class deleted file mode 100644 index 3823a5277..000000000 Binary files a/target/scala-3.6.4/classes/scalation/HyperParameter.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/HyperParameter.tasty b/target/scala-3.6.4/classes/scalation/HyperParameter.tasty deleted file mode 100644 index 98da50acb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/HyperParameter.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/LatLong$.class b/target/scala-3.6.4/classes/scalation/LatLong$.class deleted file mode 100644 index 5373b812c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/LatLong$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/LatLong$package$.class b/target/scala-3.6.4/classes/scalation/LatLong$package$.class deleted file mode 100644 
index 443aa4924..000000000 Binary files a/target/scala-3.6.4/classes/scalation/LatLong$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/LatLong$package.class b/target/scala-3.6.4/classes/scalation/LatLong$package.class deleted file mode 100644 index dcf44c617..000000000 Binary files a/target/scala-3.6.4/classes/scalation/LatLong$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/LatLong$package.tasty b/target/scala-3.6.4/classes/scalation/LatLong$package.tasty deleted file mode 100644 index ab26fd1dd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/LatLong$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/LatLong.class b/target/scala-3.6.4/classes/scalation/LatLong.class deleted file mode 100644 index 0c172d12e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/LatLong.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/LatLong.tasty b/target/scala-3.6.4/classes/scalation/LatLong.tasty deleted file mode 100644 index e14801dac..000000000 Binary files a/target/scala-3.6.4/classes/scalation/LatLong.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/LatLong2CTM$.class b/target/scala-3.6.4/classes/scalation/LatLong2CTM$.class deleted file mode 100644 index 13bd33d75..000000000 Binary files a/target/scala-3.6.4/classes/scalation/LatLong2CTM$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/LatLong2CTM.class b/target/scala-3.6.4/classes/scalation/LatLong2CTM.class deleted file mode 100644 index 7ca3c282e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/LatLong2CTM.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/LatLong2CTM.tasty b/target/scala-3.6.4/classes/scalation/LatLong2CTM.tasty deleted file mode 100644 index e6e840416..000000000 Binary files a/target/scala-3.6.4/classes/scalation/LatLong2CTM.tasty and /dev/null 
differ diff --git a/target/scala-3.6.4/classes/scalation/LatLong2UTM$.class b/target/scala-3.6.4/classes/scalation/LatLong2UTM$.class deleted file mode 100644 index 7bef55f8b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/LatLong2UTM$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/LatLong2UTM.class b/target/scala-3.6.4/classes/scalation/LatLong2UTM.class deleted file mode 100644 index 769db59c8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/LatLong2UTM.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/LatLong2UTM.tasty b/target/scala-3.6.4/classes/scalation/LatLong2UTM.tasty deleted file mode 100644 index 48f2d306a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/LatLong2UTM.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Make_VectorI$package$.class b/target/scala-3.6.4/classes/scalation/Make_VectorI$package$.class deleted file mode 100644 index 7c4d59a2c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Make_VectorI$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Make_VectorI$package.class b/target/scala-3.6.4/classes/scalation/Make_VectorI$package.class deleted file mode 100644 index f247abba9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Make_VectorI$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Make_VectorI$package.tasty b/target/scala-3.6.4/classes/scalation/Make_VectorI$package.tasty deleted file mode 100644 index 4b480c3e4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Make_VectorI$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/MergeSortIndirect$.class b/target/scala-3.6.4/classes/scalation/MergeSortIndirect$.class deleted file mode 100644 index 9c89ddaaf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/MergeSortIndirect$.class and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/MergeSortIndirect$package$.class b/target/scala-3.6.4/classes/scalation/MergeSortIndirect$package$.class deleted file mode 100644 index 732e75c7d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/MergeSortIndirect$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/MergeSortIndirect$package.class b/target/scala-3.6.4/classes/scalation/MergeSortIndirect$package.class deleted file mode 100644 index d220030d4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/MergeSortIndirect$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/MergeSortIndirect$package.tasty b/target/scala-3.6.4/classes/scalation/MergeSortIndirect$package.tasty deleted file mode 100644 index 262903051..000000000 Binary files a/target/scala-3.6.4/classes/scalation/MergeSortIndirect$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/MergeSortIndirect.class b/target/scala-3.6.4/classes/scalation/MergeSortIndirect.class deleted file mode 100644 index 422409536..000000000 Binary files a/target/scala-3.6.4/classes/scalation/MergeSortIndirect.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/MergeSortIndirect.tasty b/target/scala-3.6.4/classes/scalation/MergeSortIndirect.tasty deleted file mode 100644 index af831f20f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/MergeSortIndirect.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/MultiArrayDeque$package$.class b/target/scala-3.6.4/classes/scalation/MultiArrayDeque$package$.class deleted file mode 100644 index 0684f06ea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/MultiArrayDeque$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/MultiArrayDeque$package$Car$1.class b/target/scala-3.6.4/classes/scalation/MultiArrayDeque$package$Car$1.class deleted file mode 100644 index 
e49197623..000000000 Binary files a/target/scala-3.6.4/classes/scalation/MultiArrayDeque$package$Car$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/MultiArrayDeque$package$Car$3$.class b/target/scala-3.6.4/classes/scalation/MultiArrayDeque$package$Car$3$.class deleted file mode 100644 index d5651a88b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/MultiArrayDeque$package$Car$3$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/MultiArrayDeque$package.class b/target/scala-3.6.4/classes/scalation/MultiArrayDeque$package.class deleted file mode 100644 index 315e6bb12..000000000 Binary files a/target/scala-3.6.4/classes/scalation/MultiArrayDeque$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/MultiArrayDeque$package.tasty b/target/scala-3.6.4/classes/scalation/MultiArrayDeque$package.tasty deleted file mode 100644 index 69eb290da..000000000 Binary files a/target/scala-3.6.4/classes/scalation/MultiArrayDeque$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/MultiArrayDeques.class b/target/scala-3.6.4/classes/scalation/MultiArrayDeques.class deleted file mode 100644 index cf31f3cb2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/MultiArrayDeques.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/MultiArrayDeques.tasty b/target/scala-3.6.4/classes/scalation/MultiArrayDeques.tasty deleted file mode 100644 index 7f6545b7b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/MultiArrayDeques.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/PriorityQueue$$anon$1.class b/target/scala-3.6.4/classes/scalation/PriorityQueue$$anon$1.class deleted file mode 100644 index 0a8b0af72..000000000 Binary files a/target/scala-3.6.4/classes/scalation/PriorityQueue$$anon$1.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/PriorityQueue$$anon$2.class b/target/scala-3.6.4/classes/scalation/PriorityQueue$$anon$2.class deleted file mode 100644 index c466d89a5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/PriorityQueue$$anon$2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/PriorityQueue$.class b/target/scala-3.6.4/classes/scalation/PriorityQueue$.class deleted file mode 100644 index 0662136dc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/PriorityQueue$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/PriorityQueue$ResizableArrayAccess.class b/target/scala-3.6.4/classes/scalation/PriorityQueue$ResizableArrayAccess.class deleted file mode 100644 index bf9f3db0a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/PriorityQueue$ResizableArrayAccess.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/PriorityQueue.class b/target/scala-3.6.4/classes/scalation/PriorityQueue.class deleted file mode 100644 index e4d1200cd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/PriorityQueue.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/PriorityQueue.tasty b/target/scala-3.6.4/classes/scalation/PriorityQueue.tasty deleted file mode 100644 index 5a2b4380b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/PriorityQueue.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Ring$package$.class b/target/scala-3.6.4/classes/scalation/Ring$package$.class deleted file mode 100644 index 13c4134fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Ring$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Ring$package.class b/target/scala-3.6.4/classes/scalation/Ring$package.class deleted file mode 100644 index 41933ea64..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Ring$package.class and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/Ring$package.tasty b/target/scala-3.6.4/classes/scalation/Ring$package.tasty deleted file mode 100644 index 8b9afc908..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Ring$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Ring.class b/target/scala-3.6.4/classes/scalation/Ring.class deleted file mode 100644 index 59dd8922b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Ring.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Ring.tasty b/target/scala-3.6.4/classes/scalation/Ring.tasty deleted file mode 100644 index 68f63de58..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Ring.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/SetExt$package$.class b/target/scala-3.6.4/classes/scalation/SetExt$package$.class deleted file mode 100644 index c359f7f6e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/SetExt$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/SetExt$package.class b/target/scala-3.6.4/classes/scalation/SetExt$package.class deleted file mode 100644 index c6ebc53a9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/SetExt$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/SetExt$package.tasty b/target/scala-3.6.4/classes/scalation/SetExt$package.tasty deleted file mode 100644 index ab7749656..000000000 Binary files a/target/scala-3.6.4/classes/scalation/SetExt$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/SimpleUniform$.class b/target/scala-3.6.4/classes/scalation/SimpleUniform$.class deleted file mode 100644 index f41c7480e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/SimpleUniform$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/SimpleUniform.class b/target/scala-3.6.4/classes/scalation/SimpleUniform.class 
deleted file mode 100644 index 57eb45062..000000000 Binary files a/target/scala-3.6.4/classes/scalation/SimpleUniform.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/SimpleUniform.tasty b/target/scala-3.6.4/classes/scalation/SimpleUniform.tasty deleted file mode 100644 index cd8233aab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/SimpleUniform.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/SkipList$package$.class b/target/scala-3.6.4/classes/scalation/SkipList$package$.class deleted file mode 100644 index 48266a450..000000000 Binary files a/target/scala-3.6.4/classes/scalation/SkipList$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/SkipList$package.class b/target/scala-3.6.4/classes/scalation/SkipList$package.class deleted file mode 100644 index 47738191e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/SkipList$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/SkipList$package.tasty b/target/scala-3.6.4/classes/scalation/SkipList$package.tasty deleted file mode 100644 index 87fba8120..000000000 Binary files a/target/scala-3.6.4/classes/scalation/SkipList$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/SkipList.class b/target/scala-3.6.4/classes/scalation/SkipList.class deleted file mode 100644 index 4fffb2d35..000000000 Binary files a/target/scala-3.6.4/classes/scalation/SkipList.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/SkipList.tasty b/target/scala-3.6.4/classes/scalation/SkipList.tasty deleted file mode 100644 index f6f100221..000000000 Binary files a/target/scala-3.6.4/classes/scalation/SkipList.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/SkipNodeType.class b/target/scala-3.6.4/classes/scalation/SkipNodeType.class deleted file mode 100644 index ebf1c8dc6..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/SkipNodeType.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/SkipNodeType.tasty b/target/scala-3.6.4/classes/scalation/SkipNodeType.tasty deleted file mode 100644 index 801cbc4e4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/SkipNodeType.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/TimeNum$$anon$1.class b/target/scala-3.6.4/classes/scalation/TimeNum$$anon$1.class deleted file mode 100644 index 07d5e1420..000000000 Binary files a/target/scala-3.6.4/classes/scalation/TimeNum$$anon$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/TimeNum$.class b/target/scala-3.6.4/classes/scalation/TimeNum$.class deleted file mode 100644 index cc8975214..000000000 Binary files a/target/scala-3.6.4/classes/scalation/TimeNum$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/TimeNum$package$.class b/target/scala-3.6.4/classes/scalation/TimeNum$package$.class deleted file mode 100644 index 61a893f9c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/TimeNum$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/TimeNum$package.class b/target/scala-3.6.4/classes/scalation/TimeNum$package.class deleted file mode 100644 index 2ac5ecc35..000000000 Binary files a/target/scala-3.6.4/classes/scalation/TimeNum$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/TimeNum$package.tasty b/target/scala-3.6.4/classes/scalation/TimeNum$package.tasty deleted file mode 100644 index 95d220a45..000000000 Binary files a/target/scala-3.6.4/classes/scalation/TimeNum$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/TimeNum.class b/target/scala-3.6.4/classes/scalation/TimeNum.class deleted file mode 100644 index 9ce3736d0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/TimeNum.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/TimeNum.tasty b/target/scala-3.6.4/classes/scalation/TimeNum.tasty deleted file mode 100644 index ce556498e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/TimeNum.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Timer$package$.class b/target/scala-3.6.4/classes/scalation/Timer$package$.class deleted file mode 100644 index 665c314e5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Timer$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Timer$package.class b/target/scala-3.6.4/classes/scalation/Timer$package.class deleted file mode 100644 index ed525b832..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Timer$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Timer$package.tasty b/target/scala-3.6.4/classes/scalation/Timer$package.tasty deleted file mode 100644 index 5355e41ce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Timer$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/UTM2LatLong$.class b/target/scala-3.6.4/classes/scalation/UTM2LatLong$.class deleted file mode 100644 index 3c92d4ca3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/UTM2LatLong$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/UTM2LatLong.class b/target/scala-3.6.4/classes/scalation/UTM2LatLong.class deleted file mode 100644 index b6d81510e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/UTM2LatLong.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/UTM2LatLong.tasty b/target/scala-3.6.4/classes/scalation/UTM2LatLong.tasty deleted file mode 100644 index 18f3307b5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/UTM2LatLong.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Unicode$.class b/target/scala-3.6.4/classes/scalation/Unicode$.class 
deleted file mode 100644 index fd40ba8bc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Unicode$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Unicode$package$.class b/target/scala-3.6.4/classes/scalation/Unicode$package$.class deleted file mode 100644 index 01ef0b39b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Unicode$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Unicode$package.class b/target/scala-3.6.4/classes/scalation/Unicode$package.class deleted file mode 100644 index cf780406d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Unicode$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Unicode$package.tasty b/target/scala-3.6.4/classes/scalation/Unicode$package.tasty deleted file mode 100644 index 608d0527e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Unicode$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Unicode.class b/target/scala-3.6.4/classes/scalation/Unicode.class deleted file mode 100644 index 242953dc3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Unicode.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Unicode.tasty b/target/scala-3.6.4/classes/scalation/Unicode.tasty deleted file mode 100644 index 77f8be3b4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Unicode.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Util$package$.class b/target/scala-3.6.4/classes/scalation/Util$package$.class deleted file mode 100644 index 6851dab49..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Util$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Util$package.class b/target/scala-3.6.4/classes/scalation/Util$package.class deleted file mode 100644 index f75004015..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/Util$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/Util$package.tasty b/target/scala-3.6.4/classes/scalation/Util$package.tasty deleted file mode 100644 index 921386801..000000000 Binary files a/target/scala-3.6.4/classes/scalation/Util$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/ValueType$package$.class b/target/scala-3.6.4/classes/scalation/ValueType$package$.class deleted file mode 100644 index 6bfb00760..000000000 Binary files a/target/scala-3.6.4/classes/scalation/ValueType$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/ValueType$package.class b/target/scala-3.6.4/classes/scalation/ValueType$package.class deleted file mode 100644 index 560291a29..000000000 Binary files a/target/scala-3.6.4/classes/scalation/ValueType$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/ValueType$package.tasty b/target/scala-3.6.4/classes/scalation/ValueType$package.tasty deleted file mode 100644 index 1bc6480b6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/ValueType$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/ValueTypeOrd$.class b/target/scala-3.6.4/classes/scalation/ValueTypeOrd$.class deleted file mode 100644 index 07b7ea10e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/ValueTypeOrd$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/ValueTypeOrd.class b/target/scala-3.6.4/classes/scalation/ValueTypeOrd.class deleted file mode 100644 index 1339bbcf5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/ValueTypeOrd.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/ValueTypeOrd.tasty b/target/scala-3.6.4/classes/scalation/ValueTypeOrd.tasty deleted file mode 100644 index 4f5e7abb3..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/ValueTypeOrd.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/AnimateCommand$.class b/target/scala-3.6.4/classes/scalation/animation/AnimateCommand$.class deleted file mode 100644 index 364db679d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/AnimateCommand$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/AnimateCommand.class b/target/scala-3.6.4/classes/scalation/animation/AnimateCommand.class deleted file mode 100644 index 46de4e181..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/AnimateCommand.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/AnimateCommand.tasty b/target/scala-3.6.4/classes/scalation/animation/AnimateCommand.tasty deleted file mode 100644 index c85ce5d7f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/AnimateCommand.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/Animator.class b/target/scala-3.6.4/classes/scalation/animation/Animator.class deleted file mode 100644 index f267efa33..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/Animator.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/Animator.tasty b/target/scala-3.6.4/classes/scalation/animation/Animator.tasty deleted file mode 100644 index 59f2abb43..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/Animator.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$1.class b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$1.class deleted file mode 100644 index dc2483e38..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$10.class 
b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$10.class deleted file mode 100644 index 70bdc37a3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$10.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$11.class b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$11.class deleted file mode 100644 index 8df9c7547..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$11.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$12.class b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$12.class deleted file mode 100644 index 27c8c20fe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$12.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$13.class b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$13.class deleted file mode 100644 index f1ae7999c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$13.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$14.class b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$14.class deleted file mode 100644 index 800408681..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$14.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$15.class b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$15.class deleted file mode 100644 index af107e208..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$15.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$16.class 
b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$16.class deleted file mode 100644 index cd021d357..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$16.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$17.class b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$17.class deleted file mode 100644 index 8ffb4e2af..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$17.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$18.class b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$18.class deleted file mode 100644 index 6ad4c3a68..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$18.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$2.class b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$2.class deleted file mode 100644 index 75c772bc7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$3.class b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$3.class deleted file mode 100644 index 7d14cd598..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$4.class b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$4.class deleted file mode 100644 index 7493be04b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$5.class 
b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$5.class deleted file mode 100644 index 586534d82..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$6.class b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$6.class deleted file mode 100644 index 2fcb96be8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$7.class b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$7.class deleted file mode 100644 index 8b81eaee6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$8.class b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$8.class deleted file mode 100644 index af79b4590..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$8.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$9.class b/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$9.class deleted file mode 100644 index 23d406fda..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$$anon$9.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType$.class b/target/scala-3.6.4/classes/scalation/animation/CommandType$.class deleted file mode 100644 index bdef6e831..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType.class b/target/scala-3.6.4/classes/scalation/animation/CommandType.class deleted file mode 100644 
index 136b43e48..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/CommandType.tasty b/target/scala-3.6.4/classes/scalation/animation/CommandType.tasty deleted file mode 100644 index 375314e02..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/CommandType.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/Counter$.class b/target/scala-3.6.4/classes/scalation/animation/Counter$.class deleted file mode 100644 index 493b78478..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/Counter$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/Counter.class b/target/scala-3.6.4/classes/scalation/animation/Counter.class deleted file mode 100644 index 9a46d2217..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/Counter.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/Counter.tasty b/target/scala-3.6.4/classes/scalation/animation/Counter.tasty deleted file mode 100644 index daccec130..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/Counter.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/DgAnimator$.class b/target/scala-3.6.4/classes/scalation/animation/DgAnimator$.class deleted file mode 100644 index ccae555ff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/DgAnimator$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/DgAnimator$Canvas.class b/target/scala-3.6.4/classes/scalation/animation/DgAnimator$Canvas.class deleted file mode 100644 index a9b7e6c23..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/DgAnimator$Canvas.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/DgAnimator$package$.class 
b/target/scala-3.6.4/classes/scalation/animation/DgAnimator$package$.class deleted file mode 100644 index f9ff56426..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/DgAnimator$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/DgAnimator$package.class b/target/scala-3.6.4/classes/scalation/animation/DgAnimator$package.class deleted file mode 100644 index 438952758..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/DgAnimator$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/DgAnimator$package.tasty b/target/scala-3.6.4/classes/scalation/animation/DgAnimator$package.tasty deleted file mode 100644 index 01fbee193..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/DgAnimator$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/DgAnimator.class b/target/scala-3.6.4/classes/scalation/animation/DgAnimator.class deleted file mode 100644 index db55a26a5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/DgAnimator.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/DgAnimator.tasty b/target/scala-3.6.4/classes/scalation/animation/DgAnimator.tasty deleted file mode 100644 index 5ad3c9f2c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/DgAnimator.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/Dgraph$.class b/target/scala-3.6.4/classes/scalation/animation/Dgraph$.class deleted file mode 100644 index 443faafe2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/Dgraph$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/Dgraph$Edge$.class b/target/scala-3.6.4/classes/scalation/animation/Dgraph$Edge$.class deleted file mode 100644 index a5a047f4b..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/animation/Dgraph$Edge$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/Dgraph$Edge.class b/target/scala-3.6.4/classes/scalation/animation/Dgraph$Edge.class deleted file mode 100644 index a227e7ff7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/Dgraph$Edge.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/Dgraph$Node$.class b/target/scala-3.6.4/classes/scalation/animation/Dgraph$Node$.class deleted file mode 100644 index 5b5c4369d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/Dgraph$Node$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/Dgraph$Node.class b/target/scala-3.6.4/classes/scalation/animation/Dgraph$Node.class deleted file mode 100644 index 5beb2ad15..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/Dgraph$Node.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/Dgraph$Token$.class b/target/scala-3.6.4/classes/scalation/animation/Dgraph$Token$.class deleted file mode 100644 index 453b56b4a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/Dgraph$Token$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/Dgraph$Token.class b/target/scala-3.6.4/classes/scalation/animation/Dgraph$Token.class deleted file mode 100644 index 4a7d4348e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/Dgraph$Token.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/Dgraph$package$.class b/target/scala-3.6.4/classes/scalation/animation/Dgraph$package$.class deleted file mode 100644 index b48b52da4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/Dgraph$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/Dgraph$package.class 
b/target/scala-3.6.4/classes/scalation/animation/Dgraph$package.class deleted file mode 100644 index 9b7493e81..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/Dgraph$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/Dgraph$package.tasty b/target/scala-3.6.4/classes/scalation/animation/Dgraph$package.tasty deleted file mode 100644 index 229fe7cb9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/Dgraph$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/Dgraph.class b/target/scala-3.6.4/classes/scalation/animation/Dgraph.class deleted file mode 100644 index e513979ff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/Dgraph.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/Dgraph.scala.bak2 b/target/scala-3.6.4/classes/scalation/animation/Dgraph.scala.bak2 deleted file mode 100644 index 8d0ba4f21..000000000 --- a/target/scala-3.6.4/classes/scalation/animation/Dgraph.scala.bak2 +++ /dev/null @@ -1,576 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Sep 21 15:05:06 EDT 2009 - * @see LICENSE (MIT style license file). - * - * @note Graph Structure Suitable for Animation - */ - -package scalation -package animation - -import scala.collection.mutable.{HashSet, ListBuffer} -import scala.math.{abs, atan2, cos, Pi, sin} - -import scalation.mathstat.VectorD -import scalation.scala2d._ -import scalation.scala2d.Colors._ - -import Counter.{nextE, nextN} - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Dgraph` class is for defining graph structures suitable for animation. - * Graphs consist of nodes, edges and tokens. Tokens can be positioned within - * nodes or on edges. 
A graph animation class that uses this class would typically - * move the tokens by changing there location over time. This class supports both - * directed graphs and bipartite graphs. Directed graphs contain only primary - * nodes, while bipartite graphs have both primary and secondary nodes along with - * the rule that edges must go from primaries to secondaries or secondaries to - * primaries. Bipartite graphs can be used to represent Petri Nets by letting - * Transitions be primary nodes and Places be secondary nodes. Everything can be - * labeled (nodes, edges and tokens as well as the graph itself). Nodes and edges - * may be added to/removed from graphs, while tokens may be added to/removed from - * either nodes or edges. Tokens may also be free (not bound to nodes or edges). - * @param name the name of the graph - * @param bipartite whether the graph is bipartite (edges only between 2 types of nodes) - */ -class Dgraph (name: String, bipartite: Boolean = false): - - private val debug = debugf ("Dgraph", false) // debug function - private val flaw = flawf ("Dgraph") // flaw function - - debug ("init", s"create graph: name = $name, bipartite = $bipartite") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `Node` class is used to represent nodes in the graph. 
- * @param shape the shape of the node - * @param label the label for the created node - * @param primary whether it is a primary/transition/true or secondary/place node/false - * @param color the color of the node - * @param x the x-coordinate (top left) - * @param y the y-coordinate (top left) - * @param w the width - * @param h the height - */ -// case class Node (shape: RectangularShape, label: String, primary: Boolean, var color: Color, - case class Node (shape: RectPolyShape, label: String, primary: Boolean, var color: Color, - x: Double, y: Double, w: Double, h: Double): - - shape.setFrame (x, y, w, h) - - private val id = nextN () // node identifier - val outEdges = ListBuffer [Edge] () // list of outgoing edges - val tokens = ListBuffer [Token] () // list of tokens current in this node - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set (or reset) the color. - * @param color the new color - */ - def setColor (color2: Color): Unit = color = color2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add an outgoing edge to this node. - * @param edge the edge to add - */ - def addEdge (edge: Edge): Boolean = - if bipartite && edge.from.primary == edge.to.primary then - flaw ("addEdge", "node types for edge endpoints may not be the same") - return false - end if - outEdges += edge - true - end addEdge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove an outgoing edge from this node. - * @param edge the edge to remove - */ - def removeEdge (edge: Edge): Unit = outEdges -= edge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add a token from this node. - * @param token the token to add - */ - def addToken (token: Token): Unit = tokens += token - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove a token from this node. 
- * @param token the token to remove - */ - def removeToken (token: Token): Unit = tokens -= token - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this node to a string. - */ - override def toString: String = s"Node $label [ $id ]" - - end Node - - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `Edge` class is used to represent edges in the graph. If bend = 0, a - * straight line is created, otherwise a quadratic curve is created. - * It uses implicit coordinates for the edge endpoints. - * @param shape the shape (line/curve) of the edge - * @param label the label for the created edge - * @param primary whether it is a primary/transition/true or secondary/place node/false - * @param color the color of the edge - * @param from the origination node - * @param to the destination node - * @param bend the amount of bend in the curve (defaults to zero) - * @param shift amount of distance to shift the edge, e.g., to accommodate - * a bundle of edges in a composite edge - * @param direct whether to directly set the line or allow factory function to set it - */ - case class Edge (shape: CurvilinearShape, label: String, primary: Boolean, var color: Color, - from: Node, to: Node, bend: Double = 0.0, shift: Int = 0, direct: Boolean = true): - - from.addEdge (this) // add edge to outgoing edges of from node - - private val EPSILON = 1E-7 // very small real number - private val id = nextE () // edge identifier - val tokens = ListBuffer [Token] () // list of tokens current on this edge. 
- private val p1 = VectorD (from.shape.getCenterX (), from.shape.getCenterY ()) - private val p2 = VectorD (to.shape.getCenterX (), to.shape.getCenterY ()) - private val gap = 5 // for multiple edges between one pair of nodes - - if abs (bend) > EPSILON then // handle case where "def this" not called first - shape.setLine (p1, p2, bend) - else if direct then // directly set the line (use factory methods to move) - shape.setLine (p1, p2) - end if - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Move the edge endpoints so edge connects to vertex boundary, rather than center. - * Edge is from p1 to p2: p1 --> p2. - * @param p1 the position of the center of the from vertex - * @param p2 the position of the center of the to vertex - * - def move2Boundary (p1: VectorD, p2: VectorD): Unit = - val angle = atan2 (p2(1) - p1(1), p2(0) - p1(0)) - val radius1 = (from.shape.getWidth () + from.shape.getHeight ()) / 4.0 - val radius2 = (to.shape.getWidth () + to.shape.getHeight ()) / 4.0 - - p1(0) += radius1 * cos (angle); p1(1) += radius1 * sin (angle) - p2(0) += radius2 * cos (Pi+angle); p2(1) += radius2 * sin (Pi+angle) - end move2Boundary - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Move the edge endpoints so edge connects to vertex boundary, rather than center. - * Edge is from p1 to p2: p1 --> p2. 
- * @param p1 the position of the leftop of the from vertex - * @param p2 the position of the leftop of the to vertex - */ - def move2Boundary (p1: VectorD, p2: VectorD): Unit = - banner (s"move2Boundary: from p1 = $p1, to p2 = $p2") - val gapShift = shift * gap - val frCenterX = from.x + from.w / 2 // p1(0) - val frCenterY = from.y + from.h / 2 // p1(1) - val toCenterX = to.x + to.w / 2 // p2(0) - val toCenterY = to.y + to.h / 2 // p2(1) - val angle = atan2 (toCenterY - frCenterY, toCenterX - frCenterX) - - from.shape match -/* - case _: Rectangle => - val p1Vec = pointOnRect (toCenterX, toCenterY, from.x, from.y, from.x + from.w, from.y + from.h) - p1(0) = p1Vec(0) - p1(1) = p1Vec(1) - if shift != 0 then // Yulong note: Either on top/bot shift or left/right shift - if p1(0) == to.x || p1(0) == to.x + to.w then p1(1) += gapShift else p1(0) += gapShift -*/ - case _ => - // the circle is 'from' Yulong fixed the circle one way in case and shift - // p1 is the left top - val radius1 = (from.shape.getWidth () + from.shape.getHeight ()) / 4.0 - //p1(0) += radius1 + radius1 * cos (angle + gapShift) - //p1(1) += radius1 + radius1 * sin (angle + gapShift) - p1(0) += radius1 * cos (angle + gapShift) - p1(1) += radius1 * sin (angle + gapShift) - - to.shape match -/* - case _: Rectangle => - val p2Vec = pointOnRect (frCenterX, frCenterY, to.x, to.y, to.x + to.w, to.y + to.h) - p2(0) = p2Vec(0) - p2(1) = p2Vec(1) - if shift != 0 then - if p2(0) == from.x || p2(0) == from.x + from.w then p2(1) += gapShift else p2(0) += gapShift -*/ - case _ => - // the circle is 'to' - // p2 is the left top - val radius2 = (to.shape.getWidth() + to.shape.getHeight()) / 4.0 - //p2(0) += radius2 + radius2 * cos (Pi + angle - gapShift) - //p2(1) += radius2 + radius2 * sin (Pi + angle - gapShift) - p2(0) += radius2 * cos (Pi + angle - gapShift) - p2(1) += radius2 * sin (Pi + angle - gapShift) - - println (s"move2Boundary: from p1 = $p1, to p2 = $p2") - end move2Boundary - - 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set (or reset) the curve by delegating to shape. - * @param p1 the first/starting point of the curve - * @param pc the control point of the curve - * @param p1 the second/ending point of the curve - */ - def setLine (p1: VectorD, pc: VectorD, p2: VectorD): Unit = shape.setLine (p1, pc, p2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set (or reset) the line by delegating to shape. - * @param p1 the first/starting point of the line - * @param p1 the second/ending point of the line - */ - def setLine (p1: VectorD, p2: VectorD): Unit = shape.setLine (p1, p2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set (or reset) the color. - * @param color the new color - */ - def setColor (color2: Color): Unit = color = color2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add a token from this node. - * @param token the token to add - */ - def addToken (token: Token): Unit = tokens += token - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this edge to a string. - */ - override def toString: String = s"Edge $label [ $id ]" - - end Edge - - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `Edge` companion object provides factory mathods for creating various - * forms of edges. - */ - object Edge: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct an edge as a line with explicit coordinates. 
- * @param shape the shape (line) of the edge - * @param label the label for the created edge - * @param primary whether it is a primary/transition/true or secondary/place node/false - * @param color the color of the edge - * @param from the origination node - * @param to the destination node - * @param p1 the (x,y)-coordinates of the edge's start - * @param p2 the (x,y)-coordinates of the edge's end - * @param shift amount of distance to shift the edge, e.g., to accommodate composite edges - */ - def apply (shape: CurvilinearShape, label: String, primary: Boolean, color: Color, - from: Node, to: Node, p1: VectorD, p2: VectorD, shift: Int): Edge = - val e = new Edge (shape, label, primary, color, from, to, 0.0, shift, false) - e.move2Boundary (p1, p2) - e.shape.setLine (p1, p2) - e - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct an edge as a curve with explicit coordinates. - * @param shape the shape (curve) of the edge - * @param label the label for the created edge - * @param primary whether it is a primary/transition/true or secondary/place node/false - * @param color the color of the edge - * @param from the origination node - * @param to the destination node - * @param p1 the (x,y)-coordinates of the edge's start - * @param pc the (x,y)-coordinates of the edge's control point - * @param p2 the (x,y)-coordinates of the edge's end - * @param shift amount of distance to shift the edge, e.g., to accommodate composite edges - */ - def apply (shape: CurvilinearShape, label: String, primary: Boolean, color: Color, - from: Node, to: Node, p1: VectorD, pc: VectorD, p2: VectorD, shift: Int): Edge = - val e = new Edge (shape, label, primary, color, from, to, 0.0, shift, false) - //e.pc = pc - e.move2Boundary (p1, p2) - e.shape.setLine (p1, p2) -// e.shape.setLine (p1, pc, p2) // pc => curve - e - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct an edge 
as a curve with explicit coordinates. - * @param shape the shape (curve) of the edge - * @param label the label for the created edge - * @param primary whether it is a primary/transition/true or secondary/place node/false - * @param color the color of the edge - * @param from the origination node - * @param to the destination node - */ - def apply (shape: CurvilinearShape, label: String, primary: Boolean, color: Color, - from: Node, to: Node): Edge = - val e = new Edge (shape, label, primary, color, from, to, direct = false) - e.move2Boundary (e.p1, e.p2) - e.shape.setLine (e.p1, e.p2) - e - end apply - - end Edge - - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `Token` class is used to represent tokens in the graph. - * @param shape the shape of the token - * @param label the label for the created token - * @param primary whether the token is primary/free/true to secondary/bound/false - * @param color the color of the token - * @param onNode the node the token is on - * @param w the width of the token - * @param h the height of the token - */ - case class Token (shape: RectangularShape, label: String, primary: Boolean, var color: Color, - var onNode: Node, val w: Double, val h: Double): - -// private val id = nextT () // token identifier - - if onNode != null then - onNode.addToken (this) - val x = onNode.shape.getCenterX () - w / 2.0 - val y = onNode.shape.getCenterY () - h / 2.0 - shape.setFrame (x, y, w, h) - end if - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a primary/free token with explicit coordinates. - * Such tokens are free to move anywhere in the drawing panel. 
- * @param shape the shape of the token - * @param label the label for the created token - * @param color the color of the token - * @param x the x-coordinate of the token's location - * @param y the y-coordinate of the token's location - * @param w the width of the token - * @param h the height of the token - */ - def this (shape: RectangularShape, label: String, primary: Boolean, color: Color, - x: Double, y: Double, w: Double, h: Double) = - this (shape, label, true, color, null, w, h) - shape.setFrame (x, y, w, h) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set (or reset) the color. - * @param color the new color - */ - def setColor (color2: Color): Unit = color = color2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the node the token is on. - * @param onNode2 the node the token is on - */ - def setOnNode (onNode2: Node): Unit = onNode = onNode2 - - end Token - - - /** List of nodes in the graph - */ - val nodes = ListBuffer [Node] () - - /** List of edges in the graph - */ - val edges = ListBuffer [Edge] () - - /** List of free tokens in the graph (bound tokens must be in a nodes or edges list) - */ - val freeTokens = ListBuffer [Token] () - - /** Whether the nodes have been visited (internal use only) - */ - private val visited = new HashSet [Node] () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add a node to the graph. - * @param n the node to add - */ - def addNode (n: Node): Unit = nodes += n - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove a node from the graph. - * @param n the node to remove - */ - def removeNode (n: Node): Unit = nodes -= n - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add an edge to the graph. 
- * @param e the edge to add - */ - def addEdge (e: Edge): Unit = edges += e - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove an edge from the graph. - * @param e the edge to remove - */ - def removeEdge (e: Edge): Unit = edges -= e - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add a free token to the graph. - * @param t the free token to add - */ - def addFreeToken (t: Token): Unit = freeTokens += t - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove a free token from the graph. - * @param t the free token to remove - */ - def removeFreeToken (t: Token): Unit = freeTokens -= t - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get all the root nodes (those with no incoming edges). - */ - def getRoots: ListBuffer [Node] = - val roots = new ListBuffer [Node] () - for n <- nodes do - var keep = true - for e <- edges if n == e.to do keep = false - if keep then roots += n - end for - roots - end getRoots - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Mark all nodes as unvisited by clearing them from the hash set. - */ - private def clearVisited (): Unit = visited.clear () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursively visit all nodes in the graph. - * @param n the current node - * @param level the recursion level - */ - def traverse (n: Node, level: Int): Unit = - for i <- 0 until level do print ("\t") - println (n) // print visited node - //visited.add (n) - val outgoing = n.outEdges - if outgoing != null then - for oEdge <- outgoing do - val next = oEdge.to - traverse (next, level + 1) - // if ! visited. 
contains (next) then traverse (next, level + 1) - end for - end if - end traverse - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Traverse the graph printing out its nodes and showing connectivity by indentation. - */ - def traverseNodes (): Unit = - clearVisited () - // traverse (nodes.get (0), 0) // only from node 0 - for r <- getRoots do traverse (r, 0) // from all roots - end traverseNodes - -end Dgraph - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Counter` object maintains counters. - */ -object Counter: - - private var nCounter = 0 - private var eCounter = 0 - private var tCounter = 0 - - def nextN (): Int = { nCounter += 1; nCounter } - def nextE (): Int = { eCounter += 1; eCounter } - def nextT (): Int = { tCounter += 1; tCounter } - -end Counter - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `dgraphTest` main function to test the `Dgraph` class. - * > runMain scalation.animation.dgraphTest - */ -@main def dgraphTest (): Unit = - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build and test a directed graph. 
- */ - def testDirectedGraph (g: Dgraph): Unit = - - // Create nodes - val n1 = g.Node (Ellipse (), "node1", true, red, 100, 200, 20, 20) - val n2 = g.Node (Ellipse (), "node2", true, blue, 300, 100, 20, 20) - val n3 = g.Node (Ellipse (), "node3", true, green, 300, 300, 20, 20) - val n4 = g.Node (Ellipse (), "node4", true, purple, 500, 200, 20, 20) - - // Create edges - val e1 = new g.Edge (QCurve (), "edge1", true, black, n1, n2) // 120, 210, 300, 110) - n1.addEdge (e1) - val e2 = new g.Edge (QCurve (), "edge1", true, black, n1, n3) // 120, 210, 300, 310) - n1.addEdge (e2) - val e3 = new g.Edge (QCurve (), "edge1", true, black, n2, n4) // 320, 110, 500, 210) - n2.addEdge (e3) - val e4 = new g.Edge (QCurve (), "edge1", true, black, n3, n4) // 320, 310, 500, 210) - n3.addEdge (e4) - - // Add the nodes and edges to the directed graph - g.addNode (n1) - g.addNode (n2) - g.addNode (n3) - g.addNode (n4) - g.addEdge (e1) - g.addEdge (e2) - g.addEdge (e3) - g.addEdge (e4) - - // Traverse the directed graph printing out its nodes - g.traverseNodes () - end testDirectedGraph - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build and test a bipartite graph. 
- */ - def testBipartiteGraph (g: Dgraph): Unit = - // Create nodes - val n1 = g.Node (Ellipse (), "node1", false, orange, 100, 100, 30, 30) - val n2 = g.Node (Ellipse (), "node2", false, orange, 100, 300, 30, 30) - val n3 = g.Node (Rectangle (), "node2", true, lightgreen, 300, 185, 30, 60) - val n4 = g.Node (Ellipse (), "node4", false, red, 500, 100, 30, 30) - val n5 = g.Node (Ellipse (), "node5", false, red, 500, 300, 30, 30) - - // Create edges - val e1 = new g.Edge (QCurve (), "edge1", true, black, n1, n3) // 130, 115, 300, 215) - n1.addEdge (e1) - val e2 = new g.Edge (QCurve (), "edge2", true, black, n2, n3) // 130, 315, 300, 215) - n2.addEdge (e2) - val e3 = new g.Edge (QCurve (), "edge3", true, black, n3, n4) // 330, 215, 500, 115) - n3.addEdge (e3) - val e4 = new g.Edge (QCurve (), "edge4", true, black, n3, n5) // 330, 215, 500, 315) - n3.addEdge (e4) - - // Add the nodes and edges to the directed graph - g.addNode (n1) - g.addNode (n2) - g.addNode (n3) - g.addNode (n4) - g.addNode (n5) - g.addEdge (e1) - g.addEdge (e2) - g.addEdge (e3) - g.addEdge (e4) - - // Traverse the directed graph printing out its nodes - g.traverseNodes () - end testBipartiteGraph - - println ("Run DgraphTest - Bipartite Graph Test\n") - val bg = new Dgraph ("Bipartite_Graph", true) - testBipartiteGraph (bg) - - println ("Run DgraphTest - Directed Graph Test\n") - val dg = new Dgraph ("Directed_Graph", false) - testDirectedGraph (dg) - -end dgraphTest - diff --git a/target/scala-3.6.4/classes/scalation/animation/Dgraph.tasty b/target/scala-3.6.4/classes/scalation/animation/Dgraph.tasty deleted file mode 100644 index bb7e2a764..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/Dgraph.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/EidCounter$.class b/target/scala-3.6.4/classes/scalation/animation/EidCounter$.class deleted file mode 100644 index 8fa0976c5..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/animation/EidCounter$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/EidCounter.class b/target/scala-3.6.4/classes/scalation/animation/EidCounter.class deleted file mode 100644 index ddec9c73a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/EidCounter.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/EidCounter.tasty b/target/scala-3.6.4/classes/scalation/animation/EidCounter.tasty deleted file mode 100644 index 3a883cff6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/EidCounter.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/PointOn$package$.class b/target/scala-3.6.4/classes/scalation/animation/PointOn$package$.class deleted file mode 100644 index e7936bf26..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/PointOn$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/PointOn$package.class b/target/scala-3.6.4/classes/scalation/animation/PointOn$package.class deleted file mode 100644 index bbcad7d77..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/PointOn$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/PointOn$package.tasty b/target/scala-3.6.4/classes/scalation/animation/PointOn$package.tasty deleted file mode 100644 index 88dc52015..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/PointOn$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator$Canvas.class b/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator$Canvas.class deleted file mode 100644 index 2bb4ed270..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator$Canvas.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator$package$.class b/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator$package$.class deleted file mode 100644 index b4335a55c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator$package.class b/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator$package.class deleted file mode 100644 index 609c0419d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator$package.tasty b/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator$package.tasty deleted file mode 100644 index d60d2ba81..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator.class b/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator.class deleted file mode 100644 index 466e20726..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator.tasty b/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator.tasty deleted file mode 100644 index 949c98052..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2$Canvas.class b/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2$Canvas.class deleted file mode 100644 index e351236a0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2$Canvas.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2$package$.class b/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2$package$.class deleted file mode 100644 index 795ff307e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2$package.class b/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2$package.class deleted file mode 100644 index 6cbed1cd5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2$package.tasty b/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2$package.tasty deleted file mode 100644 index ae52df549..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2.class b/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2.class deleted file mode 100644 index 427287692..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2.tasty b/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2.tasty deleted file mode 100644 index c44d30124..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/SimpleAnimator2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest.class b/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest.class deleted file mode 100644 index ace9b3fc9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest.tasty b/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest.tasty deleted file mode 100644 index 7bfe433ed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest2.class b/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest2.class deleted file mode 100644 index bf1f42ba5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest2.tasty b/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest2.tasty deleted file mode 100644 index 307d26910..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest3.class b/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest3.class deleted file mode 100644 index 4d2766d89..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest3.tasty b/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest3.tasty deleted file mode 100644 index 8e42f1f88..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/dgAnimatorTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/dgraphTest.class b/target/scala-3.6.4/classes/scalation/animation/dgraphTest.class deleted file mode 100644 index 0f391295b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/dgraphTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/dgraphTest.tasty b/target/scala-3.6.4/classes/scalation/animation/dgraphTest.tasty deleted file mode 100644 
index 880344291..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/dgraphTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/index.html b/target/scala-3.6.4/classes/scalation/animation/index.html deleted file mode 100644 index 2ee9d1db1..000000000 --- a/target/scala-3.6.4/classes/scalation/animation/index.html +++ /dev/null @@ -1,15 +0,0 @@ - - -

    Source files in animation Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/animation/old/DgAnimator.scala.bak b/target/scala-3.6.4/classes/scalation/animation/old/DgAnimator.scala.bak deleted file mode 100644 index 8fe54f982..000000000 --- a/target/scala-3.6.4/classes/scalation/animation/old/DgAnimator.scala.bak +++ /dev/null @@ -1,497 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Sep 14 14:15:51 EDT 2009 - * @see LICENSE (MIT style license file). - * - * @title Animation Engine for Animating Graphs - */ - -package scalation -package animation - -import java.awt.Font -import java.awt.event.{MouseEvent, MouseListener, MouseMotionListener, MouseWheelEvent, MouseWheelListener} -import java.awt.geom.{AffineTransform, Point2D} -import java.util.concurrent.ConcurrentLinkedQueue - -import scala.math.round -import scala.util.control.Breaks.{breakable, break} - -import scalation.scala2d._ -import scalation.scala2d.Colors._ - -import CommandType._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `DgAnimator` class is an animation engine for animating graphs. - * For example, it can animate bipartite graphs to animate Petri Nets. - * @param _title the title for the display frame - * @param fgColor the foreground color - * @param bgColor the background color - * @param aniRatio the ratio of simulation speed vs. 
animation speed - * @param width the width of the animation panel - * @param height the height of the animation panel - * @param labels the labels of the animation panel - */ -class DgAnimator (_title: String, fgColor: Color = black, bgColor: Color = white, - aniRatio: Double = 1.0, width: Int = 800, height: Int = 800, labels: Boolean = true) - extends VizFrame (_title, null, width, height) - with Runnable: - - /** The debug function - */ - private val debug = debugf ("DgAnimator", true) - - /** The flaw function - */ - private val flaw = flawf ("DgAnimator") - - /** Clock for animation engine - */ - private var clock = 0.0 - - /** Width and height for the clock - */ - private val clockWH = (20, 30) - - /** Stop time for animation engine - */ - private var stopTime = 0.0 - - /** Graph to animate - */ - private val graph = new Dgraph ("Animated_Graph") - - /** Shared queue holding animation commands - */ - private val cmdQ = new ConcurrentLinkedQueue [AnimateCommand] () - - /** Animation command processor - */ - private val ani = new Animator (graph) - - /** Flag to indicate that the animation is complete - */ - private var aniDone = false - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the animation complete flag to true. - */ - def setAniDone () = aniDone = true - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Save the graphics into an image file. - * @param fname the file name - */ - def saveImage (fname: String): Unit = writeImage (fname, this) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The canvas Panel is used to place shapes in the drawing region. 
- * @param width the width of the canvas - * @param height the height of the canvas - */ - class Canvas (width: Int = width, height: Int = height) - extends ZoomablePanel: -/* - extends Panel - with MouseWheelListener - with MouseListener - with MouseMotionListener: - - private val at = new AffineTransform() - private var scale = 1.0 - private var basex = 0.0 - private var basey = 0.0 - private var originx = 0.0 - private var originy = 0.0 - - addMouseWheelListener (this) - addMouseMotionListener (this) - addMouseListener (this) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Override the mouseWheelMoved method to set the scale of the - * canvas using the scroll value (up is negative, down is positive). - * @param e the mouse wheel event - */ - override def mouseWheelMoved (e: MouseWheelEvent): Unit = - var x = e.getX ().toDouble - var y = e.getY ().toDouble - var p = new Point2D.Double () - try - at.inverseTransform (new Point2D.Double (x, y), p) - catch - case ee: Exception => {} - end try - x = p.getX () - y = p.getY () - var zoom = 1.0 - val r = e.getWheelRotation () - if r < 0 then zoom *= 1.1 - if r > 0 then zoom /= 1.1 - at.translate (x, y) - at.scale (zoom, zoom) - scale *= zoom - at.translate (-x, -y) - revalidate () - repaint () - end mouseWheelMoved - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Override the mouseDeagged method to what? - FIX - * @param e the mouse dragged event - */ - override def mouseDragged (e: MouseEvent): Unit = - val dx = (e.getX () - basex) / scale - val dy = (e.getY () - basey) / scale - originx += dx * scale - originy += dy * scale - at.translate (dx, dy) - basex = e.getX () - basey = e.getY () - revalidate () - repaint () - end mouseDragged - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Override the mousePressed method to what? 
- FIX - * @param e the mouse pressed event - */ - override def mousePressed (e: MouseEvent): Unit = - basex = e.getX() - basey = e.getY() - end mousePressed - - override def mouseMoved (e: MouseEvent): Unit = {} - override def mouseClicked (e: MouseEvent): Unit = {} - override def mouseEntered (e: MouseEvent): Unit = {} - override def mouseExited (e: MouseEvent): Unit = {} - override def mouseReleased (e: MouseEvent): Unit = {} -*/ - - private val fsize = 12 - private val f = new Font ("Serif", Font.BOLD, fsize) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Paint the display panel component. - * @param gr low-resolution graphics environment - */ - override def paintComponent (gr: Graphics): Unit = - super.paintComponent (gr) - val g2d = gr.asInstanceOf [Graphics2D] // use hi-resolution - - g2d.setTransform (at) // used for zooming @author Casey Bowman - - //:: Display the animation clock - - g2d.setFont (f) - g2d.setPaint (fgColor) - g2d.drawString ("CLOCK = " + "%10.3f".format(clock), clockWH._1, getH - clockWH._2) - - //:: Display all nodes in graph and tokens bound to these nodes. - - // println ("paintComponent: paint " + graph.nodes.length + " nodes") - for node <- graph.nodes do - g2d.setPaint (node.color) - g2d.fill (node.shape) - g2d.setPaint (black) - g2d.draw (node.shape) - val x = node.shape.getCenterX ().asInstanceOf [Float] // - 20.0f - val y = node.shape.getBounds2D.getMaxY ().asInstanceOf [Float] // + 12.0f - g2d.drawString (node.label, x, y) - val node_tokens = node.tokens.toList // copy to avoid ConcurrentModificationException - for token <- node_tokens do - g2d.setPaint (token.color) - g2d.fill (token.shape) - end for - end for - - //:: Display all edges in graph and tokens bound to these edges. 
- - // println ("paintComponent: paint " + graph.edges.length + " edges") - for edge <- graph.edges do - g2d.setPaint (edge.color) - g2d.draw (edge.shape) - val x = edge.shape.getCenterX.asInstanceOf [Float] // - 30.0f - val y = edge.shape.getCenterY.asInstanceOf [Float] - g2d.drawString (edge.label, x, y) - val edge_tokens = edge.tokens.toList // copy to avoid Exception - for token <- edge_tokens if token.shape.getWidth () > 0.0 do - g2d.setPaint (token.color) - g2d.fill (token.shape) - end for - end for - - //:: Display all free tokens in the graph. - - // println ("paintComponent: paint " + graph.freeTokens.length + " free tokens") - val free_tokens = graph.freeTokens.toList // copy to avoid Exception - for token <- free_tokens if token.shape.getWidth () > 0.0 do - g2d.setPaint (token.color) - g2d.fill (token.shape) - end for - end paintComponent - - end Canvas - - { - getContentPane ().add (new Canvas (width, height)) - setVisible (true) - setBackground (bgColor) - } // primary constructor - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Invoke the animation command. 
- * @param c the animation command to invoke - */ - private def invokeCommand (c: AnimateCommand): Unit = - if c.action != MoveToken then // remove if to see all move steps - println (s"DgAnimator.invokeCommand: $c") - end if - - c.action match - case CreateNode => - ani.createNode (c.eid, c.shape.asInstanceOf [RectPolyShape], c.label, c.primary, c.color, c.pts) -// ani.createNode (c.eid, c.shape.asInstanceOf [RectangularShape], c.label, c.primary, c.color, c.pts) - case CreateEdge => - ani.createEdge (c.eid, c.shape.asInstanceOf [CurvilinearShape], c.label, c.primary, c.color, c.from_eid, c.to_eid, - c.pts, c.shift) -// ani.createEdge (c.eid, c.shape.asInstanceOf [QCurve], c.label, c.primary, c.color, c.from_eid, c.to_eid, c.pts) - case CreateToken => - ani.createToken (c.eid, c.shape.asInstanceOf [RectangularShape], c.label, c.primary, c.color, c.from_eid, c.pts) - case DestroyNode => - ani.destroyNode (c.eid) - case DestroyEdge => - ani.destroyEdge (c.eid) - case DestroyToken => - ani.destroyToken (c.eid) - case MoveNode => - ani.moveNode (c.eid, c.pts) - case MoveToken => - ani.moveToken (c.eid, c.pts) - case MoveToken2Node => - ani.moveToken2Node (c.eid, c.from_eid) - case MoveTokens2Node => - ani.moveTokens2Node (c.color, c.from_eid, c.to_eid, c.pts) - case MoveToken2Edge => - ani.moveToken2Edge (c.eid, c.from_eid, 10.0) // FIX: 10.0? - case ScaleNode => - ani.scaleNode (c.eid, c.pts) - case ScaleToken => - ani.scaleToken (c.eid, c.pts) - case ScaleTokensAt => - ani.scaleTokensAt (c.color, c.from_eid, c.to_eid, c.pts) - case SetPaintNode => - ani.setPaintNode (c.eid, c.color) - case SetPaintEdge => - ani.setPaintEdge (c.eid, c.color) - case SetPaintToken => - ani.setPaintToken (c.eid, c.color) - case TimeDilation => - ani.timeDilation (c.pts) - end match - end invokeCommand - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Repeatedly execute animation commands, sleep and repaint. 
- */ - def run (): Unit = - var cmd: AnimateCommand = null - var when = 0.0 - var delay = 0L - var nCmds = 0 - - println (s"DgAnimator.run: start animation at time $clock") - printCommandQueue (clock) - - breakable { - while clock < stopTime do - - //:: Get the next animation command from the shared queue. - - if cmdQ.isEmpty && aniDone then - println ("DgAnimator.run: command queue is empty") - break () - else if ! cmdQ.isEmpty then - cmd = cmdQ.poll () - when = cmd.time - delay = round ((when - clock) * aniRatio * ani.timeDilationFactor) - - //:: Sleep for the given number (delay) of milliseconds. - - Thread.sleep (delay) - - //:: set the animation clock and invoke the animation command - - clock = when - nCmds += 1 - invokeCommand (cmd) - - //:: Repaint the canvas. - - repaint () - end if - end while - } // breakable - - println (s"DgAnimator.run: end animation at time $clock with $nCmds commands invoked") - end run - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Start the animation by staring the animation thread. - * @param tStart the animation start time - * @param tStop the animation stop time - */ - def animate (tStart: Double, tStop: Double): Unit = - clock = tStart - stopTime = tStop - new Thread (this).start () - end animate - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Invoke animation command cmd immediately (useful for testing). - * @param cmd the animation command to invoke - */ - def invokeNow (cmd: AnimateCommand): Unit = - invokeCommand (cmd) - repaint () - end invokeNow - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the animation command queue. - */ - def getCommandQueue: ConcurrentLinkedQueue [AnimateCommand] = cmdQ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Print the contents of the command queue one animation command per line. 
- * @param t the given time - */ - def printCommandQueue (t: Double): Unit = - println (s"At time t = $t: command queue = ") - println (cmdQ.toString.replace ("), A", ")\nA")) - println ("-" * 80) - end printCommandQueue - -end DgAnimator - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `dgAnimatorTest` main function is used to test the `DgAnimator` class. - * It tests the creation of nodes. - * > runMain scalation.animation.dgAnimatorTest - */ -@main def dgAnimatorTest (): Unit = - - banner ("Run dgAnimatorTest") - val dga = new DgAnimator ("DgAnimator", bgColor = lightgrey) - val aniQ = dga.getCommandQueue - - //:: Place the nodes into graph. - - aniQ.add (AnimateCommand (CreateNode, 1, Ellipse (), "node1", false, yellow, Array (100.0, 110.0, 30.0, 30.0), 0)) - aniQ.add (AnimateCommand (CreateNode, 2, Ellipse (), "node2", false, yellow, Array (100.0, 290.0, 30.0, 30.0), 0)) - aniQ.add (AnimateCommand (CreateNode, 3, Rectangle (), "node3", true, gold, Array (300.0, 185.0, 30.0, 60.0), 1000)) - aniQ.add (AnimateCommand (CreateNode, 4, Ellipse (), "node4", false, silver, Array (500.0, 110.0, 30.0, 30.0), 2000)) - aniQ.add (AnimateCommand (CreateNode, 5, Ellipse (), "node5", false, silver, Array (500.0, 290.0, 30.0, 30.0), 2000)) - aniQ.add (AnimateCommand (CreateNode, 6, Rectangle (), "node6", true, gold, Array (300.0, 35.0, 30.0, 60.0), 3000)) - aniQ.add (AnimateCommand (CreateNode, 7, Rectangle (), "node7", true, gold, Array (300.0, 335.0, 30.0, 60.0), 3000)) - - dga.animate (0, 100000) - -end dgAnimatorTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `dgAnimatorTest2` main function is used to test the `DgAnimator` class. - * It tests the creation of nodes, edges and tokens. 
- * > runMain scalation.animation.dgAnimatorTest2 - */ -@main def dgAnimatorTest2 (): Unit = - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Sample method for loading the shared command queue. - * Ordinarily these commands would come from some simulation engine. - * @param cq the animation command queue - */ - def loadCommandQueue (cq: ConcurrentLinkedQueue [AnimateCommand]): Unit = - - //:: Place the nodes into graph. - - cq.add (AnimateCommand (CreateNode, 1, Ellipse (), "node1", false, yellow, Array (100.0, 110.0, 30.0, 30.0), 0)) - cq.add (AnimateCommand (CreateNode, 2, Ellipse (), "node2", false, yellow, Array (100.0, 290.0, 30.0, 30.0), 0)) - cq.add (AnimateCommand (CreateNode, 3, Rectangle (), "node3", true, gold, Array (300.0, 185.0, 30.0, 60.0), 1000)) - cq.add (AnimateCommand (CreateNode, 4, Ellipse (), "node4", false, silver, Array (500.0, 110.0, 30.0, 30.0), 2000)) - cq.add (AnimateCommand (CreateNode, 5, Ellipse (), "node5", false, silver, Array (500.0, 290.0, 30.0, 30.0), 2000)) - cq.add (AnimateCommand (CreateNode, 6, Rectangle (), "node6", true, gold, Array (300.0, 35.0, 30.0, 60.0), 3000)) - cq.add (AnimateCommand (CreateNode, 7, Rectangle (), "node7", true, gold, Array (300.0, 335.0, 30.0, 60.0), 3000)) - - //:: Place the edges into graph. 
- - cq.add (AnimateCommand (CreateEdge, 8, QCurve (), "edge1", true, red, null, 4000, 1, 3)) - cq.add (AnimateCommand (CreateEdge, 9, QCurve (), "edge2", true, red, null, 4000, 2, 3)) - cq.add (AnimateCommand (CreateEdge, 10, QCurve (), "edge3", true, red, null, 5000, 3, 4)) - cq.add (AnimateCommand (CreateEdge, 11, QCurve (), "edge4", true, red, null, 5000, 3, 5)) - cq.add (AnimateCommand (CreateEdge, 12, QCurve (), "edge5", true, red, null, 6000, 4, 6)) - cq.add (AnimateCommand (CreateEdge, 13, QCurve (), "edge6", true, red, null, 6000, 5, 7)) - cq.add (AnimateCommand (CreateEdge, 14, QCurve (), "edge7", true, red, null, 7000, 6, 1)) - cq.add (AnimateCommand (CreateEdge, 15, QCurve (), "edge8", true, red, null, 7000, 7, 2)) - - //:: Place the tokens into graph. - - cq.add (AnimateCommand (CreateToken, 16, Ellipse (), "token1", false, blue, null, 8000, 1)) - cq.add (AnimateCommand (CreateToken, 17, Ellipse (), "token2", false, cyan, null, 8000, 2)) - - //:: Move the tokens around graph. 
- - for i <- 0 to 10 do - cq.add (AnimateCommand (MoveToken2Node, 16, null, null, false, null, null, 12000 + 10000 * i, 3)) - cq.add (AnimateCommand (MoveToken2Node, 17, null, null, false, null, null, 12000 + 10000 * i, 3)) - cq.add (AnimateCommand (MoveToken2Node, 16, null, null, false, null, null, 13000 + 10000 * i, 4)) - cq.add (AnimateCommand (MoveToken2Node, 17, null, null, false, null, null, 13000 + 10000 * i, 5)) - cq.add (AnimateCommand (MoveToken2Node, 16, null, null, false, null, null, 17000 + 10000 * i, 6)) - cq.add (AnimateCommand (MoveToken2Node, 17, null, null, false, null, null, 17000 + 10000 * i, 7)) - cq.add (AnimateCommand (MoveToken2Node, 16, null, null, false, null, null, 18000 + 10000 * i, 1)) - cq.add (AnimateCommand (MoveToken2Node, 17, null, null, false, null, null, 18000 + 10000 * i, 2)) - end for - end loadCommandQueue - - banner ("Run DgAnimatorTest2") - val dga = new DgAnimator ("DgAnimator", bgColor = lightgrey) - loadCommandQueue (dga.getCommandQueue) - dga.animate (0, 100000) - -end dgAnimatorTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `dgAnimatorTest3` main function is used to test the `DgAnimator` class. - * It tests zoom in and zoom out of a triagle with three nodes and three edges. - * > runMain scalation.animation.dgAnimatorTest3 - */ -@main def dgAnimatorTest3 (): Unit = - - banner ("Run dgAnimatorTest3") - val dga = new DgAnimator ("DgAnimator") - val aniQ = dga.getCommandQueue - - println ("Make a triangle and zoom in and out") - println ("print zooming instructions") - - //:: Place the nodes into graph. 
- - aniQ.add (AnimateCommand (CreateNode, 1, Ellipse(), "node1", false, yellow, Array(100.0, 110.0, 30.0, 30.0), 0)) - aniQ.add (AnimateCommand (CreateNode, 2, Ellipse(), "node2", false, yellow, Array(100.0, 290.0, 30.0, 30.0), 0)) - aniQ.add (AnimateCommand (CreateNode, 3, Rectangle(), "node3", true, gold, Array(300.0, 185.0, 30.0, 60.0), 0)) - - //:: Place the edges into graph. - - aniQ.add (AnimateCommand (CreateEdge, 4, QCurve(), "edge1", true, blue, null, 100, 1, 2)) - aniQ.add (AnimateCommand (CreateEdge, 5, QCurve(), "edge2", true, blue, null, 200, 2, 3)) - aniQ.add (AnimateCommand (CreateEdge, 6, QCurve(), "edge3", true, blue, null, 300, 3, 1)) - - dga.animate (0, 100000) - -end dgAnimatorTest3 - diff --git a/target/scala-3.6.4/classes/scalation/animation/old/Dgraph.scala.bak b/target/scala-3.6.4/classes/scalation/animation/old/Dgraph.scala.bak deleted file mode 100644 index d244eaa47..000000000 --- a/target/scala-3.6.4/classes/scalation/animation/old/Dgraph.scala.bak +++ /dev/null @@ -1,514 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Sep 21 15:05:06 EDT 2009 - * @see LICENSE (MIT style license file). - * - * @title Graph Structure Suitable for Animation - */ - -package scalation -package animation - -import scala.collection.mutable.{HashSet, ListBuffer} -import scala.math.{abs, atan2, cos, Pi, sin} - -import scalation.mathstat.VectorD -import scalation.scala2d._ -import scalation.scala2d.Colors._ - -import Counter.{nextE, nextN, nextT} - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Dgraph` class is for defining graph structures suitable for animation. - * Graphs consist of nodes, edges and tokens. Tokens can be positioned within - * nodes or on edges. A graph animation class that uses this class would typically - * move the tokens by changing there location over time. 
This class supports both - * directed graphs and bipartite graphs. Directed graphs contain only primary - * nodes, while bipartite graphs have both primary and secondary nodes along with - * the rule that edges must go from primaries to secondaries or secondaries to - * primaries. Bipartite graphs can be used to represent Petri Nets by letting - * Transitions be primary nodes and Places be secondary nodes. Everything can be - * labeled (nodes, edges and tokens as well as the graph itself). Nodes and edges - * may be added to/removed from graphs, while tokens may be added to/removed from - * either nodes or edges. Tokens may also be free (not bound to nodes or edges). - */ -class Dgraph (name: String = "Dgraph", bipartite: Boolean = false): - - private val flaw = flawf ("Dgraph") // flaw function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `Node` class is used to represent nodes in the graph. - * @param shape the shape of the node - * @param label the label for the created node - * @param primary whether it is a primary/transition/true or secondary/place node/false - * @param color the color of the node - * @param x the x-coordinate (top left) - * @param y the y-coordinate (top left) - * @param w the width - * @param h the height - */ -// case class Node (shape: RectangularShape, label: String, primary: Boolean, var color: Color, - case class Node (shape: RectPolyShape, label: String, primary: Boolean, var color: Color, - x: Double, y: Double, w: Double, h: Double): - - shape.setFrame (x, y, w, h) - - private val id = nextN () // node identifier - val outEdges = ListBuffer [Edge] () // list of outgoing edges - val tokens = ListBuffer [Token] () // list of tokens current in this node - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set (or reset) the color. 
- * @param color the new color - */ - def setColor (color2: Color): Unit = color = color2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add an outgoing edge to this node. - * @param edge the edge to add - */ - def addEdge (edge: Edge): Boolean = - if bipartite && edge.from.primary == edge.to.primary then - flaw ("addEdge", "node types for edge endpoints may not be the same") - return false - end if - outEdges += edge - true - end addEdge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove an outgoing edge from this node. - * @param edge the edge to remove - */ - def removeEdge (edge: Edge): Unit = outEdges -= edge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add a token from this node. - * @param token the token to add - */ - def addToken (token: Token): Unit = tokens += token - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove a token from this node. - * @param token the token to remove - */ - def removeToken (token: Token): Unit = tokens -= token - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this node to a string. - */ - override def toString: String = s"Node $label [ $id ]" - - end Node - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `Edge` class is used to represent edges in the graph. If bend = 0, a - * straight line is created, otherwise a quadratic curve is created. - * It uses implicit coordinates for the edge endpoints. 
- * @param shape the shape (line/curve) of the edge - * @param label the label for the created edge - * @param primary whether it is a primary/transition/true or secondary/place node/false - * @param color the color of the edge - * @param from the origination node - * @param to the destination node - * @param bend the amount of bend in the curve (defaults to zero) - * @param shift amount of distance to shift the edge, e.g., to accommodate - * a bundle of edges in a composite edge - * @param direct whether to directly set the line or allow factory function to set it - */ - case class Edge (shape: CurvilinearShape, label: String, primary: Boolean, var color: Color, - from: Node, to: Node, bend: Double = 0.0, shift: Int = 0, direct: Boolean = true): - - from.addEdge (this) // add edge to outgoing edges of from node - - private val EPSILON = 1E-7 // very small real number - private val id = nextE () // edge identifier - val tokens = ListBuffer [Token] () // list of tokens current on this edge. - private val p1 = VectorD (from.shape.getCenterX (), from.shape.getCenterY ()) - private val p2 = VectorD (to.shape.getCenterX (), to.shape.getCenterY ()) - private val gap = 5 // for multiple edges between one pair of nodes - - if abs (bend) > EPSILON then // hendle case where "def this" not called first - shape.setLine (p1, p2, bend) - else if direct then // directly set the line (use factory methods to move) - shape.setLine (p1, p2) - end if - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct an edge as a line with explicit coordinates. 
- * @param shape the shape (line) of the edge - * @param label the label for the created edge - * @param primary whether it is a primary/transition/true or secondary/place node/false - * @param color the color of the edge - * @param from the origination node - * @param to the destination node - * @param p1 the (x,y)-coordinates of the edge's start - * @param p2 the (x,y)-coordinates of the edge's end - * @param shift amount of distance to shift the edge - */ - def this (shape: CurvilinearShape, label: String, primary: Boolean, color: Color, - from: Node, to: Node, p1: VectorD, p2: VectorD, shift: Int) = - this (shape, label, primary, color, from, to, 0.0, shift) - move2Boundary (p1, p2) - shape.setLine (p1, p2) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct an edge as a curve with explicit coordinates. - * @param shape the shape (curve) of the edge - * @param label the label for the created edge - * @param primary whether it is a primary/transition/true or secondary/place node/false - * @param color the color of the edge - * @param from the origination node - * @param to the destination node - * @param p1 the (x,y)-coordinates of the edge's start - * @param pc the (x,y)-coordinates of the edge's control point - * @param p2 the (x,y)-coordinates of the edge's end - * @param shift amount of distance to shift the edge - */ - def this (shape: CurvilinearShape, label: String, primary: Boolean, color: Color, - from: Node, to: Node, p1: VectorD, pc: VectorD, p2: VectorD, shift: Int) = - this (shape, label, primary, color, from, to, 0.0, shift) - move2Boundary (p1, p2) - shape.setLine (p1, pc, p2) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Move the edge endpoints so edge connects to vertex boundary, rather than center. - * Edge is from p1 to p2: p1 --> p2. 
- * @param p1 the position of the center of the from vertex - * @param p2 the position of the center of the to vertex - * - def move2Boundary (p1: VectorD, p2: VectorD): Unit = - val angle = atan2 (p2(1) - p1(1), p2(0) - p1(0)) - val radius1 = (from.shape.getWidth () + from.shape.getHeight ()) / 4.0 - val radius2 = (to.shape.getWidth () + to.shape.getHeight ()) / 4.0 - - p1(0) += radius1 * cos (angle); p1(1) += radius1 * sin (angle) - p2(0) += radius2 * cos (Pi+angle); p2(1) += radius2 * sin (Pi+angle) - end move2Boundary - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Move the edge endpoints so edge connects to vertex boundary, rather than center. - * Edge is from p1 to p2: p1 --> p2. - * @param p1 the position of the leftop of the from vertex - * @param p2 the position of the leftop of the to vertex - */ - def move2Boundary (p1: VectorD, p2: VectorD): Unit = - val gapShift = shift * gap - val toCenterX = to.x + to.w / 2 // p1(0) - val toCenterY = to.y + to.h / 2 // p1(1) - val fromCenterX = from.x + from.w / 2 // p2(0) - val fromCenterY = from.y + from.h / 2 // p2(1) - val angle = atan2 (toCenterY - fromCenterY, toCenterX - fromCenterX) - - from.shape match - case _: Rectangle => - val p1Vec = pointOnRect (toCenterX, toCenterY, from.x, from.y, from.x + from.w, from.y + from.h) - p1(0) = p1Vec(0) - p1(1) = p1Vec(1) - if shift != 0 then // Yulong note: Either on top/bot shift or left/right shift - if p1(0) == to.x || p1(0) == to.x + to.w then p1(1) += gapShift else p1(0) += gapShift - - case _ => - // the circle is 'from' Yulong fixed the circle one way in case and shift - // p1 is the left top - val radius1 = (from.shape.getWidth () + from.shape.getHeight ()) / 4.0 - p1(0) += radius1 + radius1 * cos (angle + gapShift) - p1(1) += radius1 + radius1 * sin (angle + gapShift) - - to.shape match - case _: Rectangle => - val p2Vec = pointOnRect (fromCenterX, fromCenterY, to.x, to.y, to.x + to.w, to.y + to.h) - p2(0) = p2Vec(0) 
- p2(1) = p2Vec(1) - if shift != 0 then - if p2(0) == from.x || p2(0) == from.x + from.w then p2(1) += gapShift else p2(0) += gapShift - - case _ => - // the circle is 'to' - // p2 is the left top - val radius2 = (to.shape.getWidth() + to.shape.getHeight()) / 4.0 - p2(0) += radius2 + radius2 * cos (Pi + angle - gapShift) - p2(1) += radius2 + radius2 * sin (Pi + angle - gapShift) - - end move2Boundary - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set (or reset) the color. - * @param color the new color - */ - def setColor (color2: Color): Unit = color = color2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add a token from this node. - * @param token the token to add - */ - def addToken (token: Token): Unit = tokens += token - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this edge to a string. - */ - override def toString: String = s"Edge $label [ $id ]" - - end Edge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `Token` class is used to represent tokens in the graph. 
- * @param shape the shape of the token - * @param label the label for the created token - * @param primary whether the token is primary/free/true to secondary/bound/false - * @param color the color of the token - * @param onNode the node the token is on - * @param w the width of the token - * @param h the height of the token - */ - case class Token (shape: RectangularShape, label: String, primary: Boolean, var color: Color, - var onNode: Node, val w: Double, val h: Double): - - private val id = nextT () // token identifier - - if onNode != null then - onNode.addToken (this) - val x = onNode.shape.getCenterX () - w / 2.0 - val y = onNode.shape.getCenterY () - h / 2.0 - shape.setFrame (x, y, w, h) - end if - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a primary/free token with explicit coordinates. - * Such tokens are free to move anywhere in the drawing panel. - * @param shape the shape of the token - * @param label the label for the created token - * @param color the color of the token - * @param x the x-coordinate of the token's location - * @param y the y-coordinate of the token's location - * @param w the width of the token - * @param h the height of the token - */ - def this (shape: RectangularShape, label: String, primary: Boolean, color: Color, - x: Double, y: Double, w: Double, h: Double) = - this (shape, label, true, color, null, w, h) - shape.setFrame (x, y, w, h) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set (or reset) the color. - * @param color the new color - */ - def setColor (color2: Color): Unit = color = color2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the node the token is on. 
- * @param onNode2 the node the token is on - */ - def setOnNode (onNode2: Node): Unit = onNode = onNode2 - - end Token - - /** List of nodes in the graph - */ - val nodes = ListBuffer [Node] () - - /** List of edges in the graph - */ - val edges = ListBuffer [Edge] () - - /** List of free tokens in the graph (bound tokens must be in a nodes or edges list) - */ - val freeTokens = ListBuffer [Token] () - - /** Whether the nodes have been visited (internal use only) - */ - private val visited = new HashSet [Node] () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add a node to the graph. - * @param n the node to add - */ - def addNode (n: Node): Unit = nodes += n - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove a node from the graph. - * @param n the node to remove - */ - def removeNode (n: Node): Unit = nodes -= n - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add an edge to the graph. - * @param e the edge to add - */ - def addEdge (e: Edge): Unit = edges += e - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove an edge from the graph. - * @param e the edge to remove - */ - def removeEdge (e: Edge): Unit = edges -= e - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add a free token to the graph. - * @param t the free token to add - */ - def addFreeToken (t: Token): Unit = freeTokens += t - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove a free token from the graph. - * @param t the free token to remove - */ - def removeFreeToken (t: Token): Unit = freeTokens -= t - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get all the root nodes (those with no incoming edges). 
- */ - def getRoots: ListBuffer [Node] = - val roots = new ListBuffer [Node] () - for n <- nodes do - var keep = true - for e <- edges if n == e.to do keep = false - if keep then roots += n - end for - roots - end getRoots - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Mark all nodes as unvisited by clearing them from the hash set. - */ - private def clearVisited (): Unit = visited.clear () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursively visit all nodes in the graph. - * @param n the current node - * @param level the recursion level - */ - def traverse (n: Node, level: Int): Unit = - for i <- 0 until level do print ("\t") - println (n) // print visited node - //visited.add (n) - val outgoing = n.outEdges - if outgoing != null then - for oEdge <- outgoing do - val next = oEdge.to - traverse (next, level + 1) - // if ! visited. contains (next) then traverse (next, level + 1) - end for - end if - end traverse - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Traverse the graph printing out its nodes and showing connectivity by indentation. - */ - def traverseNodes (): Unit = - clearVisited () - // traverse (nodes.get (0), 0) // only from node 0 - for r <- getRoots do traverse (r, 0) // from all roots - end traverseNodes - -end Dgraph - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Counter` object maintains counters. - */ -object Counter: - - private var nCounter = 0 - private var eCounter = 0 - private var tCounter = 0 - - def nextN (): Int = { nCounter += 1; nCounter } - def nextE (): Int = { eCounter += 1; eCounter } - def nextT (): Int = { tCounter += 1; tCounter } - -end Counter - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `dgraphTest` main function to test the `Dgraph` class. 
- * > runMain scalation.animation.dgraphTest - */ -@main def dgraphTest (): Unit = - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build and test a directed graph. - */ - def testDirectedGraph (g: Dgraph): Unit = - // Create nodes - val n1 = g.Node (Ellipse (), "node1", true, red, 100, 200, 20, 20) - val n2 = g.Node (Ellipse (), "node2", true, blue, 300, 100, 20, 20) - val n3 = g.Node (Ellipse (), "node3", true, green, 300, 300, 20, 20) - val n4 = g.Node (Ellipse (), "node4", true, purple, 500, 200, 20, 20) - - // Create edges - val e1 = new g.Edge (QCurve (), "edge1", true, black, n1, n2) // 120, 210, 300, 110) - n1.addEdge (e1) - val e2 = new g.Edge (QCurve (), "edge1", true, black, n1, n3) // 120, 210, 300, 310) - n1.addEdge (e2) - val e3 = new g.Edge (QCurve (), "edge1", true, black, n2, n4) // 320, 110, 500, 210) - n2.addEdge (e3) - val e4 = new g.Edge (QCurve (), "edge1", true, black, n3, n4) // 320, 310, 500, 210) - n3.addEdge (e4) - - // Add the nodes and edges to the directed graph - g.addNode (n1) - g.addNode (n2) - g.addNode (n3) - g.addNode (n4) - g.addEdge (e1) - g.addEdge (e2) - g.addEdge (e3) - g.addEdge (e4) - - // Traverse the directed graph printing out its nodes - g.traverseNodes () - end testDirectedGraph - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build and test a bipartite graph. 
- */ - def testBipartiteGraph (g: Dgraph): Unit = - // Create nodes - val n1 = g.Node (Ellipse (), "node1", false, orange, 100, 100, 30, 30) - val n2 = g.Node (Ellipse (), "node2", false, orange, 100, 300, 30, 30) - val n3 = g.Node (Rectangle (), "node2", true, lightgreen, 300, 185, 30, 60) - val n4 = g.Node (Ellipse (), "node4", false, red, 500, 100, 30, 30) - val n5 = g.Node (Ellipse (), "node5", false, red, 500, 300, 30, 30) - - // Create edges - val e1 = new g.Edge (QCurve (), "edge1", true, black, n1, n3) // 130, 115, 300, 215) - n1.addEdge (e1) - val e2 = new g.Edge (QCurve (), "edge2", true, black, n2, n3) // 130, 315, 300, 215) - n2.addEdge (e2) - val e3 = new g.Edge (QCurve (), "edge3", true, black, n3, n4) // 330, 215, 500, 115) - n3.addEdge (e3) - val e4 = new g.Edge (QCurve (), "edge4", true, black, n3, n5) // 330, 215, 500, 315) - n3.addEdge (e4) - - // Add the nodes and edges to the directed graph - g.addNode (n1) - g.addNode (n2) - g.addNode (n3) - g.addNode (n4) - g.addNode (n5) - g.addEdge (e1) - g.addEdge (e2) - g.addEdge (e3) - g.addEdge (e4) - - // Traverse the directed graph printing out its nodes - g.traverseNodes () - end testBipartiteGraph - - println ("Run DgraphTest - Bipartite Graph Test\n") - val bg = new Dgraph ("Bipartite_Graph", true) - testBipartiteGraph (bg) - - println ("Run DgraphTest - Directed Graph Test\n") - val dg = new Dgraph ("Directed_Graph", false) - testDirectedGraph (dg) - -end dgraphTest - diff --git a/target/scala-3.6.4/classes/scalation/animation/simpleAnimator2Test.class b/target/scala-3.6.4/classes/scalation/animation/simpleAnimator2Test.class deleted file mode 100644 index f8ae64866..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/simpleAnimator2Test.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/simpleAnimator2Test.tasty b/target/scala-3.6.4/classes/scalation/animation/simpleAnimator2Test.tasty deleted file mode 100644 index dc2062bfa..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/animation/simpleAnimator2Test.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/simpleAnimatorTest.class b/target/scala-3.6.4/classes/scalation/animation/simpleAnimatorTest.class deleted file mode 100644 index a8589d4ea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/simpleAnimatorTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/animation/simpleAnimatorTest.tasty b/target/scala-3.6.4/classes/scalation/animation/simpleAnimatorTest.tasty deleted file mode 100644 index c50fec55b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/animation/simpleAnimatorTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/biMapTest.class b/target/scala-3.6.4/classes/scalation/biMapTest.class deleted file mode 100644 index fb49982a3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/biMapTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/biMapTest.tasty b/target/scala-3.6.4/classes/scalation/biMapTest.tasty deleted file mode 100644 index 81faf90aa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/biMapTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/boolTest.class b/target/scala-3.6.4/classes/scalation/boolTest.class deleted file mode 100644 index 3b1b9889f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/boolTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/boolTest.tasty b/target/scala-3.6.4/classes/scalation/boolTest.tasty deleted file mode 100644 index eaeb3e325..000000000 Binary files a/target/scala-3.6.4/classes/scalation/boolTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/AutoDiff$package$.class b/target/scala-3.6.4/classes/scalation/calculus/AutoDiff$package$.class deleted file mode 100644 index 158a7c22f..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/calculus/AutoDiff$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/AutoDiff$package.class b/target/scala-3.6.4/classes/scalation/calculus/AutoDiff$package.class deleted file mode 100644 index 16462fd12..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/AutoDiff$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/AutoDiff$package.tasty b/target/scala-3.6.4/classes/scalation/calculus/AutoDiff$package.tasty deleted file mode 100644 index 74ad941ae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/AutoDiff$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/AutoDiff.class b/target/scala-3.6.4/classes/scalation/calculus/AutoDiff.class deleted file mode 100644 index c8e9d6b31..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/AutoDiff.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/AutoDiff.tasty b/target/scala-3.6.4/classes/scalation/calculus/AutoDiff.tasty deleted file mode 100644 index 04120cdc6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/AutoDiff.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/B_Spline$.class b/target/scala-3.6.4/classes/scalation/calculus/B_Spline$.class deleted file mode 100644 index e8e9f1bdf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/B_Spline$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/B_Spline$package$.class b/target/scala-3.6.4/classes/scalation/calculus/B_Spline$package$.class deleted file mode 100644 index 849cb931e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/B_Spline$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/B_Spline$package.class 
b/target/scala-3.6.4/classes/scalation/calculus/B_Spline$package.class deleted file mode 100644 index 3cefd9c64..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/B_Spline$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/B_Spline$package.tasty b/target/scala-3.6.4/classes/scalation/calculus/B_Spline$package.tasty deleted file mode 100644 index 28a8bf0d0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/B_Spline$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/B_Spline.class b/target/scala-3.6.4/classes/scalation/calculus/B_Spline.class deleted file mode 100644 index cc479545a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/B_Spline.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/B_Spline.tasty b/target/scala-3.6.4/classes/scalation/calculus/B_Spline.tasty deleted file mode 100644 index 4cf4f7330..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/B_Spline.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/BasisFunction$.class b/target/scala-3.6.4/classes/scalation/calculus/BasisFunction$.class deleted file mode 100644 index 45f2a3e88..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/BasisFunction$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/BasisFunction.class b/target/scala-3.6.4/classes/scalation/calculus/BasisFunction.class deleted file mode 100644 index e05a46b70..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/BasisFunction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/BasisFunction.tasty b/target/scala-3.6.4/classes/scalation/calculus/BasisFunction.tasty deleted file mode 100644 index 91c3b45eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/BasisFunction.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DB_Spline$.class b/target/scala-3.6.4/classes/scalation/calculus/DB_Spline$.class deleted file mode 100644 index 8cee6d505..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DB_Spline$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DB_Spline$package$.class b/target/scala-3.6.4/classes/scalation/calculus/DB_Spline$package$.class deleted file mode 100644 index 8a92060e8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DB_Spline$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DB_Spline$package.class b/target/scala-3.6.4/classes/scalation/calculus/DB_Spline$package.class deleted file mode 100644 index d0092bab6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DB_Spline$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DB_Spline$package.tasty b/target/scala-3.6.4/classes/scalation/calculus/DB_Spline$package.tasty deleted file mode 100644 index 85b8db0d1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DB_Spline$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DB_Spline.class b/target/scala-3.6.4/classes/scalation/calculus/DB_Spline.class deleted file mode 100644 index 9e55fcc7b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DB_Spline.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DB_Spline.tasty b/target/scala-3.6.4/classes/scalation/calculus/DB_Spline.tasty deleted file mode 100644 index 933a74db7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DB_Spline.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DBasisFunction$.class b/target/scala-3.6.4/classes/scalation/calculus/DBasisFunction$.class deleted file mode 
100644 index 2ba39795a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DBasisFunction$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DBasisFunction.class b/target/scala-3.6.4/classes/scalation/calculus/DBasisFunction.class deleted file mode 100644 index 586efc8d3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DBasisFunction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DBasisFunction.tasty b/target/scala-3.6.4/classes/scalation/calculus/DBasisFunction.tasty deleted file mode 100644 index 9db31068b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DBasisFunction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DFourier$.class b/target/scala-3.6.4/classes/scalation/calculus/DFourier$.class deleted file mode 100644 index 29ee1feed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DFourier$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DFourier$package$.class b/target/scala-3.6.4/classes/scalation/calculus/DFourier$package$.class deleted file mode 100644 index 4bb9053b3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DFourier$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DFourier$package.class b/target/scala-3.6.4/classes/scalation/calculus/DFourier$package.class deleted file mode 100644 index 403234384..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DFourier$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DFourier$package.tasty b/target/scala-3.6.4/classes/scalation/calculus/DFourier$package.tasty deleted file mode 100644 index 56c88e5f8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DFourier$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/calculus/DFourier.class b/target/scala-3.6.4/classes/scalation/calculus/DFourier.class deleted file mode 100644 index 16c7491e2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DFourier.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DFourier.tasty b/target/scala-3.6.4/classes/scalation/calculus/DFourier.tasty deleted file mode 100644 index b5aa2dcba..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DFourier.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DRadial$.class b/target/scala-3.6.4/classes/scalation/calculus/DRadial$.class deleted file mode 100644 index 20189552a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DRadial$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DRadial$package$.class b/target/scala-3.6.4/classes/scalation/calculus/DRadial$package$.class deleted file mode 100644 index 43a532162..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DRadial$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DRadial$package.class b/target/scala-3.6.4/classes/scalation/calculus/DRadial$package.class deleted file mode 100644 index e2762b7ba..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DRadial$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DRadial$package.tasty b/target/scala-3.6.4/classes/scalation/calculus/DRadial$package.tasty deleted file mode 100644 index 5a2bfbbbc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DRadial$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DRadial.class b/target/scala-3.6.4/classes/scalation/calculus/DRadial.class deleted file mode 100644 index 709d79d4d..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/calculus/DRadial.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/DRadial.tasty b/target/scala-3.6.4/classes/scalation/calculus/DRadial.tasty deleted file mode 100644 index 8426cb920..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/DRadial.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Differential$.class b/target/scala-3.6.4/classes/scalation/calculus/Differential$.class deleted file mode 100644 index a0bcb172e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Differential$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Differential$package$.class b/target/scala-3.6.4/classes/scalation/calculus/Differential$package$.class deleted file mode 100644 index 9a4a2a918..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Differential$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Differential$package.class b/target/scala-3.6.4/classes/scalation/calculus/Differential$package.class deleted file mode 100644 index d08f17d3b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Differential$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Differential$package.tasty b/target/scala-3.6.4/classes/scalation/calculus/Differential$package.tasty deleted file mode 100644 index 3bc8a7be1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Differential$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Differential.class b/target/scala-3.6.4/classes/scalation/calculus/Differential.class deleted file mode 100644 index f98a6a97c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Differential.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/calculus/Differential.tasty b/target/scala-3.6.4/classes/scalation/calculus/Differential.tasty deleted file mode 100644 index 1abb98e0a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Differential.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/FFT$.class b/target/scala-3.6.4/classes/scalation/calculus/FFT$.class deleted file mode 100644 index 8fe6e07ee..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/FFT$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/FFT$package$.class b/target/scala-3.6.4/classes/scalation/calculus/FFT$package$.class deleted file mode 100644 index c45f35480..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/FFT$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/FFT$package.class b/target/scala-3.6.4/classes/scalation/calculus/FFT$package.class deleted file mode 100644 index 4bab33646..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/FFT$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/FFT$package.tasty b/target/scala-3.6.4/classes/scalation/calculus/FFT$package.tasty deleted file mode 100644 index 75d7020b1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/FFT$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/FFT.class b/target/scala-3.6.4/classes/scalation/calculus/FFT.class deleted file mode 100644 index 244e5160f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/FFT.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/FFT.tasty b/target/scala-3.6.4/classes/scalation/calculus/FFT.tasty deleted file mode 100644 index 44c88028a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/FFT.tasty and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/calculus/Fourier$.class b/target/scala-3.6.4/classes/scalation/calculus/Fourier$.class deleted file mode 100644 index 6ff485f2b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Fourier$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Fourier$package$.class b/target/scala-3.6.4/classes/scalation/calculus/Fourier$package$.class deleted file mode 100644 index 1ad5a1c22..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Fourier$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Fourier$package.class b/target/scala-3.6.4/classes/scalation/calculus/Fourier$package.class deleted file mode 100644 index 3fcc430c7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Fourier$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Fourier$package.tasty b/target/scala-3.6.4/classes/scalation/calculus/Fourier$package.tasty deleted file mode 100644 index 56961ca42..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Fourier$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Fourier.class b/target/scala-3.6.4/classes/scalation/calculus/Fourier.class deleted file mode 100644 index 47bf92d92..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Fourier.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Fourier.tasty b/target/scala-3.6.4/classes/scalation/calculus/Fourier.tasty deleted file mode 100644 index f3af40480..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Fourier.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/GaussianFunc.class b/target/scala-3.6.4/classes/scalation/calculus/GaussianFunc.class deleted file mode 100644 index 2894694bd..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/calculus/GaussianFunc.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/GaussianFunc.tasty b/target/scala-3.6.4/classes/scalation/calculus/GaussianFunc.tasty deleted file mode 100644 index f8dac28cf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/GaussianFunc.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Hilbert$.class b/target/scala-3.6.4/classes/scalation/calculus/Hilbert$.class deleted file mode 100644 index 8741b6c53..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Hilbert$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Hilbert$package$.class b/target/scala-3.6.4/classes/scalation/calculus/Hilbert$package$.class deleted file mode 100644 index de413d4c6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Hilbert$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Hilbert$package.class b/target/scala-3.6.4/classes/scalation/calculus/Hilbert$package.class deleted file mode 100644 index 470030045..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Hilbert$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Hilbert$package.tasty b/target/scala-3.6.4/classes/scalation/calculus/Hilbert$package.tasty deleted file mode 100644 index 3f03db79d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Hilbert$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Hilbert.class b/target/scala-3.6.4/classes/scalation/calculus/Hilbert.class deleted file mode 100644 index 3f95d6b49..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Hilbert.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Hilbert.tasty 
b/target/scala-3.6.4/classes/scalation/calculus/Hilbert.tasty deleted file mode 100644 index 21b6b0fa3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Hilbert.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Integral$.class b/target/scala-3.6.4/classes/scalation/calculus/Integral$.class deleted file mode 100644 index a43349c2c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Integral$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Integral$package$.class b/target/scala-3.6.4/classes/scalation/calculus/Integral$package$.class deleted file mode 100644 index dafad9281..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Integral$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Integral$package.class b/target/scala-3.6.4/classes/scalation/calculus/Integral$package.class deleted file mode 100644 index 9e01c24c2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Integral$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Integral$package.tasty b/target/scala-3.6.4/classes/scalation/calculus/Integral$package.tasty deleted file mode 100644 index 51bcf3df2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Integral$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Integral.class b/target/scala-3.6.4/classes/scalation/calculus/Integral.class deleted file mode 100644 index e3074bce0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Integral.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Integral.tasty b/target/scala-3.6.4/classes/scalation/calculus/Integral.tasty deleted file mode 100644 index 853e28f14..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Integral.tasty and /dev/null 
differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Node$.class b/target/scala-3.6.4/classes/scalation/calculus/Node$.class deleted file mode 100644 index 207684c6d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Node$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Node.class b/target/scala-3.6.4/classes/scalation/calculus/Node.class deleted file mode 100644 index a5c437955..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Node.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Node.tasty b/target/scala-3.6.4/classes/scalation/calculus/Node.tasty deleted file mode 100644 index 522588632..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Node.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Poly$.class b/target/scala-3.6.4/classes/scalation/calculus/Poly$.class deleted file mode 100644 index 7be9a8e37..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Poly$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Poly$package$.class b/target/scala-3.6.4/classes/scalation/calculus/Poly$package$.class deleted file mode 100644 index d2ca19faa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Poly$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Poly$package.class b/target/scala-3.6.4/classes/scalation/calculus/Poly$package.class deleted file mode 100644 index b7c7d0d50..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Poly$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Poly$package.tasty b/target/scala-3.6.4/classes/scalation/calculus/Poly$package.tasty deleted file mode 100644 index 42d55ee94..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Poly$package.tasty and /dev/null 
differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Poly.class b/target/scala-3.6.4/classes/scalation/calculus/Poly.class deleted file mode 100644 index 4d24dd7cc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Poly.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Poly.tasty b/target/scala-3.6.4/classes/scalation/calculus/Poly.tasty deleted file mode 100644 index 3a212966a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Poly.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Radial$.class b/target/scala-3.6.4/classes/scalation/calculus/Radial$.class deleted file mode 100644 index 2a44ff854..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Radial$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Radial$package$.class b/target/scala-3.6.4/classes/scalation/calculus/Radial$package$.class deleted file mode 100644 index 9a025f883..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Radial$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Radial$package.class b/target/scala-3.6.4/classes/scalation/calculus/Radial$package.class deleted file mode 100644 index d120cfb6d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Radial$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Radial$package.tasty b/target/scala-3.6.4/classes/scalation/calculus/Radial$package.tasty deleted file mode 100644 index 7b7412006..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Radial$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Radial.class b/target/scala-3.6.4/classes/scalation/calculus/Radial.class deleted file mode 100644 index 75c981668..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/calculus/Radial.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/Radial.tasty b/target/scala-3.6.4/classes/scalation/calculus/Radial.tasty deleted file mode 100644 index a81cfd856..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/Radial.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/RadialType$$anon$1.class b/target/scala-3.6.4/classes/scalation/calculus/RadialType$$anon$1.class deleted file mode 100644 index 28919cc10..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/RadialType$$anon$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/RadialType$.class b/target/scala-3.6.4/classes/scalation/calculus/RadialType$.class deleted file mode 100644 index f21ae482a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/RadialType$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/RadialType.class b/target/scala-3.6.4/classes/scalation/calculus/RadialType.class deleted file mode 100644 index 6b92d16b8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/RadialType.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/RadialType.tasty b/target/scala-3.6.4/classes/scalation/calculus/RadialType.tasty deleted file mode 100644 index e73ce275a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/RadialType.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/autoDiffTest.class b/target/scala-3.6.4/classes/scalation/calculus/autoDiffTest.class deleted file mode 100644 index 42c6e3c92..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/autoDiffTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/autoDiffTest.tasty b/target/scala-3.6.4/classes/scalation/calculus/autoDiffTest.tasty 
deleted file mode 100644 index 6c3b91b73..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/autoDiffTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/autoDiffTest2.class b/target/scala-3.6.4/classes/scalation/calculus/autoDiffTest2.class deleted file mode 100644 index fe1d37c6f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/autoDiffTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/autoDiffTest2.tasty b/target/scala-3.6.4/classes/scalation/calculus/autoDiffTest2.tasty deleted file mode 100644 index 313d6424d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/autoDiffTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest.class b/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest.class deleted file mode 100644 index 6a1a8d5ec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest.tasty b/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest.tasty deleted file mode 100644 index 6204f6c65..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest2.class b/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest2.class deleted file mode 100644 index 82865c4b4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest2.tasty b/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest2.tasty deleted file mode 100644 index 120e6e966..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest2.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest3.class b/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest3.class deleted file mode 100644 index 66b7bea5b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest3.tasty b/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest3.tasty deleted file mode 100644 index e07db4882..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest4.class b/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest4.class deleted file mode 100644 index f94466576..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest4.tasty b/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest4.tasty deleted file mode 100644 index df64f4fb4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/b_SplineTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/dB_SplineTest.class b/target/scala-3.6.4/classes/scalation/calculus/dB_SplineTest.class deleted file mode 100644 index 0a351c59c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/dB_SplineTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/dB_SplineTest.tasty b/target/scala-3.6.4/classes/scalation/calculus/dB_SplineTest.tasty deleted file mode 100644 index dbac68326..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/dB_SplineTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/dB_SplineTest2.class b/target/scala-3.6.4/classes/scalation/calculus/dB_SplineTest2.class deleted file mode 100644 index 5682de67b..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/calculus/dB_SplineTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/dB_SplineTest2.tasty b/target/scala-3.6.4/classes/scalation/calculus/dB_SplineTest2.tasty deleted file mode 100644 index af066f311..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/dB_SplineTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/dFourierTest.class b/target/scala-3.6.4/classes/scalation/calculus/dFourierTest.class deleted file mode 100644 index 330368e36..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/dFourierTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/dFourierTest.tasty b/target/scala-3.6.4/classes/scalation/calculus/dFourierTest.tasty deleted file mode 100644 index d61bf2b75..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/dFourierTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/dRadialTest.class b/target/scala-3.6.4/classes/scalation/calculus/dRadialTest.class deleted file mode 100644 index 70d24ed48..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/dRadialTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/dRadialTest.tasty b/target/scala-3.6.4/classes/scalation/calculus/dRadialTest.tasty deleted file mode 100644 index ca33a33c2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/dRadialTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/differentialTest.class b/target/scala-3.6.4/classes/scalation/calculus/differentialTest.class deleted file mode 100644 index 3220f0f30..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/differentialTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/differentialTest.tasty 
b/target/scala-3.6.4/classes/scalation/calculus/differentialTest.tasty deleted file mode 100644 index 2ee02e996..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/differentialTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/differentialTest2.class b/target/scala-3.6.4/classes/scalation/calculus/differentialTest2.class deleted file mode 100644 index 96e53a300..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/differentialTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/differentialTest2.tasty b/target/scala-3.6.4/classes/scalation/calculus/differentialTest2.tasty deleted file mode 100644 index a7594f9db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/differentialTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/fFTTest.class b/target/scala-3.6.4/classes/scalation/calculus/fFTTest.class deleted file mode 100644 index f7e231e79..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/fFTTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/fFTTest.tasty b/target/scala-3.6.4/classes/scalation/calculus/fFTTest.tasty deleted file mode 100644 index dba21600d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/fFTTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/fourierTest.class b/target/scala-3.6.4/classes/scalation/calculus/fourierTest.class deleted file mode 100644 index c3a557e3a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/fourierTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/fourierTest.tasty b/target/scala-3.6.4/classes/scalation/calculus/fourierTest.tasty deleted file mode 100644 index a1e568444..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/fourierTest.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/hilbertTest.class b/target/scala-3.6.4/classes/scalation/calculus/hilbertTest.class deleted file mode 100644 index 391309b11..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/hilbertTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/hilbertTest.tasty b/target/scala-3.6.4/classes/scalation/calculus/hilbertTest.tasty deleted file mode 100644 index 76e92db6b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/hilbertTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/index.html b/target/scala-3.6.4/classes/scalation/calculus/index.html deleted file mode 100644 index 587516b87..000000000 --- a/target/scala-3.6.4/classes/scalation/calculus/index.html +++ /dev/null @@ -1,21 +0,0 @@ - - -

    Source files in calculus Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/calculus/integralTest.class b/target/scala-3.6.4/classes/scalation/calculus/integralTest.class deleted file mode 100644 index e91e44ed7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/integralTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/integralTest.tasty b/target/scala-3.6.4/classes/scalation/calculus/integralTest.tasty deleted file mode 100644 index b2ebde979..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/integralTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/integralTest2.class b/target/scala-3.6.4/classes/scalation/calculus/integralTest2.class deleted file mode 100644 index 0d469e053..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/integralTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/integralTest2.tasty b/target/scala-3.6.4/classes/scalation/calculus/integralTest2.tasty deleted file mode 100644 index 1da2075c1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/integralTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/polyTest.class b/target/scala-3.6.4/classes/scalation/calculus/polyTest.class deleted file mode 100644 index 9493da55c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/polyTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/polyTest.tasty b/target/scala-3.6.4/classes/scalation/calculus/polyTest.tasty deleted file mode 100644 index 1e8f6ec88..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/polyTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/radialTest.class b/target/scala-3.6.4/classes/scalation/calculus/radialTest.class deleted file mode 100644 index 406854590..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/calculus/radialTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/calculus/radialTest.tasty b/target/scala-3.6.4/classes/scalation/calculus/radialTest.tasty deleted file mode 100644 index 67740a35c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/calculus/radialTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/cforTest.class b/target/scala-3.6.4/classes/scalation/cforTest.class deleted file mode 100644 index ada9fdaa4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/cforTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/cforTest.tasty b/target/scala-3.6.4/classes/scalation/cforTest.tasty deleted file mode 100644 index 9fc1f7f76..000000000 Binary files a/target/scala-3.6.4/classes/scalation/cforTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/circularQueueTest.class b/target/scala-3.6.4/classes/scalation/circularQueueTest.class deleted file mode 100644 index 5f6b5b413..000000000 Binary files a/target/scala-3.6.4/classes/scalation/circularQueueTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/circularQueueTest.tasty b/target/scala-3.6.4/classes/scalation/circularQueueTest.tasty deleted file mode 100644 index 9d68fb476..000000000 Binary files a/target/scala-3.6.4/classes/scalation/circularQueueTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/commonFunTest.class b/target/scala-3.6.4/classes/scalation/commonFunTest.class deleted file mode 100644 index 6a7e93417..000000000 Binary files a/target/scala-3.6.4/classes/scalation/commonFunTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/commonFunTest.tasty b/target/scala-3.6.4/classes/scalation/commonFunTest.tasty deleted file mode 100644 index d3b12442d..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/commonFunTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/coordinatesTest.class b/target/scala-3.6.4/classes/scalation/coordinatesTest.class deleted file mode 100644 index 878a12497..000000000 Binary files a/target/scala-3.6.4/classes/scalation/coordinatesTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/coordinatesTest.tasty b/target/scala-3.6.4/classes/scalation/coordinatesTest.tasty deleted file mode 100644 index 95d09af2d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/coordinatesTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/coordinatesTest2.class b/target/scala-3.6.4/classes/scalation/coordinatesTest2.class deleted file mode 100644 index b1ae7fea6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/coordinatesTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/coordinatesTest2.tasty b/target/scala-3.6.4/classes/scalation/coordinatesTest2.tasty deleted file mode 100644 index de54d4e98..000000000 Binary files a/target/scala-3.6.4/classes/scalation/coordinatesTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/coordinatesTest3.class b/target/scala-3.6.4/classes/scalation/coordinatesTest3.class deleted file mode 100644 index daedf910c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/coordinatesTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/coordinatesTest3.tasty b/target/scala-3.6.4/classes/scalation/coordinatesTest3.tasty deleted file mode 100644 index f119d191b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/coordinatesTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/coordinatesTest4.class b/target/scala-3.6.4/classes/scalation/coordinatesTest4.class deleted file mode 100644 index ee6ae0b99..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/coordinatesTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/coordinatesTest4.tasty b/target/scala-3.6.4/classes/scalation/coordinatesTest4.tasty deleted file mode 100644 index f45a3139c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/coordinatesTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/counterTest.class b/target/scala-3.6.4/classes/scalation/counterTest.class deleted file mode 100644 index 9e93eb99f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/counterTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/counterTest.tasty b/target/scala-3.6.4/classes/scalation/counterTest.tasty deleted file mode 100644 index 451489bce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/counterTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BinTree.class b/target/scala-3.6.4/classes/scalation/database/BinTree.class deleted file mode 100644 index 00f9dd25e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BinTree.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BinTree.tasty b/target/scala-3.6.4/classes/scalation/database/BinTree.tasty deleted file mode 100644 index b07076741..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BinTree.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpNode$.class b/target/scala-3.6.4/classes/scalation/database/BpNode$.class deleted file mode 100644 index 1653baca8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpNode$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpNode$package$.class b/target/scala-3.6.4/classes/scalation/database/BpNode$package$.class deleted file mode 100644 index 1e25bc6e0..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/BpNode$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpNode$package.class b/target/scala-3.6.4/classes/scalation/database/BpNode$package.class deleted file mode 100644 index 052ec96de..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpNode$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpNode$package.tasty b/target/scala-3.6.4/classes/scalation/database/BpNode$package.tasty deleted file mode 100644 index ee04df4ed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpNode$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpNode.class b/target/scala-3.6.4/classes/scalation/database/BpNode.class deleted file mode 100644 index a9287ab35..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpNode.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpNode.tasty b/target/scala-3.6.4/classes/scalation/database/BpNode.tasty deleted file mode 100644 index 3ed28700f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpNode.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpTreeMap$.class b/target/scala-3.6.4/classes/scalation/database/BpTreeMap$.class deleted file mode 100644 index 4c1d870bf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpTreeMap$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpTreeMap$TreeIterator$.class b/target/scala-3.6.4/classes/scalation/database/BpTreeMap$TreeIterator$.class deleted file mode 100644 index eed18c081..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpTreeMap$TreeIterator$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpTreeMap$TreeIterator.class 
b/target/scala-3.6.4/classes/scalation/database/BpTreeMap$TreeIterator.class deleted file mode 100644 index 6b6ba4551..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpTreeMap$TreeIterator.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpTreeMap$package$.class b/target/scala-3.6.4/classes/scalation/database/BpTreeMap$package$.class deleted file mode 100644 index 094679463..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpTreeMap$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpTreeMap$package$Car$1.class b/target/scala-3.6.4/classes/scalation/database/BpTreeMap$package$Car$1.class deleted file mode 100644 index 25f40e0e9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpTreeMap$package$Car$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpTreeMap$package$Car$3$.class b/target/scala-3.6.4/classes/scalation/database/BpTreeMap$package$Car$3$.class deleted file mode 100644 index 588353154..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpTreeMap$package$Car$3$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpTreeMap$package.class b/target/scala-3.6.4/classes/scalation/database/BpTreeMap$package.class deleted file mode 100644 index 276fd7bf3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpTreeMap$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpTreeMap$package.tasty b/target/scala-3.6.4/classes/scalation/database/BpTreeMap$package.tasty deleted file mode 100644 index 3e5325bee..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpTreeMap$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpTreeMap.class b/target/scala-3.6.4/classes/scalation/database/BpTreeMap.class deleted file 
mode 100644 index 02823bb8a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpTreeMap.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpTreeMap.tasty b/target/scala-3.6.4/classes/scalation/database/BpTreeMap.tasty deleted file mode 100644 index 395957715..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpTreeMap.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpTreeMultiMap$.class b/target/scala-3.6.4/classes/scalation/database/BpTreeMultiMap$.class deleted file mode 100644 index d140dec57..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpTreeMultiMap$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpTreeMultiMap.class b/target/scala-3.6.4/classes/scalation/database/BpTreeMultiMap.class deleted file mode 100644 index d438d6331..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpTreeMultiMap.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/BpTreeMultiMap.tasty b/target/scala-3.6.4/classes/scalation/database/BpTreeMultiMap.tasty deleted file mode 100644 index 9021362cf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/BpTreeMultiMap.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/FD$.class b/target/scala-3.6.4/classes/scalation/database/FD$.class deleted file mode 100644 index a4e6d46d2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/FD$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/FD.class b/target/scala-3.6.4/classes/scalation/database/FD.class deleted file mode 100644 index dd8916174..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/FD.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/FD.tasty 
b/target/scala-3.6.4/classes/scalation/database/FD.tasty deleted file mode 100644 index f38fda570..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/FD.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/HashMultiMap$.class b/target/scala-3.6.4/classes/scalation/database/HashMultiMap$.class deleted file mode 100644 index 93ec75360..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/HashMultiMap$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/HashMultiMap.class b/target/scala-3.6.4/classes/scalation/database/HashMultiMap.class deleted file mode 100644 index bea103aa4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/HashMultiMap.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/HashMultiMap.tasty b/target/scala-3.6.4/classes/scalation/database/HashMultiMap.tasty deleted file mode 100644 index 6d11c07fd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/HashMultiMap.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Identifiable$.class b/target/scala-3.6.4/classes/scalation/database/Identifiable$.class deleted file mode 100644 index 2474c2629..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Identifiable$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Identifiable.class b/target/scala-3.6.4/classes/scalation/database/Identifiable.class deleted file mode 100644 index a256d0de6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Identifiable.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Identifiable.tasty b/target/scala-3.6.4/classes/scalation/database/Identifiable.tasty deleted file mode 100644 index a6ad9e0d6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Identifiable.tasty and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/database/JHashMap$.class b/target/scala-3.6.4/classes/scalation/database/JHashMap$.class deleted file mode 100644 index 27a15310a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/JHashMap$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/JHashMap.class b/target/scala-3.6.4/classes/scalation/database/JHashMap.class deleted file mode 100644 index 182aab39f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/JHashMap.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/JHashMap.tasty b/target/scala-3.6.4/classes/scalation/database/JHashMap.tasty deleted file mode 100644 index c18dcbe80..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/JHashMap.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/JHashMultiMap$.class b/target/scala-3.6.4/classes/scalation/database/JHashMultiMap$.class deleted file mode 100644 index 4438e4f36..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/JHashMultiMap$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/JHashMultiMap.class b/target/scala-3.6.4/classes/scalation/database/JHashMultiMap.class deleted file mode 100644 index c09a076fd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/JHashMultiMap.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/JHashMultiMap.tasty b/target/scala-3.6.4/classes/scalation/database/JHashMultiMap.tasty deleted file mode 100644 index 006a5ecb4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/JHashMultiMap.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/JTreeMap.class b/target/scala-3.6.4/classes/scalation/database/JTreeMap.class deleted file mode 100644 index 100be4b05..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/JTreeMap.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/JTreeMap.tasty b/target/scala-3.6.4/classes/scalation/database/JTreeMap.tasty deleted file mode 100644 index 4586af611..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/JTreeMap.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/JTreeMultiMap.class b/target/scala-3.6.4/classes/scalation/database/JTreeMultiMap.class deleted file mode 100644 index ecad119f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/JTreeMultiMap.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/JTreeMultiMap.tasty b/target/scala-3.6.4/classes/scalation/database/JTreeMultiMap.tasty deleted file mode 100644 index 460bf51d9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/JTreeMultiMap.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/JavaMap$package$.class b/target/scala-3.6.4/classes/scalation/database/JavaMap$package$.class deleted file mode 100644 index 5d49c9dce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/JavaMap$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/JavaMap$package.class b/target/scala-3.6.4/classes/scalation/database/JavaMap$package.class deleted file mode 100644 index 5514d3eea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/JavaMap$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/JavaMap$package.tasty b/target/scala-3.6.4/classes/scalation/database/JavaMap$package.tasty deleted file mode 100644 index 8370c0163..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/JavaMap$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/KeyType$.class 
b/target/scala-3.6.4/classes/scalation/database/KeyType$.class deleted file mode 100644 index c47ac2973..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/KeyType$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/KeyType$given_Ordering_KeyType$.class b/target/scala-3.6.4/classes/scalation/database/KeyType$given_Ordering_KeyType$.class deleted file mode 100644 index 66ff5bf35..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/KeyType$given_Ordering_KeyType$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/KeyType.class b/target/scala-3.6.4/classes/scalation/database/KeyType.class deleted file mode 100644 index 519e2556b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/KeyType.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/KeyType.tasty b/target/scala-3.6.4/classes/scalation/database/KeyType.tasty deleted file mode 100644 index 78bc39994..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/KeyType.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/LinHashMap$.class b/target/scala-3.6.4/classes/scalation/database/LinHashMap$.class deleted file mode 100644 index 9abc233b4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/LinHashMap$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/LinHashMap$Bucket.class b/target/scala-3.6.4/classes/scalation/database/LinHashMap$Bucket.class deleted file mode 100644 index 281efe91b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/LinHashMap$Bucket.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/LinHashMap$HTIterator$.class b/target/scala-3.6.4/classes/scalation/database/LinHashMap$HTIterator$.class deleted file mode 100644 index 54c62b0e4..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/LinHashMap$HTIterator$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/LinHashMap$HTIterator.class b/target/scala-3.6.4/classes/scalation/database/LinHashMap$HTIterator.class deleted file mode 100644 index 76ccc55c9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/LinHashMap$HTIterator.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/LinHashMap$package$.class b/target/scala-3.6.4/classes/scalation/database/LinHashMap$package$.class deleted file mode 100644 index 8b3235cf1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/LinHashMap$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/LinHashMap$package.class b/target/scala-3.6.4/classes/scalation/database/LinHashMap$package.class deleted file mode 100644 index deed9aac8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/LinHashMap$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/LinHashMap$package.tasty b/target/scala-3.6.4/classes/scalation/database/LinHashMap$package.tasty deleted file mode 100644 index ec5114aba..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/LinHashMap$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/LinHashMap.class b/target/scala-3.6.4/classes/scalation/database/LinHashMap.class deleted file mode 100644 index 6758f9e23..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/LinHashMap.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/LinHashMap.tasty b/target/scala-3.6.4/classes/scalation/database/LinHashMap.tasty deleted file mode 100644 index f32f61058..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/LinHashMap.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/LinHashMultiMap$.class b/target/scala-3.6.4/classes/scalation/database/LinHashMultiMap$.class deleted file mode 100644 index 4f6569679..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/LinHashMultiMap$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/LinHashMultiMap.class b/target/scala-3.6.4/classes/scalation/database/LinHashMultiMap.class deleted file mode 100644 index 8206e18a0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/LinHashMultiMap.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/LinHashMultiMap.tasty b/target/scala-3.6.4/classes/scalation/database/LinHashMultiMap.tasty deleted file mode 100644 index d52808c93..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/LinHashMultiMap.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MakeSchema$.class b/target/scala-3.6.4/classes/scalation/database/MakeSchema$.class deleted file mode 100644 index ad9218fc4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MakeSchema$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MakeSchema$package$.class b/target/scala-3.6.4/classes/scalation/database/MakeSchema$package$.class deleted file mode 100644 index 3b96b1b69..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MakeSchema$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MakeSchema$package.class b/target/scala-3.6.4/classes/scalation/database/MakeSchema$package.class deleted file mode 100644 index bfcdf7ee8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MakeSchema$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MakeSchema$package.tasty b/target/scala-3.6.4/classes/scalation/database/MakeSchema$package.tasty 
deleted file mode 100644 index e4d7dbac1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MakeSchema$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MakeSchema.class b/target/scala-3.6.4/classes/scalation/database/MakeSchema.class deleted file mode 100644 index e73e4f1d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MakeSchema.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MakeSchema.tasty b/target/scala-3.6.4/classes/scalation/database/MakeSchema.tasty deleted file mode 100644 index cf44ced8f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MakeSchema.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MaxSpanningTree$.class b/target/scala-3.6.4/classes/scalation/database/MaxSpanningTree$.class deleted file mode 100644 index da0b6edb3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MaxSpanningTree$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MaxSpanningTree.class b/target/scala-3.6.4/classes/scalation/database/MaxSpanningTree.class deleted file mode 100644 index 3d505c0e6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MaxSpanningTree.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MaxSpanningTree.tasty b/target/scala-3.6.4/classes/scalation/database/MaxSpanningTree.tasty deleted file mode 100644 index d039c4bf4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MaxSpanningTree.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$.class b/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$.class deleted file mode 100644 index 202bdc86a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$Elem$.class b/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$Elem$.class deleted file mode 100644 index c024d6b52..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$Elem$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$Elem.class b/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$Elem.class deleted file mode 100644 index 2f4690941..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$Elem.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$NodeOrder$.class b/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$NodeOrder$.class deleted file mode 100644 index 973aa1c8c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$NodeOrder$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$package$.class b/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$package$.class deleted file mode 100644 index ff60296a0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$package.class b/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$package.class deleted file mode 100644 index c261857da..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$package.tasty b/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$package.tasty deleted file mode 100644 index d055672e7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree.class b/target/scala-3.6.4/classes/scalation/database/MinSpanningTree.class deleted file mode 100644 index d1863c0c2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree.tasty b/target/scala-3.6.4/classes/scalation/database/MinSpanningTree.tasty deleted file mode 100644 index a2e2927db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MinSpanningTree.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MultiMap$package$.class b/target/scala-3.6.4/classes/scalation/database/MultiMap$package$.class deleted file mode 100644 index a4db57ab1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MultiMap$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MultiMap$package.class b/target/scala-3.6.4/classes/scalation/database/MultiMap$package.class deleted file mode 100644 index 50861c5c6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MultiMap$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/MultiMap$package.tasty b/target/scala-3.6.4/classes/scalation/database/MultiMap$package.tasty deleted file mode 100644 index 1722c001f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/MultiMap$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Normalization$package$.class b/target/scala-3.6.4/classes/scalation/database/Normalization$package$.class deleted file mode 100644 index c24254b7f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Normalization$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Normalization$package.class 
b/target/scala-3.6.4/classes/scalation/database/Normalization$package.class deleted file mode 100644 index 8a287fcd3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Normalization$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Normalization$package.tasty b/target/scala-3.6.4/classes/scalation/database/Normalization$package.tasty deleted file mode 100644 index 95b64d2bb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Normalization$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Normalization.class b/target/scala-3.6.4/classes/scalation/database/Normalization.class deleted file mode 100644 index 1b7235a5a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Normalization.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Normalization.tasty b/target/scala-3.6.4/classes/scalation/database/Normalization.tasty deleted file mode 100644 index b2eaaedfc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Normalization.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/SpanningTree$package$.class b/target/scala-3.6.4/classes/scalation/database/SpanningTree$package$.class deleted file mode 100644 index e4d70b718..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/SpanningTree$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/SpanningTree$package.class b/target/scala-3.6.4/classes/scalation/database/SpanningTree$package.class deleted file mode 100644 index 076e1486a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/SpanningTree$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/SpanningTree$package.tasty b/target/scala-3.6.4/classes/scalation/database/SpanningTree$package.tasty deleted file mode 100644 
index 23c21d23c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/SpanningTree$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/SpanningTree.class b/target/scala-3.6.4/classes/scalation/database/SpanningTree.class deleted file mode 100644 index b3005aab4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/SpanningTree.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/SpanningTree.tasty b/target/scala-3.6.4/classes/scalation/database/SpanningTree.tasty deleted file mode 100644 index f99ec7a0a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/SpanningTree.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Spatial.class b/target/scala-3.6.4/classes/scalation/database/Spatial.class deleted file mode 100644 index 81c7727c1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Spatial.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Spatial.tasty b/target/scala-3.6.4/classes/scalation/database/Spatial.tasty deleted file mode 100644 index 68f85317d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Spatial.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TNode$.class b/target/scala-3.6.4/classes/scalation/database/TNode$.class deleted file mode 100644 index e34cb5819..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TNode$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TNode$package$.class b/target/scala-3.6.4/classes/scalation/database/TNode$package$.class deleted file mode 100644 index d6a78ed7d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TNode$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TNode$package$NamedTNode$1.class 
b/target/scala-3.6.4/classes/scalation/database/TNode$package$NamedTNode$1.class deleted file mode 100644 index 19251f036..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TNode$package$NamedTNode$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TNode$package$NamedTNode$3$.class b/target/scala-3.6.4/classes/scalation/database/TNode$package$NamedTNode$3$.class deleted file mode 100644 index 4920231e5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TNode$package$NamedTNode$3$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TNode$package.class b/target/scala-3.6.4/classes/scalation/database/TNode$package.class deleted file mode 100644 index dbba803c9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TNode$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TNode$package.tasty b/target/scala-3.6.4/classes/scalation/database/TNode$package.tasty deleted file mode 100644 index 19182c65e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TNode$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TNode.class b/target/scala-3.6.4/classes/scalation/database/TNode.class deleted file mode 100644 index 165907ebf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TNode.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TNode.tasty b/target/scala-3.6.4/classes/scalation/database/TNode.tasty deleted file mode 100644 index fda3ba4f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TNode.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Tabular$.class b/target/scala-3.6.4/classes/scalation/database/Tabular$.class deleted file mode 100644 index 32f0c7938..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/Tabular$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Tabular$package$.class b/target/scala-3.6.4/classes/scalation/database/Tabular$package$.class deleted file mode 100644 index f4f37db36..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Tabular$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Tabular$package.class b/target/scala-3.6.4/classes/scalation/database/Tabular$package.class deleted file mode 100644 index 44bf92722..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Tabular$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Tabular$package.tasty b/target/scala-3.6.4/classes/scalation/database/Tabular$package.tasty deleted file mode 100644 index ce8d97d60..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Tabular$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Tabular.class b/target/scala-3.6.4/classes/scalation/database/Tabular.class deleted file mode 100644 index 3a19a1896..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Tabular.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Tabular.tasty b/target/scala-3.6.4/classes/scalation/database/Tabular.tasty deleted file mode 100644 index 4a1000769..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Tabular.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Temporal.class b/target/scala-3.6.4/classes/scalation/database/Temporal.class deleted file mode 100644 index de0680f1d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Temporal.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Temporal.tasty b/target/scala-3.6.4/classes/scalation/database/Temporal.tasty 
deleted file mode 100644 index 33563adfd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Temporal.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TimeInterval$.class b/target/scala-3.6.4/classes/scalation/database/TimeInterval$.class deleted file mode 100644 index 66aaa0357..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TimeInterval$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TimeInterval$package$.class b/target/scala-3.6.4/classes/scalation/database/TimeInterval$package$.class deleted file mode 100644 index 39c9f054c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TimeInterval$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TimeInterval$package.class b/target/scala-3.6.4/classes/scalation/database/TimeInterval$package.class deleted file mode 100644 index 08eb9e7fe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TimeInterval$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TimeInterval$package.tasty b/target/scala-3.6.4/classes/scalation/database/TimeInterval$package.tasty deleted file mode 100644 index 89e6071c6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TimeInterval$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TimeInterval.class b/target/scala-3.6.4/classes/scalation/database/TimeInterval.class deleted file mode 100644 index dc3b35862..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TimeInterval.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TimeInterval.tasty b/target/scala-3.6.4/classes/scalation/database/TimeInterval.tasty deleted file mode 100644 index aca8a1223..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TimeInterval.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TimeOfWeek$.class b/target/scala-3.6.4/classes/scalation/database/TimeOfWeek$.class deleted file mode 100644 index 2ae21a420..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TimeOfWeek$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TimeOfWeek$package$.class b/target/scala-3.6.4/classes/scalation/database/TimeOfWeek$package$.class deleted file mode 100644 index 99d10808c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TimeOfWeek$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TimeOfWeek$package.class b/target/scala-3.6.4/classes/scalation/database/TimeOfWeek$package.class deleted file mode 100644 index 3ae9f4d8e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TimeOfWeek$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TimeOfWeek$package.tasty b/target/scala-3.6.4/classes/scalation/database/TimeOfWeek$package.tasty deleted file mode 100644 index 7c991e8ca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TimeOfWeek$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TimeOfWeek.class b/target/scala-3.6.4/classes/scalation/database/TimeOfWeek.class deleted file mode 100644 index d83201eb7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TimeOfWeek.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TimeOfWeek.tasty b/target/scala-3.6.4/classes/scalation/database/TimeOfWeek.tasty deleted file mode 100644 index 4b7604507..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TimeOfWeek.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Tree$.class b/target/scala-3.6.4/classes/scalation/database/Tree$.class deleted file mode 
100644 index cb09462b4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Tree$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Tree$package$.class b/target/scala-3.6.4/classes/scalation/database/Tree$package$.class deleted file mode 100644 index 1234e9375..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Tree$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Tree$package.class b/target/scala-3.6.4/classes/scalation/database/Tree$package.class deleted file mode 100644 index a7ec10dbc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Tree$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Tree$package.tasty b/target/scala-3.6.4/classes/scalation/database/Tree$package.tasty deleted file mode 100644 index ebb961e7c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Tree$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Tree.class b/target/scala-3.6.4/classes/scalation/database/Tree.class deleted file mode 100644 index e7aeb48ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Tree.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/Tree.tasty b/target/scala-3.6.4/classes/scalation/database/Tree.tasty deleted file mode 100644 index f3b186b19..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/Tree.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TreeMultiMap.class b/target/scala-3.6.4/classes/scalation/database/TreeMultiMap.class deleted file mode 100644 index 3f27f55de..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TreeMultiMap.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TreeMultiMap.tasty 
b/target/scala-3.6.4/classes/scalation/database/TreeMultiMap.tasty deleted file mode 100644 index eab33bdff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TreeMultiMap.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TreeNode$.class b/target/scala-3.6.4/classes/scalation/database/TreeNode$.class deleted file mode 100644 index 3b84c17fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TreeNode$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TreeNode.class b/target/scala-3.6.4/classes/scalation/database/TreeNode.class deleted file mode 100644 index 162f200dc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TreeNode.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/TreeNode.tasty b/target/scala-3.6.4/classes/scalation/database/TreeNode.tasty deleted file mode 100644 index d0884179c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/TreeNode.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/bpNodeTest.class b/target/scala-3.6.4/classes/scalation/database/bpNodeTest.class deleted file mode 100644 index c209be0fe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/bpNodeTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/bpNodeTest.tasty b/target/scala-3.6.4/classes/scalation/database/bpNodeTest.tasty deleted file mode 100644 index 10d56e60e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/bpNodeTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/bpNodeTest2.class b/target/scala-3.6.4/classes/scalation/database/bpNodeTest2.class deleted file mode 100644 index a656399a6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/bpNodeTest2.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/bpNodeTest2.tasty b/target/scala-3.6.4/classes/scalation/database/bpNodeTest2.tasty deleted file mode 100644 index f92737fe8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/bpNodeTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/bpNodeTest3.class b/target/scala-3.6.4/classes/scalation/database/bpNodeTest3.class deleted file mode 100644 index 0dd44d0d2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/bpNodeTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/bpNodeTest3.tasty b/target/scala-3.6.4/classes/scalation/database/bpNodeTest3.tasty deleted file mode 100644 index 5500669b4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/bpNodeTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest.class b/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest.class deleted file mode 100644 index 2fc68a6e1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest.tasty b/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest.tasty deleted file mode 100644 index 1b01132a7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest2.class b/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest2.class deleted file mode 100644 index 946ee389d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest2.tasty b/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest2.tasty deleted file mode 100644 index 6baf93ea4..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest3.class b/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest3.class deleted file mode 100644 index ac5478264..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest3.tasty b/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest3.tasty deleted file mode 100644 index c7648bc90..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest4.class b/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest4.class deleted file mode 100644 index 2a60f8574..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest4.tasty b/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest4.tasty deleted file mode 100644 index 100c4522d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest5.class b/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest5.class deleted file mode 100644 index e0da66dd8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest5.tasty b/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest5.tasty deleted file mode 100644 index 4bb53197a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/bpTreeMapTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/Edge$.class 
b/target/scala-3.6.4/classes/scalation/database/graph/Edge$.class deleted file mode 100644 index f44a7c40b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/Edge$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/Edge$package$.class b/target/scala-3.6.4/classes/scalation/database/graph/Edge$package$.class deleted file mode 100644 index c60276d0e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/Edge$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/Edge$package.class b/target/scala-3.6.4/classes/scalation/database/graph/Edge$package.class deleted file mode 100644 index d99f4dc89..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/Edge$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/Edge$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph/Edge$package.tasty deleted file mode 100644 index f7aa665eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/Edge$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/Edge.class b/target/scala-3.6.4/classes/scalation/database/graph/Edge.class deleted file mode 100644 index 08a103d06..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/Edge.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/Edge.tasty b/target/scala-3.6.4/classes/scalation/database/graph/Edge.tasty deleted file mode 100644 index dda38b9c1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/Edge.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/EdgeType$.class b/target/scala-3.6.4/classes/scalation/database/graph/EdgeType$.class deleted file mode 100644 index 03668bab9..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/graph/EdgeType$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/EdgeType$package$.class b/target/scala-3.6.4/classes/scalation/database/graph/EdgeType$package$.class deleted file mode 100644 index 85bbfd5b2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/EdgeType$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/EdgeType$package.class b/target/scala-3.6.4/classes/scalation/database/graph/EdgeType$package.class deleted file mode 100644 index d386ffdd2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/EdgeType$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/EdgeType$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph/EdgeType$package.tasty deleted file mode 100644 index 286740fec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/EdgeType$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/EdgeType.class b/target/scala-3.6.4/classes/scalation/database/graph/EdgeType.class deleted file mode 100644 index 4fbccf6d4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/EdgeType.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/EdgeType.tasty b/target/scala-3.6.4/classes/scalation/database/graph/EdgeType.tasty deleted file mode 100644 index ceadbacb7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/EdgeType.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/PGraph$.class b/target/scala-3.6.4/classes/scalation/database/graph/PGraph$.class deleted file mode 100644 index 9b1f3956b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/PGraph$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/graph/PGraph$package$.class b/target/scala-3.6.4/classes/scalation/database/graph/PGraph$package$.class deleted file mode 100644 index 3651f3e36..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/PGraph$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/PGraph$package.class b/target/scala-3.6.4/classes/scalation/database/graph/PGraph$package.class deleted file mode 100644 index 18737a1f1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/PGraph$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/PGraph$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph/PGraph$package.tasty deleted file mode 100644 index c5e84214c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/PGraph$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/PGraph.class b/target/scala-3.6.4/classes/scalation/database/graph/PGraph.class deleted file mode 100644 index d2989c44a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/PGraph.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/PGraph.tasty b/target/scala-3.6.4/classes/scalation/database/graph/PGraph.tasty deleted file mode 100644 index aacba2881..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/PGraph.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/SocialNetwork$.class b/target/scala-3.6.4/classes/scalation/database/graph/SocialNetwork$.class deleted file mode 100644 index 1f6fd5b8c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/SocialNetwork$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/SocialNetwork.class 
b/target/scala-3.6.4/classes/scalation/database/graph/SocialNetwork.class deleted file mode 100644 index c84e5d53b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/SocialNetwork.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/SocialNetwork.tasty b/target/scala-3.6.4/classes/scalation/database/graph/SocialNetwork.tasty deleted file mode 100644 index 9effa839c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/SocialNetwork.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/Topological$package$.class b/target/scala-3.6.4/classes/scalation/database/graph/Topological$package$.class deleted file mode 100644 index aa0f1304c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/Topological$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/Topological$package.class b/target/scala-3.6.4/classes/scalation/database/graph/Topological$package.class deleted file mode 100644 index 49f0c50f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/Topological$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/Topological$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph/Topological$package.tasty deleted file mode 100644 index 4d312681e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/Topological$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/Topological.class b/target/scala-3.6.4/classes/scalation/database/graph/Topological.class deleted file mode 100644 index e811197ad..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/Topological.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/Topological.tasty 
b/target/scala-3.6.4/classes/scalation/database/graph/Topological.tasty deleted file mode 100644 index 4934d7c38..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/Topological.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/Vertex$.class b/target/scala-3.6.4/classes/scalation/database/graph/Vertex$.class deleted file mode 100644 index 184b64526..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/Vertex$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/Vertex$package$.class b/target/scala-3.6.4/classes/scalation/database/graph/Vertex$package$.class deleted file mode 100644 index f6e0b6078..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/Vertex$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/Vertex$package.class b/target/scala-3.6.4/classes/scalation/database/graph/Vertex$package.class deleted file mode 100644 index d97ffeb86..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/Vertex$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/Vertex$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph/Vertex$package.tasty deleted file mode 100644 index abd2c808e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/Vertex$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/Vertex.class b/target/scala-3.6.4/classes/scalation/database/graph/Vertex.class deleted file mode 100644 index 8ea329087..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/Vertex.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/Vertex.tasty b/target/scala-3.6.4/classes/scalation/database/graph/Vertex.tasty deleted file mode 100644 index 2e732a640..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/database/graph/Vertex.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/VertexType$.class b/target/scala-3.6.4/classes/scalation/database/graph/VertexType$.class deleted file mode 100644 index fd4c0e74e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/VertexType$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/VertexType$package$.class b/target/scala-3.6.4/classes/scalation/database/graph/VertexType$package$.class deleted file mode 100644 index 274cece5b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/VertexType$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/VertexType$package.class b/target/scala-3.6.4/classes/scalation/database/graph/VertexType$package.class deleted file mode 100644 index cdd3c39fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/VertexType$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/VertexType$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph/VertexType$package.tasty deleted file mode 100644 index 3fee75f14..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/VertexType$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/VertexType.class b/target/scala-3.6.4/classes/scalation/database/graph/VertexType.class deleted file mode 100644 index b1cfd160b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/VertexType.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/VertexType.tasty b/target/scala-3.6.4/classes/scalation/database/graph/VertexType.tasty deleted file mode 100644 index 9aa23541b..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/graph/VertexType.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/edgeTest.class b/target/scala-3.6.4/classes/scalation/database/graph/edgeTest.class deleted file mode 100644 index b3476e462..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/edgeTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/edgeTest.tasty b/target/scala-3.6.4/classes/scalation/database/graph/edgeTest.tasty deleted file mode 100644 index abdff440a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/edgeTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/edgeTypeTest.class b/target/scala-3.6.4/classes/scalation/database/graph/edgeTypeTest.class deleted file mode 100644 index 04ad55d5c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/edgeTypeTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/edgeTypeTest.tasty b/target/scala-3.6.4/classes/scalation/database/graph/edgeTypeTest.tasty deleted file mode 100644 index 3e5230846..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/edgeTypeTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/index.html b/target/scala-3.6.4/classes/scalation/database/graph/index.html deleted file mode 100644 index b6ad33dac..000000000 --- a/target/scala-3.6.4/classes/scalation/database/graph/index.html +++ /dev/null @@ -1,13 +0,0 @@ - - -

    Source files in graph Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/database/graph/old/ElementType.scala.bak b/target/scala-3.6.4/classes/scalation/database/graph/old/ElementType.scala.bak deleted file mode 100644 index 152a5f212..000000000 --- a/target/scala-3.6.4/classes/scalation/database/graph/old/ElementType.scala.bak +++ /dev/null @@ -1,31 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sat Aug 29 14:14:32 EDT 2020 - * @see LICENSE (MIT style license file). - * - * @title ElementType - a generalization of vertex type and edge type - */ - -package scalation -package database -package graph - -import scala.collection.mutable.Map - -//import scala.collection.immutable.{Vector => VEC} -import scala.collection.mutable.{ArrayBuffer => VEC} - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Element` class is a generalization of vertex and edge. - * @param prop the properties - */ -abstract class Element (val prop: Property) extends Serializable - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ElementType` class is a generalization of vertex type and edge type. - * A vertex type is analogous to a relation with no foreign keys in an RDBMS. 
- */ -abstract class ElementType (val schema: VEC [String]) extends Serializable - diff --git a/target/scala-3.6.4/classes/scalation/database/graph/old/PGraph.scala.bak b/target/scala-3.6.4/classes/scalation/database/graph/old/PGraph.scala.bak deleted file mode 100644 index b1b8620fc..000000000 --- a/target/scala-3.6.4/classes/scalation/database/graph/old/PGraph.scala.bak +++ /dev/null @@ -1,218 +0,0 @@ - -//======================================================================================== -/** @author John Miller - * @version 1.8 - * @date Sat Aug 29 14:14:32 EDT 2020 - * @see LICENSE (MIT style license file). - */ - -package scalation - -import scala.collection.mutable.Map - -//import scala.collection.immutable.{Vector => VEC} -import scala.collection.mutable.{ArrayBuffer => VEC} - -//======================================================================================== -/** The `Vertex` class maintains properties for a vertex, e.g., a person. - * A vertex is analogous to a tuple in an RDBMS. - * @param prop maps vertex's property names into property values - */ -case class Vertex (prop: Property) extends Serializable: - - override def toString: String = - s"Vertex (${prop.mkString (", ")})" - -//======================================================================================== -/** The `VertexType` class collects vertices of the same type, e.g., a person vertex type. - * A vertex type is analogous to a relation with know foreign keys in an RDBMS. - * @param schema the property names for this vertex type - * @param vs the set of vertices having this vertex type (extension) - */ -case class VertexType (schema: VEC [String], vs: VEC [Vertex]) extends Serializable: - - //==================================================================================== - /** Check that the properties names are in the schema for this vertex type, returning - * whether they match the schema. - */ - def check: Boolean = - for v <- vs; pname <- v.prop.keys if ! 
(schema contains pname) do - println (s"check: error pname = $pname not found in schema") - return false - true - - //==================================================================================== - /** Return the vertices where property pname has value pval. - * @param pname the property name - * @param pval the property value - */ - def == (pname: String, pval: ValueType): VEC [Vertex] = - for v <- vs if v.prop(pname) == pval yield v - - //==================================================================================== - /** Return the vertices where property pname is less than value pval. - * @param pname the property name - * @param pval the property value - */ - def < (pname: String, pval: Double): VEC [Vertex] = // FIX - want Value not Double - for v <- vs if v.prop(pname) < pval yield v - - //==================================================================================== - /** Project each vertex in this vertex type down to the given subschema of properties. - * @param subschema the subset of properies to project onto - */ - def project (subschema: VEC [String]): VertexType = - VertexType (subschema, - for v <- vs yield - Vertex (v.prop.filter ((k: String, v: ValueType) => subschema contains k))) - - //==================================================================================== - /** Select the vertices in this vertex type that satisfy the predicate. - * @param pred the predicate to satisfy - */ - def select (pred: Property => Boolean): VertexType = - VertexType (schema, - for v <- vs if pred (v.prop) yield v) - - //==================================================================================== - /** Union this vertex type with a second vertex type. - * @param vt2 the second vertex type - */ - def union (vt2: VertexType): VertexType = - VertexType (schema, vs ++ vt2.vs) - - //==================================================================================== - /** Minus second vertex type from this vertex type. 
- * @param vt2 the second vertex type - */ - def minus (vt2: VertexType): VertexType = - VertexType (schema, vs diff vt2.vs) - - //---------------------------------------------------- - // Add more graph algebra operators for vertex types | - //---------------------------------------------------- - -//======================================================================================== -/** The `Edge` class maintains the edge's connections between vertices as well as its own properites. - * An edge is roughly analogous to implicit relationship manifest via foreign key-primary key pairs. - * The parameters may be thought of like a triple, e.g., (h, r, t) or (s, p, o). - * @param from the source vertex - * @param prop maps edges's property names into property values - * @param to the target vertex - */ -case class Edge (from: Vertex, prop: Property, to: Vertex) extends Serializable - -//======================================================================================== -/** The `EdgeType` class collects edges of the same type, e.g., knows relationship type. - * An edge type is analogous to a relation with foreign keys in an RDBMS. - * @param from the source vertex - * @param schema the property names for this edge type - * @param to the target vertex - * @param es the set of edges having this edge type (extension) - */ -case class EdgeType (from: VertexType, schema: VEC [String], to: VertexType, es: VEC [Edge]) extends Serializable: - - //==================================================================================== - /** Check that the properties names are in the schema for this edge type, returning - * whether they match the schema. - */ - def check: Boolean = true // FIX - implement - - //==================================================================================== - /** Join this edge type with its outgoing vertex type. 
- */ - def joinOut: VertexType = - VertexType (schema ++ to.schema, null) - - //==================================================================================== - /** Join this edge type with its incoming vertex type. - */ - def joinIn: VertexType = - VertexType (from.schema ++ schema, null) - - //==================================================================================== - /** Join this edge type with both its incoming and outgoing vertex types. - */ - def join: VertexType = - VertexType (from.schema ++ schema ++ to.schema, null) - - //-------------------------------------------------- - // Add more graph algebra operators for edge types | - //-------------------------------------------------- - -//======================================================================================== -/** The `PGraph` class is used to store property graphs. - * @param name the name of the property graph - * @param vs the set of vertex types - * @param e the set of edges connecting the vertices in the vertex types - */ -case class PGraph (name: String, vs: VEC [VertexType], e: VEC [EdgeType]) extends Serializable: - - val vmap = Map [String, VertexType] () // map name to vertex type - val emap = Map [String, EdgeType] () // map name to edge type - - // Add graph algebra operators that produce subgraph - - //==================================================================================== - /** Convert this property graph to a string. - */ - override def toString: String = - s"PGraph (name = $name,\n vs = $vs},\n e = $e\n)" - -//======================================================================================== -/** The `PGraphTest` object is used to test the `PGraph`. 
- * > runMain scalation.PGraphTest - */ -object PGraphTest extends App: - val v0 = Vertex (Map ("name" -> "Bob", "salary" -> 85000.0)) - val v1 = Vertex (Map ("name" -> "Sue", "salary" -> 95000.0)) - val vt0 = VertexType (VEC ("name", "salary"), VEC (v0, v1)) - println (s"check schema = ${vt0.check}") - - val e0 = Edge (v0, Map ("type" -> "knows"), v1) - val e1 = Edge (v1, Map ("type" -> "knows"), v0) - val et0 = EdgeType (vt0, VEC ("type"), vt0, VEC (e0, e1)) - println (s"check schema = ${et0.check}") - - val e2 = Edge (v1, Map ("type" -> "employs"), v0) - val et1 = EdgeType (vt0, VEC ("type"), vt0, VEC (e2)) - println (s"check schema = ${et1.check}") - - val g = PGraph ("links", VEC (vt0), VEC (et0, et1)) - g.vmap += "person" -> vt0 - g.emap += "knows" -> et0 - g.emap += "employs" -> et1 - println (s"g = $g") - - println ("query1") - val query1 = g.vs(0) == ("name", "Sue") - println (query1) - - println ("query2") - val query2 = g.vmap("person") == ("name", "Sue") - println (query2) - - println ("query3") - val query3 = g.vmap("person") < ("salary", 90000.0) - println (query3) - - println ("query4") - val query4 = g.vmap("person").project (VEC ("name")) - println (query4) - - println ("query5") - val query5 = g.vmap("person").select ((p: Property) => p("name") == "Sue") - println (query5) - - println ("query6") - val query6 = g.vmap("person").union (g.vmap("person")) - println (query6) - - println ("query7") - val query7 = g.vmap("person").minus (g.vmap("person")) - println (query7) - - println ("query8") - val query8 = g.emap("knows").joinOut - println (query8) - diff --git a/target/scala-3.6.4/classes/scalation/database/graph/old/ValueType.scala.bak b/target/scala-3.6.4/classes/scalation/database/graph/old/ValueType.scala.bak deleted file mode 100644 index 4e1c946e6..000000000 --- a/target/scala-3.6.4/classes/scalation/database/graph/old/ValueType.scala.bak +++ /dev/null @@ -1,50 +0,0 @@ - 
-//======================================================================================== -/** @author John Miller - * @version 1.8 - * @date Sat Aug 29 14:14:32 EDT 2020 - * @see LICENSE (MIT style license file). - * - * @title ValueType - union datatype for atomic database values - */ - -package scalation - -import scala.collection.mutable.Map - -//import scala.collection.immutable.{Vector => VEC} // for immutable -import scala.collection.mutable.{ArrayBuffer => VEC} // for mutable - -//======================================================================================== -/** The `ValueType` type is a union type for atomic database values. - */ -type ValueType = Int | Long | Double | String - -extension (x: ValueType) - def < (y: Int): Boolean = x.asInstanceOf [Int] < y - def < (y: Long): Boolean = x.asInstanceOf [Long] < y - def < (y: Double): Boolean = x.asInstanceOf [Double] < y - def < (y: String): Boolean = x.asInstanceOf [String] < y - -//======================================================================================== -/** The `Property` type is a map to take property names to property values. - */ -type Property = Map [String, ValueType] - -extension (p: Property) - def +++ (q: Property): Property = - val pq = p.clone - for qe <- q do pq += (if p contains qe._1 then (qe._1 + "2", qe._2) else qe) - pq - -//======================================================================================== -/** The `ValueTypeTest` object is used to test the `ValueType` type. 
- * > runMain scalation.ValueTypeTest - */ -object ValueTypeTest extends App: - - val store = VEC [ValueType] (0, 1L, 2.0, "three") - println (s"store = $store") - println (s"store(0) == 1: ${store(0) == 1}") - println (s"store(0) < 1: ${store(0) < 1}") - diff --git a/target/scala-3.6.4/classes/scalation/database/graph/pGraphTest.class b/target/scala-3.6.4/classes/scalation/database/graph/pGraphTest.class deleted file mode 100644 index e4271a35d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/pGraphTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/pGraphTest.tasty b/target/scala-3.6.4/classes/scalation/database/graph/pGraphTest.tasty deleted file mode 100644 index 6f7fe055d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/pGraphTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/pGraphTest2.class b/target/scala-3.6.4/classes/scalation/database/graph/pGraphTest2.class deleted file mode 100644 index 848dc8cf1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/pGraphTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/pGraphTest2.tasty b/target/scala-3.6.4/classes/scalation/database/graph/pGraphTest2.tasty deleted file mode 100644 index 8e808371e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/pGraphTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/vertexTest.class b/target/scala-3.6.4/classes/scalation/database/graph/vertexTest.class deleted file mode 100644 index 634419198..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/vertexTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/vertexTest.tasty b/target/scala-3.6.4/classes/scalation/database/graph/vertexTest.tasty deleted file mode 100644 index 
418a28e33..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/vertexTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/vertexTypeTest.class b/target/scala-3.6.4/classes/scalation/database/graph/vertexTypeTest.class deleted file mode 100644 index 2a6b7c491..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/vertexTypeTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph/vertexTypeTest.tasty b/target/scala-3.6.4/classes/scalation/database/graph/vertexTypeTest.tasty deleted file mode 100644 index 2e8e46623..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph/vertexTypeTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualIso$package$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/DualIso$package$.class deleted file mode 100644 index 778bc69ce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualIso$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualIso$package.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/DualIso$package.class deleted file mode 100644 index cd6b7411d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualIso$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualIso$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/DualIso$package.tasty deleted file mode 100644 index eb3833192..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualIso$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualIso.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/DualIso.class deleted file mode 100644 index b4bf0d891..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualIso.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualIso.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/DualIso.tasty deleted file mode 100644 index 2879168c2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualIso.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualSim$package$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/DualSim$package$.class deleted file mode 100644 index 7c402fdd4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualSim$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualSim$package.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/DualSim$package.class deleted file mode 100644 index 9213c46ae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualSim$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualSim$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/DualSim$package.tasty deleted file mode 100644 index 9b71aebcd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualSim$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualSim.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/DualSim.class deleted file mode 100644 index 1abeb059f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualSim.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualSim.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/DualSim.tasty deleted file mode 100644 index b61e9d31f..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/graph_pm/DualSim.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphD$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphD$.class deleted file mode 100644 index c40f48d4c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphD$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphD.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphD.class deleted file mode 100644 index 7c54ce139..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphD.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphD.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphD.tasty deleted file mode 100644 index fa2f870ef..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphD.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphS$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphS$.class deleted file mode 100644 index c404ab0db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphS.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphS.class deleted file mode 100644 index e481fddd9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphS.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphS.tasty deleted file mode 100644 index 1c9f26f13..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/graph_pm/ExampleGraphS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph$.class deleted file mode 100644 index 2bb6f5a46..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph$package$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph$package$.class deleted file mode 100644 index ed4e3da20..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph$package.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph$package.class deleted file mode 100644 index 6e1ff09db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph$package.tasty deleted file mode 100644 index df15789db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph.class deleted file mode 100644 index 01dda2b4a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph.tasty deleted file mode 100644 index b976347ed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph.tasty and /dev/null differ 
diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph0$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph0$.class deleted file mode 100644 index d6d9a9af1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph0$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph0.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph0.class deleted file mode 100644 index 508a1bdd6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph0.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph0.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph0.tasty deleted file mode 100644 index 002ef373f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/Graph0.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS$.class deleted file mode 100644 index f77aaef93..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS$package$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS$package$.class deleted file mode 100644 index 5be9e98a7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS$package.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS$package.class deleted file mode 100644 index df42c2ea2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS$package.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS$package.tasty deleted file mode 100644 index 2beffdb84..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS.class deleted file mode 100644 index c2897faf2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS.tasty deleted file mode 100644 index 9e619809c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphDFS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen$.class deleted file mode 100644 index ee997c3ed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen$package$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen$package$.class deleted file mode 100644 index 02e8a90bd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen$package.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen$package.class deleted file mode 100644 index bd232bc08..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen$package.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen$package.tasty deleted file mode 100644 index 89d6b2fa6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen.class deleted file mode 100644 index 0e57b40e5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen.tasty deleted file mode 100644 index 285f9e46d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphGen.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO$.class deleted file mode 100644 index 050fe3d96..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO$package$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO$package$.class deleted file mode 100644 index 6a2912deb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO$package.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO$package.class deleted file mode 100644 index 81d78440a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO$package.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO$package.tasty deleted file mode 100644 index fde989eaa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO.class deleted file mode 100644 index 3c6f233df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO.tasty deleted file mode 100644 index a6743a4f0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphIO.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMatcher.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMatcher.class deleted file mode 100644 index 5d6ee5837..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMatcher.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMatcher.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMatcher.tasty deleted file mode 100644 index d76005dfa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMatcher.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics$.class deleted file mode 100644 index eca38495f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics$package$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics$package$.class deleted file mode 100644 index cbdd23d7d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics$package.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics$package.class deleted file mode 100644 index 40fd7e132..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics$package.tasty deleted file mode 100644 index 58a06c571..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics.class deleted file mode 100644 index 5d50cf062..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics.tasty deleted file mode 100644 index 00bd1be3d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphMetrics.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphSim$package$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphSim$package$.class deleted file mode 100644 index 6e0af3cbf..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphSim$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphSim$package.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphSim$package.class deleted file mode 100644 index 53cb73e92..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphSim$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphSim$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphSim$package.tasty deleted file mode 100644 index 23e539f34..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphSim$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphSim.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphSim.class deleted file mode 100644 index a1dee3756..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphSim.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphSim.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphSim.tasty deleted file mode 100644 index 9c93e5b4c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/GraphSim.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers$.class deleted file mode 100644 index bbf69ffc0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers$package$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers$package$.class deleted file mode 100644 index 1de8ca652..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers$package.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers$package.class deleted file mode 100644 index 65496a458..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers$package.tasty deleted file mode 100644 index 6adae21ef..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers.class deleted file mode 100644 index 15d04d2c4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers.tasty deleted file mode 100644 index 81f9cbff0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/MatchAnswers.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$.class deleted file mode 100644 index 025127d35..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$Item$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$Item$.class deleted file mode 100644 index 
9bbf5cae1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$Item$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$Item.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$Item.class deleted file mode 100644 index 5f3e8ae99..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$Item.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$package$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$package$.class deleted file mode 100644 index 423120990..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$package.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$package.class deleted file mode 100644 index 866ebf788..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$package.tasty deleted file mode 100644 index 92597463e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath.class deleted file mode 100644 index aaa6c63ca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath.tasty 
b/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath.tasty deleted file mode 100644 index c98708d2f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/ShortestPath.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort$.class deleted file mode 100644 index 08eba8a46..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort$package$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort$package$.class deleted file mode 100644 index 9198a9935..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort$package.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort$package.class deleted file mode 100644 index ad211fd29..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort$package.tasty deleted file mode 100644 index 1fa2784c1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort.class deleted file mode 100644 index 399e73503..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort.tasty 
b/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort.tasty deleted file mode 100644 index 008a9e2e3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/TopSort.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/TrafficLight$$anon$1.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/TrafficLight$$anon$1.class deleted file mode 100644 index 1bf0389e2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/TrafficLight$$anon$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/TrafficLight$.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/TrafficLight$.class deleted file mode 100644 index c467fb8cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/TrafficLight$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/TrafficLight.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/TrafficLight.class deleted file mode 100644 index c1c03b458..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/TrafficLight.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/TrafficLight.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/TrafficLight.tasty deleted file mode 100644 index d3454e62f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/TrafficLight.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest.class deleted file mode 100644 index 8f4377a76..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest.tasty 
b/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest.tasty deleted file mode 100644 index 5b2dc78a0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest2.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest2.class deleted file mode 100644 index 054587916..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest2.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest2.tasty deleted file mode 100644 index 18648b757..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest3.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest3.class deleted file mode 100644 index 4594724ae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest3.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest3.tasty deleted file mode 100644 index 2a15275ef..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualIsoTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest.class deleted file mode 100644 index 8a896230c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest.tasty 
b/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest.tasty deleted file mode 100644 index 67bf4973d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest2.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest2.class deleted file mode 100644 index bc38b019e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest2.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest2.tasty deleted file mode 100644 index 857d83600..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest3.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest3.class deleted file mode 100644 index e630b5b59..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest3.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest3.tasty deleted file mode 100644 index 98fc6f8a2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/dualSimTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphDFSTest.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphDFSTest.class deleted file mode 100644 index dcfc1c117..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphDFSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphDFSTest.tasty 
b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphDFSTest.tasty deleted file mode 100644 index 10b2d5d3a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphDFSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest.class deleted file mode 100644 index 9bd30ee37..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest.tasty deleted file mode 100644 index d5cd6c5b7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest2.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest2.class deleted file mode 100644 index d4ad34813..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest2.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest2.tasty deleted file mode 100644 index d2e02a426..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest3.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest3.class deleted file mode 100644 index 526008bd9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest3.tasty 
b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest3.tasty deleted file mode 100644 index acfb086d6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest4.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest4.class deleted file mode 100644 index 70c0aac4e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest4.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest4.tasty deleted file mode 100644 index 4b74c4e5b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest5.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest5.class deleted file mode 100644 index 4cdda5932..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest5.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest5.tasty deleted file mode 100644 index efca68182..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest6.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest6.class deleted file mode 100644 index a38920934..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest6.tasty 
b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest6.tasty deleted file mode 100644 index 4ab164504..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest7.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest7.class deleted file mode 100644 index 7fb1c5741..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest7.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest7.tasty deleted file mode 100644 index 2df6c1fc5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest8.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest8.class deleted file mode 100644 index 9c1c422cd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest8.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest8.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest8.tasty deleted file mode 100644 index 718a03eac..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphGenTest8.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphIOTest.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphIOTest.class deleted file mode 100644 index 5c63438c1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphIOTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphIOTest.tasty 
b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphIOTest.tasty deleted file mode 100644 index 06fd29d65..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphIOTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphMetricsTest.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphMetricsTest.class deleted file mode 100644 index 063eb8647..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphMetricsTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphMetricsTest.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphMetricsTest.tasty deleted file mode 100644 index bbef1029b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphMetricsTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest.class deleted file mode 100644 index e28812ce1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest.tasty deleted file mode 100644 index 0d0956cab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest2.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest2.class deleted file mode 100644 index d7522b6db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest2.tasty 
b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest2.tasty deleted file mode 100644 index 9003f3416..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest3.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest3.class deleted file mode 100644 index da73d6d93..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest3.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest3.tasty deleted file mode 100644 index 18bc8b5db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest4.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest4.class deleted file mode 100644 index e2d7c6fa9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest4.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest4.tasty deleted file mode 100644 index 8c5c4b55b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphSimTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest.class deleted file mode 100644 index 6558621f0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest.tasty 
b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest.tasty deleted file mode 100644 index b32fbfa21..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest2.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest2.class deleted file mode 100644 index ae2dde92e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest2.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest2.tasty deleted file mode 100644 index f31eaa689..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest4.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest4.class deleted file mode 100644 index b8391f794..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest4.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest4.tasty deleted file mode 100644 index b9d846efa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/graphTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/index.html b/target/scala-3.6.4/classes/scalation/database/graph_pm/index.html deleted file mode 100644 index 91116ce6f..000000000 --- a/target/scala-3.6.4/classes/scalation/database/graph_pm/index.html +++ /dev/null @@ -1,21 +0,0 @@ - - -

    Source files in graph_pm Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/matchAnswersTest.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/matchAnswersTest.class deleted file mode 100644 index 1c3fa8ecb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/matchAnswersTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/matchAnswersTest.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/matchAnswersTest.tasty deleted file mode 100644 index 3d63d336d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/matchAnswersTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/shortestPathTest.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/shortestPathTest.class deleted file mode 100644 index 8f7b6e4c5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/shortestPathTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/shortestPathTest.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/shortestPathTest.tasty deleted file mode 100644 index 771e3b6cd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/shortestPathTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/shortestPathTest2.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/shortestPathTest2.class deleted file mode 100644 index c41d62f33..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/shortestPathTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/shortestPathTest2.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/shortestPathTest2.tasty deleted file mode 100644 index 48b8c4233..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/graph_pm/shortestPathTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/topSortTest.class b/target/scala-3.6.4/classes/scalation/database/graph_pm/topSortTest.class deleted file mode 100644 index 95861ac72..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/topSortTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_pm/topSortTest.tasty b/target/scala-3.6.4/classes/scalation/database/graph_pm/topSortTest.tasty deleted file mode 100644 index 6745d7933..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_pm/topSortTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_relation/Vertex$.class b/target/scala-3.6.4/classes/scalation/database/graph_relation/Vertex$.class deleted file mode 100644 index 2f15ac79f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_relation/Vertex$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_relation/Vertex.class b/target/scala-3.6.4/classes/scalation/database/graph_relation/Vertex.class deleted file mode 100644 index aefe87de1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_relation/Vertex.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_relation/Vertex.tasty b/target/scala-3.6.4/classes/scalation/database/graph_relation/Vertex.tasty deleted file mode 100644 index a963e434f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_relation/Vertex.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType$.class b/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType$.class deleted file mode 100644 index 83d707715..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType$package$.class b/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType$package$.class deleted file mode 100644 index af1bd26ba..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType$package.class b/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType$package.class deleted file mode 100644 index 60633cbcc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType$package.tasty b/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType$package.tasty deleted file mode 100644 index 50972cd62..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType.class b/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType.class deleted file mode 100644 index abc3cdc19..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType.tasty b/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType.tasty deleted file mode 100644 index 94a2d3755..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_relation/VertexType.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_relation/index.html 
b/target/scala-3.6.4/classes/scalation/database/graph_relation/index.html deleted file mode 100644 index 74bbf972c..000000000 --- a/target/scala-3.6.4/classes/scalation/database/graph_relation/index.html +++ /dev/null @@ -1,8 +0,0 @@ - - -

    Source files in graph_relation Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/database/graph_relation/vertexTypeTest.class b/target/scala-3.6.4/classes/scalation/database/graph_relation/vertexTypeTest.class deleted file mode 100644 index 3e3e83b82..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_relation/vertexTypeTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/graph_relation/vertexTypeTest.tasty b/target/scala-3.6.4/classes/scalation/database/graph_relation/vertexTypeTest.tasty deleted file mode 100644 index 4fc49fb7a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/graph_relation/vertexTypeTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/index.html b/target/scala-3.6.4/classes/scalation/database/index.html deleted file mode 100644 index bd7faf0c0..000000000 --- a/target/scala-3.6.4/classes/scalation/database/index.html +++ /dev/null @@ -1,34 +0,0 @@ - - -

    Source files in database Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/database/javaMapTest.class b/target/scala-3.6.4/classes/scalation/database/javaMapTest.class deleted file mode 100644 index f86361710..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/javaMapTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/javaMapTest.tasty b/target/scala-3.6.4/classes/scalation/database/javaMapTest.tasty deleted file mode 100644 index b713faa44..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/javaMapTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/linHashMapTest.class b/target/scala-3.6.4/classes/scalation/database/linHashMapTest.class deleted file mode 100644 index cca804bcc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/linHashMapTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/linHashMapTest.tasty b/target/scala-3.6.4/classes/scalation/database/linHashMapTest.tasty deleted file mode 100644 index b0177ac09..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/linHashMapTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/logic/SATsolver$package$.class b/target/scala-3.6.4/classes/scalation/database/logic/SATsolver$package$.class deleted file mode 100644 index 6e956d0f1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/logic/SATsolver$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/logic/SATsolver$package.class b/target/scala-3.6.4/classes/scalation/database/logic/SATsolver$package.class deleted file mode 100644 index af4adf0af..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/logic/SATsolver$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/logic/SATsolver$package.tasty 
b/target/scala-3.6.4/classes/scalation/database/logic/SATsolver$package.tasty deleted file mode 100644 index 4601a6b75..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/logic/SATsolver$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/logic/index.html b/target/scala-3.6.4/classes/scalation/database/logic/index.html deleted file mode 100644 index 1e9a97a00..000000000 --- a/target/scala-3.6.4/classes/scalation/database/logic/index.html +++ /dev/null @@ -1,8 +0,0 @@ - - -

    Source files in logic Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/database/logic/sATsolverTest.class b/target/scala-3.6.4/classes/scalation/database/logic/sATsolverTest.class deleted file mode 100644 index 2b35da290..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/logic/sATsolverTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/logic/sATsolverTest.tasty b/target/scala-3.6.4/classes/scalation/database/logic/sATsolverTest.tasty deleted file mode 100644 index 0be5575d1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/logic/sATsolverTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/makeSchemaTest.class b/target/scala-3.6.4/classes/scalation/database/makeSchemaTest.class deleted file mode 100644 index 94bffca13..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/makeSchemaTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/makeSchemaTest.tasty b/target/scala-3.6.4/classes/scalation/database/makeSchemaTest.tasty deleted file mode 100644 index 435055c7d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/makeSchemaTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/makeSchemaTest2.class b/target/scala-3.6.4/classes/scalation/database/makeSchemaTest2.class deleted file mode 100644 index abcf583bc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/makeSchemaTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/makeSchemaTest2.tasty b/target/scala-3.6.4/classes/scalation/database/makeSchemaTest2.tasty deleted file mode 100644 index 8dbe9099b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/makeSchemaTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest.class 
b/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest.class deleted file mode 100644 index 9d90bbfbb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest.tasty b/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest.tasty deleted file mode 100644 index c384fe66f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest2.class b/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest2.class deleted file mode 100644 index dcf36e695..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest2.tasty b/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest2.tasty deleted file mode 100644 index fb86a49d2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest3.class b/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest3.class deleted file mode 100644 index 6eb5089ca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest3.tasty b/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest3.tasty deleted file mode 100644 index 05c0f73f7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest4.class 
b/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest4.class deleted file mode 100644 index 4cd213f2d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest4.tasty b/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest4.tasty deleted file mode 100644 index e528a96ac..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/minSpanningTreeTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphD$.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphD$.class deleted file mode 100644 index 53c9b64fe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphD$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphD.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphD.class deleted file mode 100644 index 18180bf9c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphD.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphD.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphD.tasty deleted file mode 100644 index 07b2f56b4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphD.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphS$.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphS$.class deleted file mode 100644 index 86ae2f235..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphS$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphS.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphS.class deleted file mode 100644 index 046f77c1c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphS.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphS.tasty deleted file mode 100644 index 09ea17806..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/ExampleMuGraphS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers$.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers$.class deleted file mode 100644 index 618bf9314..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers$package$.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers$package$.class deleted file mode 100644 index b68b7a008..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers$package.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers$package.class deleted file mode 100644 index 01879536f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers$package.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers$package.tasty deleted file mode 100644 index 653148cc1..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers.class deleted file mode 100644 index fe4352a7d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers.tasty deleted file mode 100644 index 21a8ca2f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MatchAnswers.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualIso$package$.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualIso$package$.class deleted file mode 100644 index b7ed6f0f3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualIso$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualIso$package.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualIso$package.class deleted file mode 100644 index 7223c41ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualIso$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualIso$package.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualIso$package.tasty deleted file mode 100644 index 2251a00d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualIso$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualIso.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualIso.class deleted file mode 100644 
index beb3270d9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualIso.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualIso.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualIso.tasty deleted file mode 100644 index dcac24e05..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualIso.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualSim$package$.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualSim$package$.class deleted file mode 100644 index bbb57f289..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualSim$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualSim$package.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualSim$package.class deleted file mode 100644 index 11e8e48aa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualSim$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualSim$package.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualSim$package.tasty deleted file mode 100644 index bfe939f48..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualSim$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualSim.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualSim.class deleted file mode 100644 index 4576ccc0b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualSim.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualSim.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualSim.tasty deleted file mode 
100644 index 79cae7143..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuDualSim.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph$.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph$.class deleted file mode 100644 index 54375456b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph$package$.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph$package$.class deleted file mode 100644 index 9844a2f30..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph$package.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph$package.class deleted file mode 100644 index 71556ce65..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph$package.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph$package.tasty deleted file mode 100644 index 425d9690c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph.class deleted file mode 100644 index f834663bf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph.tasty deleted file mode 100644 index 
191132383..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraph.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphGen$package$.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphGen$package$.class deleted file mode 100644 index 3f16dd47d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphGen$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphGen$package.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphGen$package.class deleted file mode 100644 index 47798da85..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphGen$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphGen$package.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphGen$package.tasty deleted file mode 100644 index 3a8de6729..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphGen$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphGen.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphGen.class deleted file mode 100644 index 1352ea227..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphGen.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphGen.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphGen.tasty deleted file mode 100644 index 6ec92c788..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphGen.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphMatcher.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphMatcher.class 
deleted file mode 100644 index c51fe5ca3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphMatcher.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphMatcher.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphMatcher.tasty deleted file mode 100644 index b54ca61a7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphMatcher.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphSim$package$.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphSim$package$.class deleted file mode 100644 index 62de8cdb8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphSim$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphSim$package.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphSim$package.class deleted file mode 100644 index 708b49e68..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphSim$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphSim$package.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphSim$package.tasty deleted file mode 100644 index c389ff924..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphSim$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphSim.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphSim.class deleted file mode 100644 index dbd80848f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphSim.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphSim.tasty 
b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphSim.tasty deleted file mode 100644 index 1ff6f5b63..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/MuGraphSim.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/index.html b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/index.html deleted file mode 100644 index 802e1ea02..000000000 --- a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/index.html +++ /dev/null @@ -1,16 +0,0 @@ - - -

    Source files in mugraph_pm Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/matchAnswersTest.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/matchAnswersTest.class deleted file mode 100644 index b670198e2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/matchAnswersTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/matchAnswersTest.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/matchAnswersTest.tasty deleted file mode 100644 index 4ba4ef2f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/matchAnswersTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest.class deleted file mode 100644 index 2500b4e1a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest.tasty deleted file mode 100644 index a1f44d3d3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest2.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest2.class deleted file mode 100644 index f254249ca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest2.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest2.tasty deleted file mode 100644 index 7b433eb72..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest3.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest3.class deleted file mode 100644 index 077fe4647..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest3.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest3.tasty deleted file mode 100644 index d68a9fa9d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualIsoTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest.class deleted file mode 100644 index 12509402c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest.tasty deleted file mode 100644 index 20f93745e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest2.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest2.class deleted file mode 100644 index 1135e4e85..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest2.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest2.tasty deleted file mode 100644 index 
bd307bc60..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest3.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest3.class deleted file mode 100644 index a3fa2af8d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest3.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest3.tasty deleted file mode 100644 index 5ccdd7a20..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muDualSimTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest.class deleted file mode 100644 index 51f02a29a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest.tasty deleted file mode 100644 index 965388c05..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest2.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest2.class deleted file mode 100644 index 6bfb3904d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest2.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest2.tasty 
deleted file mode 100644 index 5f3362c23..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest3.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest3.class deleted file mode 100644 index befaa46a8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest3.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest3.tasty deleted file mode 100644 index d4eda2147..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphGenTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest.class deleted file mode 100644 index 3a9aea0b4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest.tasty deleted file mode 100644 index 04b0da8b5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest2.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest2.class deleted file mode 100644 index b78483c07..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest2.tasty 
b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest2.tasty deleted file mode 100644 index 116ac77b6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest3.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest3.class deleted file mode 100644 index d493afcf9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest3.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest3.tasty deleted file mode 100644 index ce64effb7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphSimTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphTest.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphTest.class deleted file mode 100644 index 8203201dc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphTest.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphTest.tasty deleted file mode 100644 index 0a2d1a5b4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphTest2.class b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphTest2.class deleted file mode 100644 index f2727f474..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphTest2.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphTest2.tasty b/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphTest2.tasty deleted file mode 100644 index 16ea4e3f4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/mugraph_pm/muGraphTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/multiMapTest.class b/target/scala-3.6.4/classes/scalation/database/multiMapTest.class deleted file mode 100644 index 62a5a0b19..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/multiMapTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/multiMapTest.tasty b/target/scala-3.6.4/classes/scalation/database/multiMapTest.tasty deleted file mode 100644 index bf634dba0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/multiMapTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/normalizationTest.class b/target/scala-3.6.4/classes/scalation/database/normalizationTest.class deleted file mode 100644 index e21617891..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/normalizationTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/normalizationTest.tasty b/target/scala-3.6.4/classes/scalation/database/normalizationTest.tasty deleted file mode 100644 index 37c0c7a0f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/normalizationTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/normalizationTest2.class b/target/scala-3.6.4/classes/scalation/database/normalizationTest2.class deleted file mode 100644 index d5c6d8d4e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/normalizationTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/normalizationTest2.tasty 
b/target/scala-3.6.4/classes/scalation/database/normalizationTest2.tasty deleted file mode 100644 index 7074e36fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/normalizationTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/normalizationTest3.class b/target/scala-3.6.4/classes/scalation/database/normalizationTest3.class deleted file mode 100644 index 4936d40cd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/normalizationTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/normalizationTest3.tasty b/target/scala-3.6.4/classes/scalation/database/normalizationTest3.tasty deleted file mode 100644 index a51b3e709..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/normalizationTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/normalizationTest4.class b/target/scala-3.6.4/classes/scalation/database/normalizationTest4.class deleted file mode 100644 index 4a4a5f760..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/normalizationTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/normalizationTest4.tasty b/target/scala-3.6.4/classes/scalation/database/normalizationTest4.tasty deleted file mode 100644 index f6101c3dd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/normalizationTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/normalizationTest5.class b/target/scala-3.6.4/classes/scalation/database/normalizationTest5.class deleted file mode 100644 index 4c42f7422..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/normalizationTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/normalizationTest5.tasty b/target/scala-3.6.4/classes/scalation/database/normalizationTest5.tasty deleted file mode 100644 index 
5dd329541..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/normalizationTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/normalizationTest6.class b/target/scala-3.6.4/classes/scalation/database/normalizationTest6.class deleted file mode 100644 index 2ded9b78b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/normalizationTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/normalizationTest6.tasty b/target/scala-3.6.4/classes/scalation/database/normalizationTest6.tasty deleted file mode 100644 index ac97afcce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/normalizationTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/normalizationTest7.class b/target/scala-3.6.4/classes/scalation/database/normalizationTest7.class deleted file mode 100644 index 5994c0a22..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/normalizationTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/normalizationTest7.tasty b/target/scala-3.6.4/classes/scalation/database/normalizationTest7.tasty deleted file mode 100644 index e918c66f0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/normalizationTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/normalizationTest8.class b/target/scala-3.6.4/classes/scalation/database/normalizationTest8.class deleted file mode 100644 index 361b90c55..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/normalizationTest8.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/normalizationTest8.tasty b/target/scala-3.6.4/classes/scalation/database/normalizationTest8.tasty deleted file mode 100644 index 9530eaf80..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/normalizationTest8.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/old/BpNode.scala.bak b/target/scala-3.6.4/classes/scalation/database/old/BpNode.scala.bak deleted file mode 100644 index 3431dba78..000000000 --- a/target/scala-3.6.4/classes/scalation/database/old/BpNode.scala.bak +++ /dev/null @@ -1,357 +0,0 @@ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Wed Aug 9 01:25:50 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @note B+Trees Node - */ - -package scalation -package database - -import scala.reflect.ClassTag -import scala.runtime.ScalaRunTime.stringOf - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BpTreeMap` class provides sorted maps that use the B+Tree Data Structure. - * Inserts may cause the splitting of nodes. - * @tparam V the type of the values assigned to keys in this sorted map - * @param order the order (maximum number of children per node) - */ -object BpNode: - - private val debug = debugf ("BpNode", true) // debug function - private val flaw = flawf ("BpNode") // flaw function - - private var order = 5 // maximum number of references (reset as needed) - private var maxk = order - 1 // maximum number of keys (before overflow) - private var half = maxk / 2 // half of max keys (floor) - private var halfp = maxk - half // half (plus) of max keys (ceiling) - private var mink = halfp - 1 // minimum number of keys (before underflow) - - debug ("init", s"order = $order, maxk = $maxk, half = $half, halfp = $halfp, mink = $mink") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the order of BpNodes to order_. 
- * @param order_ the new order for BpNodes (must be at least 4) - */ - def setOrder (order_ : Int): Unit = - if order < 4 then flaw ("object", s"order_ = $order_ must be at least 4") - else - order = order_ - maxk = order - 1 - half = maxk / 2 - halfp = maxk - half - mink = half - 1 - end if - end setOrder - -end BpNode - -import BpNode._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BpNode` class defines nodes of size order that that may be stored in a B+tree. - * Keys have type `ValueType` and may reference values of `Any` type. - * @param isLeaf whether this node is a leaf - */ -class BpNode (val isLeaf: Boolean = true) - extends Serializable: - - private [database] var nKeys = 0 // number of active keys (initialize to 0) - private val key = Array.ofDim [ValueType] (maxk) // array to hold keys - private [database] val ref = Array.ofDim [Any] (order) // array to hold values or reference nodes - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a new root node with one key (and two references) in it. - * @param left the left node (<= dkey) - * @param dkey the divider key - * @param right the right node (> dkey) - */ - def this (left: BpNode, dkey: ValueType, right: BpNode) = - this (false) - nKeys = 1 - key(0) = dkey // divider key - ref(0) = left; ref(1) = right // left and right references - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether this node has underflowed. - */ - def underflow: Boolean = nKeys < mink - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether this node is rich (i.e., has surplus keys). - */ - def rich: Boolean = nKeys > mink - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the key at index position i. 
- * @param i the index position in this node - */ - def apply (i: Int): ValueType = - if i >= nKeys then flaw ("apply", s"index i = $i must be less than nKeys = $nKeys") - key(i) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the key at index position i. - * @param i the index position in this node - * @param k the new value for key(i) - */ - def update (i: Int, k: ValueType): Unit = - if i >= nKeys then flaw ("update", s"index i = $i must be less than nKeys = $nKeys") - key(i) = k - end update - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether the key at index i equals k. - * @param k the key to check - * @param i the index position in this node - */ - def eqAt (k: ValueType, i: Int): Boolean = i < nKeys && k == key(i) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find and return the first position where 'k <= key(i)' in this node. - * If k is at least as large as all keys in this node, return nkeys. - * @param k the key whose position is sought - */ - def find (k: ValueType): Int = - val i = key.indexWhere (k <= _) - if i < 0 then nKeys else i - end find - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add the new key k and value v into this LEAF node at insertion position ip. - * Return whether this node needs to be split as the node is already full. 
- * @param k the new key - * @param v the new value - */ - def add (k: ValueType, v: Any): Boolean = - if nKeys >= maxk then true // leaf node needs to be split first - else - val ip = find (k) // insertion position - for i <- nKeys until ip by -1 do // make room - key(i) = key(i-1) - ref(i) = ref(i-1) - end for - key(ip) = k // insert key and value - ref(ip) = v - nKeys += 1 - false // split not needed - end if - end add - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add the new key k and value v into this INTERNAL node at insertion position ip. - * Return whether this node needs to be split as the node is already full. - * @param k the new key - * @param v the new value - */ - def addI (k: ValueType, v: BpNode): Boolean = - if nKeys >= maxk then true // internal node needs to be split first - else - val ip = find (k) // insertion position - for i <- nKeys until ip by -1 do // make room - key(i) = key(i-1) - ref(i+1) = ref(i) // refs on right of key - end for - key(ip) = k // insert key and value - ref(ip+1) = v - nKeys += 1 - false // split not needed - end if - end addI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split this LEAF node by creating a right sibling rt and moving half - * the keys and references to that new node. Return the divider key and - * right sibling node. 
- * @param h_l the number of keys to remain in this (the left) node - * @param h_r the number of keys to move to the new right node - */ - def split (h_l: Int = halfp, h_r: Int = half): (ValueType, BpNode) = - val rt = new BpNode () - for i <- 0 until h_r do // move largest h_r to rt - rt.key(i) = key(h_l + i) - rt.ref(i) = ref(h_l + i) - end for - rt.ref(h_r) = ref(maxk) // move the last ref - ref(h_l) = rt // link the leaf nodes - rt.nKeys = h_r - nKeys = h_l - (key(nKeys-1), rt) - end split - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split this INTERNAL node by creating a right sibling rt and moving half - * the keys and references to that new node. Return the divider key and - * right sibling node. - * @param h_l the number of keys to remain in this (the left) node - * @param h_r the number of keys to move to the new right node - */ - def splitI (h_l: Int = halfp+1, h_r: Int = half-1): (ValueType, BpNode) = - val rt = new BpNode (false) - for i <- 0 until h_r do // move largest h_r to rt - rt.key(i) = key(h_l + i) - rt.ref(i) = ref(h_l + i) - end for - rt.ref(h_r) = ref(maxk) // move the last ref - ref(maxk) = rt // link the leaf nodes - rt.nKeys = h_r - nKeys = h_l - (key(nKeys-1), rt) - end splitI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Promote the divider key, so that it is only in the parent node. - * Called after split and should only be used for internal node splits. - */ - def promote (): Unit = nKeys -= 1 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove key k and its reference from this node and check for underflow. 
- * @param k the key to be removed from this node - * @param dp the deletion index position - */ - def remove (k: ValueType, dp: Int): Boolean = - if key(dp) == k then // make sure it really is the key - nKeys -= 1 // decrement the number of keys - for i <- dp until nKeys do - key(i) = key(i+1) // shift keys left - ref(i) = ref(i+1) - end for - underflow // if true, node underflows - else - println (s"remove: key $k not found") - false - end if - end remove - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove the divider key in this parant node at the dp index position and - * the reference to the right sibling (\) in [. / kdp \ .]. Check for underflow. - * @param dp the deletion index position - */ - def removeRight (dp: Int): Boolean = - nKeys -= 1 // decrement the number of keys - for i <- dp until nKeys do - key(i) = key(i+1) // shift keys left - ref(i+1) = ref(i+2) - end for - underflow // if true, node underflows - end removeRight - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge this node with its right sibling. - * @param right the right sibling node - */ - def merge (right: BpNode): Unit = - for i <- 0 until right.nKeys do - key(nKeys + i) = right.key(i) - ref(nKeys + i) = right.ref(i) - end for - nKeys += right.nKeys - ref(nKeys) = right.ref(right.nKeys) - end merge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this node to a string. - */ - override def toString: String = - val sb = StringBuilder ("[ . " ) - for i <- 0 until nKeys do sb ++= (s"${key(i)} . ") - sb ++= ("]" ) - sb.toString - end toString - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show the node data structure. 
- */ - def show (): Unit = - println (s"isLeaf = $isLeaf") - println (s"nKeys = $nKeys") - println (s"key = ${stringOf (key)}") - println (s"ref = ${stringOf (ref)}") - end show - -end BpNode - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpNodeTest2` main function tests the `BpNode` class by inserting random - * key values and testing a leaf node split. - * > runMain scalation.database.bpNodeTest - */ -@main def bpNodeTest (): Unit = - - import java.util.Random - - banner ("Example of an Leaf Node Split") - - val totKeys = 5 - val mx = 10 * totKeys - val seed = 1 - val rng = new Random (seed) - - val node = new BpNode () - var k_n: (ValueType, BpNode) = null // divider key, new right node - - for i <- 1 to totKeys do - val key = rng.nextInt (mx) - banner (s"put key = $key") - val split = node.add (key, 2 * key) - println (s"split = $split, node = $node") - if split then - k_n = node.split () // split keys between node (2) and right (2) - node.add (key, 2 * key) // try again after split - end if - if k_n != null then println (s"node = $node, right = ${k_n._2}, divider = ${k_n._1}") - end for - - banner ("Show Arrays") - node.show () - -end bpNodeTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpNodeTest2` main function tests the `BpNode` class by inserting random - * key values and testing a internal node split. 
- * > runMain scalation.database.bpNodeTest2 - */ -@main def bpNodeTest2 (): Unit = - - import java.util.Random - - banner ("Example of an Internal Node Split") - - val totKeys = 5 - val mx = 10 * totKeys - val seed = 1 - val rng = new Random (seed) - - val node = new BpNode (false) // false => not isLeaf - var k_n: (ValueType, BpNode) = null // tuple: (divider key, new right node) - - for i <- 1 to totKeys do - val key = rng.nextInt (mx) - banner (s"put key = $key") - val split = node.add (key, 2 * key) - println (s"split = $split, node = $node") - if split then - k_n = node.split () // split keys between node (2) and right (2) - node.promote () // promote the divider key - node.add (key, 2 * key) // try again after split - end if - if k_n != null then println (s"node = $node, right = ${k_n._2}, divider = ${k_n._1}") - end for - - banner ("Show Arrays") - node.show () - -end bpNodeTest2 - diff --git a/target/scala-3.6.4/classes/scalation/database/old/BpNode.scala.bak2 b/target/scala-3.6.4/classes/scalation/database/old/BpNode.scala.bak2 deleted file mode 100644 index cffc203da..000000000 --- a/target/scala-3.6.4/classes/scalation/database/old/BpNode.scala.bak2 +++ /dev/null @@ -1,405 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Wed Aug 9 01:25:50 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @note B+Tree Node with find, add, split, remove, and merge Operations - */ - -package scalation -package database - -import scala.runtime.ScalaRunTime.stringOf - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BpNode` companion object provides settings for node sizes. 
- */ -object BpNode: - - private var DEFAULT_DLINK = true // default value for DLINK - - private val debug = debugf ("BpNode", true) // debug function - private val flaw = flawf ("BpNode") // flaw function - - private var order = 5 // maximum number of references (reset as needed) - private var half = order / 2 // half of max keys (floor) - private var halfp = order - half // rest of the overflowed keys - private var mink = halfp - 1 // minimum number of keys (before underflow) - - debug ("init", s"order = $order, half = $half, halfp = $halfp, mink = $mink") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the leaf node DEFAULT_DLINK to dlink (false => forward links only via ref(0), - * true => both forward links via ref(0) and backward links via pre). - * @see `BpNode` class for specification of ref(0) and pre - * @param dlink whether to support unidirectionsl (false) or bidirections (true) linkage - */ - def set_DEFAULT_DLINK (dlink: Boolean): Unit = DEFAULT_DLINK = dlink - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the order of BpNodes to order_. - * @param order_ the new order for BpNodes (must be at least 4) - */ - def setOrder (order_ : Int): Unit = - if order < 4 then flaw ("setOrder", s"order_ = $order_ must be at least 4") - else - order = order_ - half = (order - 1) / 2 - halfp = order - half - mink = half - 1 - end setOrder - -end BpNode - -import BpNode._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BpNode` class defines nodes of size order that that may be stored in a B+tree. - * Keys have type `ValueType` and may reference values of `Any` type. 
- * @param keys number of active keys - * @param isLeaf whether this node is a leaf - * @param DLINK whether the leaf nodes support linkage in both directions (ref(0) & pre) - */ -class BpNode (private [database] var keys: Int, val isLeaf: Boolean, DLINK: Boolean = DEFAULT_DLINK) - extends Serializable: - - private [database] val key = Array.ofDim [ValueType] (order) // array to hold keys - private [database] val ref = Array.ofDim [Any] (order + 1) // array to hold values or reference nodes - private [database] var pre: BpNode = null // reference to previous LEAF node (if needed) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a new root node with one key (and two references) in it. - * @param left the left node (< dkey) - * @param dkey the divider key - * @param right the right node (>= dkey) - */ - def this (left: BpNode, dkey: ValueType, right: BpNode) = - this (1, false) - key(0) = dkey // divider key - ref(0) = left; ref(1) = right // left and right references - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether this node has overflowed (too many keys). - */ - inline def overflow: Boolean = keys >= order - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether this node has underflowed (too few keys). - */ - inline def underflow: Boolean = keys < mink - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether this node is rich (i.e., has surplus keys). - */ - inline def rich: Boolean = keys > mink - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the key at index position i. 
- * @param i the index position in this node - */ - inline def apply (i: Int): ValueType = - if i >= keys then flaw ("apply", s"index i = $i must be less than keys = $keys") - key(i) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the key at index position i. - * @param i the index position in this node - * @param k the new value for key(i) - */ - inline def update (i: Int, k: ValueType): Unit = - if i >= keys then flaw ("update", s"index i = $i must be less than keys = $keys") - key(i) = k - end update - -// //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -// /** Find and return the first position where 'k < key_i' in this node. -// * @param k the key whose position is sought -// */ -// def find (k: ValueType): Int = -// val i = key.indexWhere (k < _) -// if i < 0 then keys else i -// end find -// -// //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -// /** Find and return the first position where 'k == key_i' in this node. -// * @param k the key whose position is sought -// */ -// def findEq (k: ValueType): Int = key.indexWhere (k == _) - - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find and return the first position where 'k < key_i' in this node. - * If none, return keys (corresponds to last ref). - * @param k the key whose index position is sought - */ - def find(k: ValueType): Int = - var (found, i) = (false, 0) - while !found && i < keys do - if k < key(i) then found = true - else i += 1 - i - end find - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find and return the first position where 'k == key_i' in this node. - * If none, return -1 meaning not found. 
- * @param k the key whose index position is sought - */ - def findEq(k: ValueType): Int = - var (found, i) = (false, 0) - while !found && i < keys do - if k == key(i) then found = true - else i += 1 - if i < keys then i else -1 - end findEq - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add the new key k and value v into this node at insertion position (ip). - * @param k the new key - * @param v the new value (or node for internal nodes) - */ - def add (k: ValueType, v: Any): Unit = - val ip = find (k) // find insertion position (ip) - debug ("add", s"(k = $k, v = $v) pair at ip = $ip") - for i <- keys until ip by -1 do // make room by shifting keys right - key(i) = key(i-1) - ref(i+1) = ref(i) - key(ip) = k // insert new key - ref(ip+1) = v // insert new value (right of key) - keys += 1 // increment to number of active keys - end add - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split this LEAF node by creating a right sibling node (rt) and moving - * half the keys and references to that new node, leaving halfp. - * Return the divider key and the right sibling node. - */ - def split (): (ValueType, BpNode) = - show() - val rt = new BpNode (half, true) // allocate leaf right sibling node (rt) - for i <- 0 until half do // move largest half of keys (with refs) to rt - rt.key(i) = key(halfp + i) - rt.ref(i+1) = ref(halfp + i + 1) // refs are right of keys - rt.ref(0) = ref(0) // update LINKED LIST of nodes - if DLINK then rt.pre = this // reverse link - ref(0) = rt // this -> rt -> old-right - keys = halfp // reset number of active keys to help plus - (rt.key(0), rt) // (divider key (smallest right) and right sibling - end split - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split this INTERNAL node by creating a right sibling rt and moving half - * the keys and references to that new node, leaving halfp - 1. 
- * Return the divider key and the right sibling node. - */ - def splitI (): (ValueType, BpNode) = - val rt = new BpNode (half, false) // allocate internal right sibling node (rt) - for i <- 0 until half do // move largest half of keys (with refs) to rt - rt.key(i) = key(halfp + i) - rt.ref(i) = ref(halfp + i) - rt.ref(half) = ref(keys) // copy over the last ref - keys = halfp - 1 // reset number of active keys to help plus - 1 - (key(keys), rt) // divider key (middle key) and right sibling - end splitI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove key at dp and its reference from this node and check for underflow. - * @param dp the deletion index position (may use findEq to find it) - */ - def remove (dp: Int): Boolean = - debug ("remove", s"dp = $dp, key = ${key(dp)}") - for i <- dp until keys do // remove at dp by shifting keys left - key(i) = key(i+1) - ref(i+1) = ref(i+2) // case: ref on right - keys -= 1 // decrement the number of keys - underflow // if true, node underflows - end remove - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove key at dp and its reference from this INTERNAL node and check for underflow. - * @param dp the deletion index position (may use findEq to find it) - */ - def removeI (dp: Int): Boolean = - debug("remove", s"dp = $dp, key = ${key(dp)}") - for i <- dp until keys do // remove at dp by shifting keys left - key(i) = key(i + 1) - ref(i) = ref(i + 1) // case: ref on left - keys -= 1 // decrement the number of keys - underflow // if true, node underflows - end removeI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge this LEAF node with its right sibling node (rt). 
- * @param rt the right sibling node - */ - def merge (rt: BpNode): Unit = - for i <- 0 until rt.keys do // move keys from rt into this node - key(keys + i) = rt.key(i) - ref(keys + i + 1) = rt.ref(i + 1) - keys += rt.keys // add the number of keys from rt - rt.keys = 0 // make rt empty - val rt_next = rt.ref(0).asInstanceOf [BpNode] // the node to the right of rt - ref(0) = rt_next // unlink node rt in the forward (ref(0)) direction - if DLINK && rt_next != null then rt_next.pre = this // unlink node rt in the backward (pre) direction - end merge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge this INTERNAL node with its right sibling node (rt). - * @param dk the divider key from parent - * @param rt the right sibling node - */ - def mergeI (dk: ValueType, rt: BpNode): Unit = - key(keys) = dk // move divider key - ref(keys + 1) = rt.ref(0) // node corresponding to divider key - for i <- 0 until rt.keys do // move keys from rt into this node - key(keys + i + 1) = rt.key(i) - ref(keys + i + 2) = rt.ref(i + 1) - keys += rt.keys + 1 // add the number of keys from rt - rt.keys = 0 // make rt empty - end mergeI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this node to a string. - */ - override def toString: String = - val sb = StringBuilder ("[ . " ) - for i <- 0 until keys do sb ++= (s"${key(i)} . ") - sb ++= ("]" ) - sb.toString - end toString - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show the node's data structure. - */ - def show (): Unit = - println (s"isLeaf = $isLeaf") - println (s"keys = $keys") - println (s"key = ${stringOf (key)}") - println (s"ref = ${stringOf (ref)}") - if DLINK then println (s"pre = $pre") - end show - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show the node's references. 
- */ - def showRef (): Unit = - println (s"ref = ${stringOf (ref)}") - end showRef - -end BpNode - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpNodeTest` main function tests the `BpNode` class by inserting random - * key values and testing a leaf node split. - * > runMain scalation.database.bpNodeTest - */ -@main def bpNodeTest (): Unit = - - import java.util.Random - - banner ("Example of an Leaf Node Split") - - val totKeys = 5 - val mx = 10 * totKeys - val seed = 1 - val rng = new Random (seed) - val node = new BpNode (0, true) // empty leaf node - var right: BpNode = null - - for i <- 1 to totKeys do - val key = rng.nextInt (mx) - banner (s"put key = $key") - node.add (key, 2 * key) - println (s"node = $node") - if node.overflow then - println (s"BEFORE split: node = $node") - node.showRef () - val (dk, rt) = node.split () // split keys between node (3) and right (2) - right = rt - println (s"AFTER split: node = $node, dk = $dk, rt = $rt") - node.showRef (); rt.showRef () - end for - - banner ("Show Arrays") - node.show () - - banner ("Example of an Leaf Node Merge") - node.remove (2) - node.merge (right) - println (s"AFTER merge: node = $node, right = $right") - node.show () - -end bpNodeTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpNodeTest2` main function tests the `BpNode` class by inserting random - * key values and testing a internal node split. 
- * > runMain scalation.database.bpNodeTest2 - */ -@main def bpNodeTest2 (): Unit = - - import java.util.Random - - banner ("Example of an Internal Node Split") - - val totKeys = 5 - val mx = 10 * totKeys - val seed = 1 - val rng = new Random (seed) - val node = new BpNode (0, false) // empty internal node: false => not isLeaf - - for i <- 1 to totKeys do - val key = rng.nextInt (mx) - banner (s"put key = $key") - node.add (key, 2 * key) - println (s"node = $node") - if node.overflow then - println (s"BEFORE split: node = $node") - node.showRef () - val (dk, rt) = node.splitI () // splitI keys between node (2) and right (2) - println (s"AFTER split: node = $node, dk = $dk, rt = $rt") - node.showRef (); rt.showRef () - end for - - banner ("Show Arrays") - node.show () - -end bpNodeTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpNodeTest3` main function tests the `BpNode` class by inserting random - * and removing key values. - * > runMain scalation.database.bpNodeTest3 - */ -@main def bpNodeTest3 (): Unit = - - banner ("Example of key insertion and removal") - - val keyArr = Array (35, 47, 4, 38) - val node = new BpNode (0, true) // empty internal node: false => not isLeaf - - for k <- keyArr do - banner (s"put key = $k") - node.add (k, 2 * k) - println (s"node = $node") - end for - - for k <- keyArr do - banner (s"remove key = $k") - val dp = node.findEq (k) // find the deletion position (dp) - node.remove (dp) // remove the key and its reference - println (s"AFTER remove: node = $node") - node.showRef () - end for - - banner ("Show Arrays") - node.show () - -end bpNodeTest3 diff --git a/target/scala-3.6.4/classes/scalation/database/old/BpNode.scala.bak3 b/target/scala-3.6.4/classes/scalation/database/old/BpNode.scala.bak3 deleted file mode 100644 index 55e6e561e..000000000 --- a/target/scala-3.6.4/classes/scalation/database/old/BpNode.scala.bak3 +++ /dev/null @@ -1,358 +0,0 @@ - 
-//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Wed Aug 9 01:25:50 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @note B+Tree Node with find, add, split, remove, and merge Operations - * - * Node key: [ . k0 . k1 . k2 . k3 . ] - * < <= <= <= <= note: number of keys is one less than number of refs - * ref: r0 r1 r2 r3 r4 - * Leaf: r0 -> next leaf node; r1 -> tuple (k0, ...); r2 -> tuple (k1, ...); etc. - * Internal: r0 -> subtree with keys < k0; r1 -> subtree with keys in [k0, k1); etc. - * Split: extra room in nodes allows the overflow key to be inserted before split - */ - -package scalation -package database.bptree - -import scala.runtime.ScalaRunTime.stringOf - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BpNode` companion object provides settings for node sizes. - */ -object BpNode: - - private val debug = debugf ("BpNode", true) // debug function - private val flaw = flawf ("BpNode") // flaw function - - private var order = 5 // maximum number of references (reset as needed) - private var half = (order - 1) / 2 // half of max keys (floor) - private var halfp = order - half // rest of the keys (half plus) - private var mink = halfp - 1 // minimum number of keys (before underflow) - - debug ("init", s"order = $order, half = $half, halfp = $halfp, mink = $mink") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the order of BpNodes to order_. 
- * @param order_ the new order for BpNodes (must be at least 4) - */ - def setOrder (order_ : Int): Unit = - if order < 4 then flaw ("setOrder", s"order_ = $order_ must be at least 4") - else - order = order_ - half = (order - 1) / 2 - halfp = order - half - mink = half - 1 - end if - end setOrder - -end BpNode - -import BpNode._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BpNode` class defines nodes of size order that may be stored in a B+tree. - * Keys have type `ValueType` and may reference values of `Any` type. - * To hold an overflow before splitting, nodes have extra room (order + 1) - * @param keys number of active keys - * @param isLeaf whether this node is a leaf - */ -class BpNode (private [database] var keys: Int, val isLeaf: Boolean) - extends Serializable: - - private [database] val key = Array.ofDim [ValueType] (order) // array to hold keys - private [database] val ref = Array.ofDim [Any] (order + 1) // array to hold values or references to nodes - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a new root node with one key (and two references) in it. - * @param left the left node (< dkey) - * @param dkey the divider key - * @param right the right node (>= dkey) - */ - def this (left: BpNode, dkey: ValueType, right: BpNode) = - this (1, false) - key(0) = dkey // divider key - ref(0) = left; ref(1) = right // left and right references - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether this node has overflowed (too many keys). - */ - inline def overflow: Boolean = keys >= order - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether this node has underflowed (too few keys). 
- */ - inline def underflow: Boolean = keys < mink - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether this node is rich (i.e., has surplus keys). - */ - inline def rich: Boolean = keys > mink - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the key at index position i. - * @param i the index position in this node - */ - inline def apply (i: Int): ValueType = - if i >= keys then flaw ("apply", s"index i = $i must be less than keys = $keys") - key(i) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the key at index position i. - * @param i the index position in this node - * @param k the new value for key(i) - */ - inline def update (i: Int, k: ValueType): Unit = - if i >= keys then flaw ("update", s"index i = $i must be less than keys = $keys") - key(i) = k - end update - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find and return the first position where 'k < key_i' in this node. - * @param k the key whose position is sought - */ - def find (k: ValueType): Int = - val i = key.indexWhere (k < _) - if i < 0 then keys else i - end find - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find and return the first position where 'k == key_i' in this node. - * @param k the key whose position is sought - */ - def findEq (k: ValueType): Int = key.indexWhere (k == _) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add the new key k and value v into this node at insertion position (ip). 
- * @param k the new key - * @param v the new value (or node for internal nodes) - */ - def add (k: ValueType, v: Any): Unit = - val ip = find (k) // find insertion position (ip) - debug ("add", s"(k = $k, v = $v) pair at ip = $ip") - for i <- keys until ip by -1 do // make room by shifting keys right - key(i) = key(i-1) - ref(i+1) = ref(i) - key(ip) = k // insert new key - ref(ip+1) = v // insert new value (right of key) - keys += 1 // increment to number of active keys - end add - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split this LEAF node by creating a right sibling node (rt) and moving - * half the keys and references to that new node, leaving halfp. - * Return the divider key and the right sibling node. - */ - def split (): (ValueType, BpNode) = - val rt = new BpNode (half, true) // allocate leaf right sibling node (rt) - for i <- 0 until half do // move largest half of keys (with refs) to rt - rt.key(i) = key(halfp + i) - rt.ref(i+1) = ref(halfp + i + 1) // refs are right of keys - rt.ref(0) = ref(0) // update LINKED LIST of nodes - ref(0) = rt // this -> rt -> old-right - keys = halfp // reset number of active keys to help plus - (rt.key(0), rt) // (divider key (smallest right) and right sibling - end split - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split this INTERNAL node by creating a right sibling rt and moving half - * the keys and references to that new node, leaving halfp - 1. - * Return the divider key and the right sibling node. 
- */ - def splitI (): (ValueType, BpNode) = - val rt = new BpNode (half, false) // allocate internal right sibling node (rt) - for i <- 0 until half do // move largest half of keys (with refs) to rt - rt.key(i) = key(halfp + i) - rt.ref(i) = ref(halfp + i) - rt.ref(half) = ref(keys) // copy over the last ref - keys = halfp - 1 // reset number of active keys to help plus - 1 - (key(keys), rt) // divider key (middle key) and right sibling - end splitI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove key at dp and its reference from this node and check for underflow. - * @param dp the deletion index position (may use findEq to find it) - */ - def remove (dp: Int): Boolean = - debug ("remove", s"dp = $dp, key = ${key(dp)}") - for i <- dp until keys do // remove at dp by shifting keys left - key(i) = key(i+1) - ref(i+1) = ref(i+2) - keys -= 1 // decrement the number of keys - underflow // if true, node underflows - end remove - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge this LEAF node with its right sibling node (rt). - * @param rt the right sibling node - */ - def merge (rt: BpNode): Unit = - for i <- 0 until rt.keys do // move keys from rt into this node - key(keys + i) = rt.key(i) - ref(keys + i + 1) = rt.ref(i + 1) - keys += rt.keys // add the number of keys from rt - rt.keys = 0 // make rt empty - ref(0) = rt.ref(0) // unlink node rt - end merge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge this INTERNAL node with its right sibling node (rt). 
- * @param dk the divider key from parent - * @param rt the right sibling node - */ - def mergeI (dk: ValueType, rt: BpNode): Unit = - key(keys) = dk // move divider key - ref(keys + 1) = rt.ref(0) // node corresponding to divider key - for i <- 0 until rt.keys do // move keys from rt into this node - key(keys + i + 1) = rt.key(i) - ref(keys + i + 2) = rt.ref(i + 1) - keys += rt.keys + 1 // add the number of keys from rt - rt.keys = 0 // make rt empty - end mergeI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this node to a string. - */ - override def toString: String = - val sb = StringBuilder ("[ . " ) - for i <- 0 until keys do sb ++= (s"${key(i)} . ") - sb ++= ("]" ) - sb.toString - end toString - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show the node's data structure. - */ - def show (): Unit = - println (s"isLeaf = $isLeaf") - println (s"keys = $keys") - println (s"key = ${stringOf (key)}") - println (s"ref = ${stringOf (ref)}") - end show - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show the node's references. - */ - def showRef (): Unit = - println (s"ref = ${stringOf (ref)}") - end showRef - -end BpNode - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpNodeTest` main function tests the `BpNode` class by inserting random - * key values and testing a leaf node split. 
- * > runMain scalation.database.bptree.bpNodeTest - */ -@main def bpNodeTest (): Unit = - - import java.util.Random - - banner ("Example of an Leaf Node Split") - - val totKeys = 5 - val mx = 10 * totKeys - val seed = 1 - val rng = new Random (seed) - val node = new BpNode (0, true) // empty leaf node - var right: BpNode = null - - for i <- 1 to totKeys do - val key = rng.nextInt (mx) - banner (s"put key = $key") - node.add (key, 2 * key) - println (s"node = $node") - if node.overflow then - println (s"BEFORE split: node = $node") - node.showRef () - val (dk, rt) = node.split () // split keys between node (3) and right (2) - right = rt - println (s"AFTER split: node = $node, dk = $dk, rt = $rt") - node.showRef (); rt.showRef () - end for - - banner ("Show Arrays") - node.show () - - banner ("Example of an Leaf Node Merge") - node.remove (2) - node.merge (right) - println (s"AFTER merge: node = $node, right = $right") - node.show () - -end bpNodeTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpNodeTest2` main function tests the `BpNode` class by inserting random - * key values and testing a internal node split. 
- * > runMain scalation.database.bptree.bpNodeTest2 - */ -@main def bpNodeTest2 (): Unit = - - import java.util.Random - - banner ("Example of an Internal Node Split") - - val totKeys = 5 - val mx = 10 * totKeys - val seed = 1 - val rng = new Random (seed) - val node = new BpNode (0, false) // empty internal node: false => not isLeaf - - for i <- 1 to totKeys do - val key = rng.nextInt (mx) - banner (s"put key = $key") - node.add (key, 2 * key) - println (s"node = $node") - if node.overflow then - println (s"BEFORE split: node = $node") - node.showRef () - val (dk, rt) = node.splitI () // splitI keys between node (2) and right (2) - println (s"AFTER split: node = $node, dk = $dk, rt = $rt") - node.showRef (); rt.showRef () - end for - - banner ("Show Arrays") - node.show () - -end bpNodeTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpNodeTest3` main function tests the `BpNode` class by inserting random - * and removing key values. - * > runMain scalation.database.bptree.bpNodeTest3 - */ -@main def bpNodeTest3 (): Unit = - - banner ("Example of key insertion and removal") - - val keyArr = Array (35, 47, 4, 38) - val node = new BpNode (0, true) // empty internal node: false => not isLeaf - - for k <- keyArr do - banner (s"put key = $k") - node.add (k, 2 * k) - println (s"node = $node") - end for - - for k <- keyArr do - banner (s"remove key = $k") - val dp = node.findEq (k) // find the deletion position (dp) - node.remove (dp) // remove the key and its reference - println (s"AFTER remove: node = $node") - node.showRef () - end for - - banner ("Show Arrays") - node.show () - -end bpNodeTest3 - diff --git a/target/scala-3.6.4/classes/scalation/database/old/BpNode.scala.bak4 b/target/scala-3.6.4/classes/scalation/database/old/BpNode.scala.bak4 deleted file mode 100644 index 12302bbb8..000000000 --- a/target/scala-3.6.4/classes/scalation/database/old/BpNode.scala.bak4 +++ /dev/null @@ -1,409 +0,0 @@ - 
-//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Wed Aug 9 01:25:50 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @note B+Tree Node with find, add, split, remove, and merge Operations - */ - -package scalation -package database - -import scala.runtime.ScalaRunTime.stringOf - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BpNode` companion object provides settings for node sizes. - */ -object BpNode: - - private var DEFAULT_DLINK = true // default value for DLINK - - private val debug = debugf ("BpNode", false) // debug function - private val flaw = flawf ("BpNode") // flaw function - - private var order = 5 // maximum number of references (reset as needed) - private var half = order / 2 // half of order keys (floor) - private var halfp = order - half // rest of the overflowed keys - private var mink = halfp - 1 // minimum number of keys (before underflow) - - debug ("init", s"order = $order, half = $half, halfp = $halfp, mink = $mink") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the leaf node DEFAULT_DLINK to dlink (false => forward links only via ref(0), - * true => both forward links via ref(0) and backward links via pre). - * @see `BpNode` class for specification of ref(0) and pre - * @param dlink whether to support unidirectionsl (false) or bidirections (true) linkage - */ - def set_DEFAULT_DLINK (dlink: Boolean): Unit = DEFAULT_DLINK = dlink - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the order of BpNodes to order_. 
- * @param order_ the new order for BpNodes (must be at least 4) - */ - def setOrder (order_ : Int): Unit = - if order < 3 then flaw ("setOrder", s"order_ = $order_ must be at least 3") - else - order = order_ - half = order / 2 -// half = (order - 1) / 2 - halfp = order - half - mink = half - 1 - end setOrder - -end BpNode - -import BpNode._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BpNode` class defines nodes of size order that that may be stored in a B+tree. - * Keys have type `ValueType` and may reference values of `Any` type. - * @param keys number of active keys - * @param isLeaf whether this node is a leaf - * @param DLINK whether the leaf nodes support linkage in both directions (ref(0) & pre) - */ -class BpNode (private [database] var keys: Int, val isLeaf: Boolean, DLINK: Boolean = DEFAULT_DLINK) - extends Serializable: - - private [database] val key = Array.ofDim [ValueType] (order) // array to hold keys - private [database] val ref = Array.ofDim [Any] (order + 1) // array to hold values or reference nodes - private [database] var pre: BpNode = null // reference to previous LEAF node (if needed) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a new root node with one key (and two references) in it. - * @param left the left node (< dkey) - * @param dkey the divider key - * @param right the right node (>= dkey) - */ - def this (left: BpNode, dkey: ValueType, right: BpNode) = - this (1, false) - key(0) = dkey // divider key - ref(0) = left; ref(1) = right // left and right references - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether this node has overflowed (too many keys). - */ - inline def overflow: Boolean = keys >= order - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether this node has underflowed (too few keys). 
- */ - inline def underflow: Boolean = keys < mink - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether this node is rich (i.e., has surplus keys). - */ - inline def rich: Boolean = keys > mink - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the key at index position i. - * @param i the index position in this node - */ - inline def apply (i: Int): ValueType = - if i >= keys then flaw ("apply", s"index i = $i must be less than keys = $keys") - key(i) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the key at index position i. - * @param i the index position in this node - * @param k the new value for key(i) - */ - inline def update (i: Int, k: ValueType): Unit = - if i >= keys then flaw ("update", s"index i = $i must be less than keys = $keys") - key(i) = k - end update - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find and return the first position where 'k < key_i' in this node. - * If none, return keys (corresponds to last ref). - * @param k the key whose index position is sought - */ - def find (k: ValueType): Int = - var (found, i) = (false, 0) - while ! found && i < keys do - if k < key(i) then found = true - else i += 1 - i - end find - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find and return the first position where 'k == key_i' in this node. - * If none, return -1 meaning not found. - * @param k the key whose index position is sought - */ - def findEq (k: ValueType): Int = - var (found, i) = (false, 0) - while ! found && i < keys do - if k == key(i) then found = true - else i += 1 - if i < keys then i else -1 - end findEq - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Shift keys from ip to keys one position to the RIGHT (make room for insertion). 
- * Also shift all references to the right of key ip. - * @param ip the future insertion position - */ - def shiftR (ip: Int): Unit = - for i <- keys until ip by -1 do // make room by shifting keys right - key(i) = key(i-1) // move key right - ref(i+1) = ref(i) // ref to the right of key - end shiftR - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add the new key k and value v into this node at the to be found insertion - * position (ip). If a duplicate key is entered, return the OLD VALUE stored - * with the key, otherwise return None (meaning no duplicate was found).. - * @param k the new key - * @param v the new value (or node for internal nodes) - */ - def add (k: ValueType, v: Any): Option [Any] = - val ip = find (k) // find insertion position (ip) - val duplicate = ip != 0 && k == key(ip-1) // determine whether the new key is a duplicate - debug ("add", s"(k = $k, v = $v) pair at ip = $ip") - - if duplicate then - val old = ref(ip) // save the OLD VALUE for duplicate key - ref(ip) = v // replace OLD VALUE with new value - Some (old) // return the OLD VALUE, indicates duplicate - else - shiftR (ip) // make room by shifting keys right - key(ip) = k // insert new key - ref(ip+1) = v // insert new value (right of key) - keys += 1 // increment the number of active keys - None // indicates no duplicate found - end add - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split this LEAF node by creating a right sibling node (rt) and moving - * half the keys and references to that new node, leaving halfp. - * Return the divider key and the right sibling node. 
- */ - def split (): (ValueType, BpNode) = - val rt = new BpNode (half, true) // allocate leaf right sibling node (rt) - for i <- 0 until half do // move largest half of keys (with refs) to rt - rt.key(i) = key(halfp + i) - rt.ref(i+1) = ref(halfp + i + 1) // refs are right of keys - rt.ref(0) = ref(0) // update LINKED LIST of nodes - if DLINK then rt.pre = this // reverse link - ref(0) = rt // this -> rt -> old-right - keys = halfp // reset number of active keys to help plus - (rt.key(0), rt) // (divider key (smallest right) and right sibling - end split - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split this INTERNAL node by creating a right sibling rt and moving half - * the keys and references to that new node, leaving halfp - 1. - * Return the divider key and the right sibling node. - */ - def splitI (): (ValueType, BpNode) = - val rt = new BpNode (half, false) // allocate internal right sibling node (rt) - for i <- 0 until half do // move largest half of keys (with refs) to rt - rt.key(i) = key(halfp + i) - rt.ref(i) = ref(halfp + i) - rt.ref(half) = ref(keys) // copy over the last ref - keys = halfp - 1 // reset number of active keys to help plus - 1 - (key(keys), rt) // divider key (middle key) and right sibling - end splitI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove key at dp and its reference from this node and check for underflow. 
- * @param dp the deletion index position (may use findEq to find it) - */ - def remove (dp: Int): Boolean = - debug ("remove", s"dp = $dp, key = ${key(dp)}") - for i <- dp until keys do // remove at dp by shifting keys left - key(i) = key(i+1) - ref(i+1) = ref(i+2) // case: ref on right - keys -= 1 // decrement the number of keys - underflow // if true, node underflows - end remove - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove key at dp and its reference from this INTERNAL node and check for underflow. - * @param dp the deletion index position (may use findEq to find it) - */ - def removeI (dp: Int): Boolean = - debug("remove", s"dp = $dp, key = ${key(dp)}") - for i <- dp until keys do // remove at dp by shifting keys left - key(i) = key(i + 1) - ref(i) = ref(i + 1) // case: ref on left - keys -= 1 // decrement the number of keys - underflow // if true, node underflows - end removeI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge this LEAF node with its right sibling node (rt). - * @param rt the right sibling node - */ - def merge (rt: BpNode): Unit = - for i <- 0 until rt.keys do // move keys from rt into this node - key(keys + i) = rt.key(i) - ref(keys + i + 1) = rt.ref(i + 1) - keys += rt.keys // add the number of keys from rt - rt.keys = 0 // make rt empty - val rt_next = rt.ref(0).asInstanceOf [BpNode] // the node to the right of rt - ref(0) = rt_next // unlink node rt in the forward (ref(0)) direction - if DLINK && rt_next != null then rt_next.pre = this // unlink node rt in the backward (pre) direction - end merge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge this INTERNAL node with its right sibling node (rt). 
- * @param dk the divider key from parent - * @param rt the right sibling node - */ - def mergeI (dk: ValueType, rt: BpNode): Unit = - key(keys) = dk // move divider key - ref(keys + 1) = rt.ref(0) // node corresponding to divider key - for i <- 0 until rt.keys do // move keys from rt into this node - key(keys + i + 1) = rt.key(i) - ref(keys + i + 2) = rt.ref(i + 1) - keys += rt.keys + 1 // add the number of keys from rt - rt.keys = 0 // make rt empty - end mergeI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this node to a string. - */ - override def toString: String = - val sb = StringBuilder ("[ . " ) - for i <- 0 until keys do sb ++= (s"${key(i)} . ") - sb ++= ("]" ) - sb.toString - end toString - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show the node's data structure. - */ - def show (): Unit = - println (s"isLeaf = $isLeaf") - println (s"keys = $keys") - println (s"key = ${stringOf (key)}") - println (s"ref = ${stringOf (ref)}") - if DLINK then println (s"pre = $pre") - end show - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show the node's references. - */ - def showRef (): Unit = - println (s"ref = ${stringOf (ref)}") - end showRef - -end BpNode - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpNodeTest` main function tests the `BpNode` class by inserting random - * key values and testing a leaf node split. 
- * > runMain scalation.database.bpNodeTest - */ -@main def bpNodeTest (): Unit = - - import java.util.Random - - banner ("Example of an Leaf Node Split") - - val totKeys = 5 - val mx = 10 * totKeys - val seed = 1 - val rng = new Random (seed) - val node = new BpNode (0, true) // empty leaf node - var right: BpNode = null - - for i <- 1 to totKeys do - val key = rng.nextInt (mx) - banner (s"put key = $key") - node.add (key, 2 * key) - println (s"node = $node") - if node.overflow then - println (s"BEFORE split: node = $node") - node.showRef () - val (dk, rt) = node.split () // split keys between node (3) and right (2) - right = rt - println (s"AFTER split: node = $node, dk = $dk, rt = $rt") - node.showRef (); rt.showRef () - end for - - banner ("Show Arrays") - node.show () - - banner ("Example of an Leaf Node Merge") - node.remove (2) - node.merge (right) - println (s"AFTER merge: node = $node, right = $right") - node.show () - -end bpNodeTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpNodeTest2` main function tests the `BpNode` class by inserting random - * key values and testing a internal node split. 
- * > runMain scalation.database.bpNodeTest2 - */ -@main def bpNodeTest2 (): Unit = - - import java.util.Random - - banner ("Example of an Internal Node Split") - - val totKeys = 5 - val mx = 10 * totKeys - val seed = 1 - val rng = new Random (seed) - val node = new BpNode (0, false) // empty internal node: false => not isLeaf - - for i <- 1 to totKeys do - val key = rng.nextInt (mx) - banner (s"put key = $key") - node.add (key, 2 * key) - println (s"node = $node") - if node.overflow then - println (s"BEFORE split: node = $node") - node.showRef () - val (dk, rt) = node.splitI () // splitI keys between node (2) and right (2) - println (s"AFTER split: node = $node, dk = $dk, rt = $rt") - node.showRef (); rt.showRef () - end for - - banner ("Show Arrays") - node.show () - -end bpNodeTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpNodeTest3` main function tests the `BpNode` class by inserting random - * and removing key values. - * > runMain scalation.database.bpNodeTest3 - */ -@main def bpNodeTest3 (): Unit = - - banner ("Example of key insertion and removal") - - val keyArr = Array (35, 47, 4, 38) - val node = new BpNode (0, true) // empty internal node: false => not isLeaf - - for k <- keyArr do - banner (s"put key = $k") - node.add (k, 2 * k) - println (s"node = $node") - end for - - for k <- keyArr do - banner (s"remove key = $k") - val dp = node.findEq (k) // find the deletion position (dp) - node.remove (dp) // remove the key and its reference - println (s"AFTER remove: node = $node") - node.showRef () - end for - - banner ("Show Arrays") - node.show () - -end bpNodeTest3 - diff --git a/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.bak b/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.bak deleted file mode 100644 index c972b1303..000000000 --- a/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.bak +++ /dev/null @@ -1,522 +0,0 @@ - 
-//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Fri Aug 11 00:26:03 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @note Sorted Map Implemented Using B+Trees - * - * Split Nodes on Overflow - * Borrow/Merge Nodes on Underflow (not yet implemented) - */ - -package scalation -package database - -import scala.collection.mutable.{AbstractMap, SortedMap} -import scala.reflect.ClassTag - -import BpNode._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BpTreeMap` class provides sorted maps that use the B+Tree Data Structure. - * Inserts may cause the splitting of nodes. - * @tparam V the type of the values assigned to keys in this sorted map - */ -class BpTreeMap [V: ClassTag] () - extends AbstractMap [ValueType, V] - with SortedMap [ValueType, V] - with Serializable: - - var count = 0 // count # nodes accessed (performance) - - private val debug = debugf ("BpTreeMap", true) // debug function - - private var keyCount = 0 // counter for total number of keys - private var root = new BpNode () // root node of this B+Tree - private val first = root // first leaf node in this B+Tree - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `SortedMap` trait requires `Ordering` with a compare method to be defined. - * @see https://scala-lang.org/api/3.3.0/scala/math/Ordering.html - * @see ValueType.scala in `scalation.package` - */ - def ordering: Ordering [ValueType] = ValueTypeOrd - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the size of this B+Tree. 
- */ - override def size: Int = keyCount - -//------------------------------------------------------------------------------ -// Retrieve values or ranges (substrees) -//------------------------------------------------------------------------------ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `TreeIterator` inner class supports iterating over all the elements - * in a B+Tree by traversing through the LEAF nodes of the tree. - * @param ns the starting leaf node (defaults to first) - * @param js the starting within node index (defaults to -1) - */ - class TreeIterator (ns: BpNode = first, js: Int = -1) extends Iterator [(ValueType, V)]: - var (n, j) = (ns, js) - def hasNext: Boolean = j < n.nKeys-1 || n.ref(n.nKeys) != null - def next (): (ValueType, V) = - debug ("next", s"node n = $n") - if j < n.nKeys-1 then j += 1 else { n = n.ref(n.nKeys).asInstanceOf [BpNode]; j = 0 } - (n(j), n.ref(j).asInstanceOf [V]) - end next - end TreeIterator - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the elements in this B+Tree. - * @see scala.collection.IterableOnce - */ - def iterator: Iterator [(ValueType, V)] = new TreeIterator () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the elements in this B+Tree starting - * from key start. Returns null if all keys in tree are smaller than start. - * @see scala.collection.SortedMapOps - * @param start the key to start with - */ - def iteratorFrom (start: ValueType): Iterator [(ValueType, V)] = - val (ns, js) = findp (start, root) - if ns != null then new TreeIterator (ns, js) - else null - end iteratorFrom - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the keys in this B+Tree starting - * from key start. 
- * @see scala.collection.SortedMapOps - * @param start the key to start with - */ - def keysIteratorFrom (start: ValueType): Iterator [ValueType] = - throw new UnsupportedOperationException ("keysIteratorFrom not available, use iteratorFrom instead") - end keysIteratorFrom - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the the submap starting at from and ending before until. - * @see scala.collection.SortedOps - * @param from the starting key (inclusive) - * @param until the ending key (exclusive) - */ - def rangeImpl (from: Option [ValueType], until: Option [ValueType]): BpTreeMap [V] = - val subtree = new BpTreeMap [V] () - val it = if from.isDefined then iteratorFrom (from.get) - else iterator - var cont = true - while cont && it.hasNext do - val (k, v) = it.next () - if ! until.isDefined || k < until.get then subtree.addOne ((k, v)) - else cont = false - end while - subtree - end rangeImpl - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the value associated with the key by looking it up in this B+Tree. - * @param key the key used for look up - */ - def get (key: ValueType): Option [V] = Option (find (key)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find the given key in this B+tree and return its corresponding value. - * Calls the recursive findp method. - * @param key the key to find - */ - inline def find (key: ValueType): V = - val (ln, ip) = findp(key, root) // leaf node, index position - if ip != -1 then ln.ref(ip).asInstanceOf [V] - else null.asInstanceOf [V] - end find - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for finding the position of the given key in this B+tree. 
- * @param key the key to find - * @param n the current node - */ - private def findp (key: ValueType, n: BpNode): (BpNode, Int) = - count += 1 - val ip = n.find (key) - if n.isLeaf then - if n.eqAt (key, ip) then (n, ip) else (null, -1) - else - findp (key, n.ref(ip).asInstanceOf [BpNode]) - end if - end findp - -//------------------------------------------------------------------------------ -// Add key-value pairs into the B+Tree -//------------------------------------------------------------------------------ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add one key-value pair into this B+Tree and return this. - * Called by the put method in `AbstractMap`. - * Note: it splits the node that overflows - * @param elem the key-value pair to add/insert - */ - def addOne (elem: (ValueType, V)): this.type = - val (key, value) = elem - keyCount += 1 // increment the key count - insert (key, value, root) // call the recursive insert - this - end addOne - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for inserting key and ref into this B+tree. - * Returns key and right sibling upon split, else null. 
- * @param key the key to insert - * @param ref the value/node to insert - * @param n the current node - */ - private def insert (key: ValueType, ref: V, n: BpNode): (ValueType, BpNode) = - var k_r: (ValueType, BpNode) = null - if n.isLeaf then // handle LEAF node - k_r = add (n, key, ref) - if k_r != null then - if n != root then return k_r - root = new BpNode (root, k_r._1, k_r._2) // make a new root - end if - else // handle INTERNAL node - k_r = insert (key, ref, n.ref(n.find (key)).asInstanceOf [BpNode]) - if k_r != null then - k_r = addI (n, k_r._1, k_r._2) - if k_r != null && n == root then root = new BpNode (root, k_r._1, k_r._2) - end if - end if - k_r - end insert - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add a new key k and value v into LEAF node n. If it is already full, a split will - * be triggered, in which case the divider key and new right sibling node are returned. - * @param n the current node - * @param k the new key - * @param v the new left value - */ - private def add (n: BpNode, k: ValueType, v: V): (ValueType, BpNode) = - var k_r: (ValueType, BpNode) = null // divider key, right sibling - val split = n.add (k, v) // try adding into node n unless it is full - if split then // its full, must split - k_r = n.split () // split keys between node and right - if k > k_r._1 then k_r._2.add (k, v) // try again after split, add into node r - else n.add (k, v) // try again after split, add into node n - end if - k_r - end add - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add a new key k and value v into INTERNAL node n. If it is already full, - * a split will be triggered, in which case the divider key and new right - * sibling node are returned. 
- * @param n the current node - * @param k the new key - * @param v the new left value (ref a node) - */ - private def addI (n: BpNode, k: ValueType, v: BpNode): (ValueType, BpNode) = - var k_r: (ValueType, BpNode) = null // divider key, right sibling - val split = n.addI (k, v) // try adding into node n unless it is full - if split then // its full, must split - k_r = n.splitI () // split keys between node and right - n.promote () - if k > k_r._1 then k_r._2.addI (k, v) // try again after split, add into node r - else n.addI (k, v) // try again after split, add into node n - end if - k_r - end addI - -//------------------------------------------------------------------------------ -// Remove key-value pair from the B+Tree -//------------------------------------------------------------------------------ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Subtract/remove the one element (key-value pair) with the given key. - * Called by the remove method in `AbstractMap`. - * @param key the key whose element is to be removed - */ - def subtractOne (key: ValueType): this.type = - keyCount -= 1 // decrement the key count - delete (key, root) // call the recursive delete - this - end subtractOne - - private var par: BpNode = null // save parent node (?) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for deleting key with its ref from this B+tree. - * Returns merge position upon merge, else -1. 
- * @param key the key to delete - * @param n the current node - */ - private def delete (key: ValueType, n: BpNode): Boolean = - - var parUnderflow = false // whether the parent node underflows - - if n.isLeaf then // handle LEAF node - val dp = n.find (key) // deletion position in leaf node n - val underflow = n.remove (key, dp) - if n != root && underflow then // check for underflow - println (s"delete: needs to handle underflow of leaf node $n") - val j = par.find (key) // j-th index position in parent - val (sib, left) = richestSib (par, j) // richest sib and whether it's left - println (s"delete: leaf sib = $sib") - - if sib.rich then borrow (n, sib, left, par, j) // borrow a key from rich sib - else parUnderflow = merge (n, sib, left, par, j) // merge nodes n and sib, may cause parent to underflow - end if - else // handle INTERNAL node - par = n // save parent node - val dp = n.find (key) // deletion position in internal node n - delete (key, n.ref(dp).asInstanceOf [BpNode]) - - //-------------------------------------------------- - // implement code for internal node borrow and merge - //-------------------------------------------------- - - if parUnderflow then - println (s"delete: needs to handle underflow of internal node $n") - val j = par.find (key) // j-th index position in parent - val (sib, left) = richestSib (par, j) // richest sib and whether it's left - println (s"delete: internal sib = $sib") - - if sib.rich then borrowI (n, sib, left, par, j) // borrow a key from rich sib - else parUnderflow = mergeI (n, sib, left, par, j) // merge nodes n and sib, may cause parent to underflow - end if - end if - parUnderflow - end delete - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return node n's richest sibling (and whether it is left/true or right/false) - * found from parent node. 
- * @param par the parent node of node n that has underflowed - * @param i the position of node n as a child of node par - */ - private def richestSib (par: BpNode, i: Int): (BpNode, Boolean) = - debug ("richSib", s"return node n's (@ $i) richest sibling (left or right)") - val leftn = if i-1 >= 0 then par.ref(i-1).asInstanceOf [BpNode] else null - val rightn = if i+1 <= par.nKeys then par.ref(i+1).asInstanceOf [BpNode] else null - - if leftn == null then (rightn, false) - else if rightn == null then (leftn, true) - else if leftn.nKeys >= rightn.nKeys then (leftn, true) - else (rightn, false) - end richestSib - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Borrow a key-value pair from a rich sibling, so LEAF node n won't underflow. - * For borrow left, last key in left sib k2 moves to n, then k1 replaces k2 in par. - * [ ... k2 ... ] [ ... k1 ... ] - * [ ... k1 k2 ] [ k3 ... ] TO [ ... k1 ] [ k2 k3 ... ] - * @param n the current node that has underflowed - * @param sib the rich sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def borrow (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Unit = - val i = if left then sib.nKeys-1 else 0 - val bkey = sib(i) - debug ("borrow", s"key $bkey from rich sib = $sib node for node n = $n having par = $par at j = $j") - val bref = sib.ref(i).asInstanceOf [V] - sib.remove (bkey, i) - add (n, bkey, bref) - par(j-1) = if left then sib(sib.nKeys-1) else bkey // correct the divider key - end borrow - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Borrow a key-value pair from a rich sibling, so INTERNAL node n won't underflow. - * For borrow left, last key in left sib k2 rotates into par, whose key k3 rotates to n. - * [ ... k3 ... ] [ ... k2 ... ] - * [ ... k1 k2 ] [ k4 ... ] TO [ ... k1 ] [ k3 k4 ... 
] - * @param n the current node that has underflowed - * @param sib the rich sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def borrowI (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Unit = - println (s"borrowI ($n, $sib, $left, $par, $j) not yet implemented") - end borrowI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge LEAF node n with its sibling return whether the parent node has underflowed. - * @param n the current node that has underflowed - * @param sib the sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def merge (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Boolean = - debug ("merge", s"node n = $n that underflows with sib = $sib having par = $par at j = $j") - if left then sib.merge (n) - else n.merge (sib) - par.removeRight (j-1) // true mean parent underflowed - end merge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge INTERNAL node n with its sibling return whether the parent node has underflowed. 
- * @param n the current node that has underflowed - * @param sib the sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def mergeI (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Boolean = - debug ("mergeI", s"internal node n = $n that underflows with sib = $sib having par = $par at j = $j") - false - end mergeI - -//------------------------------------------------------------------------------ -// Print/show the B+Tree -//------------------------------------------------------------------------------ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show/print this B+Tree. - */ - def show (): Unit = - println ("BpTreeMap") - printT (root, 0) - println ("-" * 60) - end show - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Print this B+Tree map using a pre-order traversal and indenting each level. - * @param n the current node to print - * @param level the current level in the B+Tree - */ - private def printT (n: BpNode, level: Int): Unit = - println ("\t" * level + n) - if ! n.isLeaf then - for j <- 0 to n.nKeys do printT (n.asInstanceOf [BpNode].ref(j).asInstanceOf [BpNode], level + 1) - end if - end printT - -end BpTreeMap - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest` main function used for testing the `BpTreeMap` class by - * inserting increasing key values. 
- * > runMain scalation.database.bpTreeMapTest - */ -@main def bpTreeMapTest (): Unit = - - banner ("Insert Increasing Integer Keys") - val totKeys = 36 - val tree = new BpTreeMap [Int] () - - for i <- 1 until totKeys by 2 do - banner (s"put ($i, ${i * i})") - tree.put (i, i * i) - tree.show () - end for - - banner ("Find Keys") - for i <- 0 until totKeys do println (s"key = $i, value = ${tree.get(i)}") - println ("-" * 60) - - banner ("Iterate Through the B+Tree") - for it <- tree.iterator do println (it) - println ("-" * 60) - tree.foreach (println (_)) - - banner ("Print Statistics") - println (s"size = ${tree.size}") - println (s"Average number of nodes accessed = ${tree.count / totKeys.toDouble}") - - banner ("Delete Keys") - tree.show () - val toRemove = Array (29, 31, 33, 35, 27) - for key <- toRemove do - banner (s"remove ($key)") - tree.remove (key) - tree.show () - end for - -end bpTreeMapTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest2` main function used for testing the `BpTreeMap` class by - * inserting random key values. - * > runMain scalation.database.bpTreeMapTest2 - */ -@main def bpTreeMapTest2 (): Unit = - - import java.util.Random - - banner ("Insert Random Integer Keys") - val totKeys = 60 - val mx = 10 * totKeys - val seed = 1 - val rng = new Random (seed) - val tree = new BpTreeMap [Int] () - - for i <- 1 to totKeys do - val key = rng.nextInt (mx) - banner (s"put ($key, ${2 * key})") - tree.put (key, 2 * key) - tree.show () - end for - - banner ("Print Statistics") - println (s"size = ${tree.size}") - println (s"Average number of nodes accessed = ${tree.count / totKeys.toDouble}") - -end bpTreeMapTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest3` main function used for testing the `BpTreeMap` class by - * inserting keys and values into B+Trees, one representing each of two lanes. 
- * > runMain scalation.database.bpTreeMapTest3 - */ -@main def bpTreeMapTest3 (): Unit = - - import java.util.Random - - case class Car (vin: Int, dist: Double) - - banner ("Insert Random Integer Keys") - val totKeys = 60 - val seed = 1 - val rng = new Random (seed) - val lane1 = new BpTreeMap [Car] () // index for lane1 - val lane2 = new BpTreeMap [Car] () // index for lane2 - - var dist = 0.0 // distance from end of lane - var ord = 0 // rank order from end of lane - for i <- 1 to totKeys do - dist += rng.nextInt (5) - val c_i = Car (i, dist) // the car being put into lane1's B+Tree - ord += 10 // rank order of car toward end of lane - banner (s"put ($ord, $c_i)") - lane1.put (ord, c_i) - lane1.show () - end for - - dist = 0.0 // distance from end of lane - ord = 0 - for i <- 1 to totKeys do - dist += rng.nextInt (5) - val c_i = Car (totKeys + i, dist) // the car being put into lane2's B+Tree - ord += 10 // rank order of car toward end of lane - banner (s"put ($ord, $c_i)") - lane2.put (ord, c_i) - lane2.show () - end for - - // find the j-th car in lane1 call it car1 - // find the corresponding j-th car in lane2 call it car2 - // check whether car2 is behind car1 in the other lane - // may need a doubly linked list of nodes at the leaf-level to search forward and backward - // find the closest car in the other lane that is behind you - // if its distance is large enough, make the lane change - // may need gaps in ord so lane changing car can get an ord without making all care reassign theirs - -end bpTreeMapTest3 - diff --git a/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.bak2 b/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.bak2 deleted file mode 100644 index fab0c885e..000000000 --- a/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.bak2 +++ /dev/null @@ -1,441 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date 
Mon Jul 4 12:55:00 EDT 2022 - * @see LICENSE (MIT style license file). - * - * @node Map Implemented Using B+Trees - * - * Split Nodes on Overflow - * Merge Nodes on Underflow (not yet implemented) - */ - -package scalation -package database - -//import scala.collection.SortedMapFactoryDefaults -import scala.collection.mutable.{AbstractMap, SortedMap} // SortedMapOps} -import scala.reflect.ClassTag - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BpTreeMap` class provides sorted maps that use the B+Tree Data Structure. - * Inserts may cause the splitting of nodes. - * @tparam K the type of the keys contained in this sorted map - * @tparam V the type of the values assigned to keys in this sorted map - * @param order the order (maximum number of children per node) - * @param ord the implicit ordering used to compare objects of type K - */ -class BpTreeMap [K: ClassTag, V: ClassTag] (order: Int = 5)(implicit val ord: Ordering [K]) - extends AbstractMap [K, V] - with SortedMap [K, V] -// with SortedMapOps [K, V, BpTreeMap, BpTreeMap [K, V]] -// with SortedMapFactoryDefaults [K, V, BpTreeMap, Iterable, Map] - with Serializable: - - var count = 0 // count # nodes accessed (performance) - - private val debug = debugf ("BpTreeMap", true) // debug function - private val flaw = flawf ("BpTreeMap") // flaw function - private val mxk = order - 1 // maximum number of keys - private val half = mxk / 2 // half of max keys - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `Node` inner class defines nodes that are stored in this B+tree. 
- * @param isLeaf whether this node is a leaf - */ - class Node (val isLeaf: Boolean = true) - extends Serializable: - - val key = Array.ofDim [K] (mxk) // array to hold keys - val ref = Array.ofDim [Any] (order) // array to hold values or reference nodes - var nKeys = 0 // number of active keys - var next: Node = null // reference to next leaf node - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a new root node with one key (and two references) in it. - * @param lt the left node - * @param kd the divider key - * @param rt the right node - */ - def this (lt: Node, kd: K, rt: Node) = - this (false) - nKeys = 1 - ref(0) = lt; key(0) = kd; ref(1) = rt - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find the "<=" position of key k in this node. If k is larger than all - * keys in this node, return nkeys. - * @param k the key whose position is sought - */ - def find (k: K): Int = - val j = key.indexWhere (ord.lteq (k, _)) - if j < 0 then nKeys else j - end find - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** When space is available, wedge the new key k and value v into this node - * at the given insertion position ip. - * @param k the new key - * @param v the new value - * @param ip the insertion position - * @param left whether to start with from the left size of the key - */ - def wedge (k: K, v: Any, ip: Int, left: Boolean = true): Unit = - if nKeys > mxk then - flaw ("wedge", "node is already full") - else - for j <- nKeys until ip by -1 do - key(j) = key(j-1) - if left || j > ip + 1 then ref(j) = ref(j-1) - end for - key(ip) = k; if left then ref(ip) = v else ref(ip+1) = v - nKeys += 1 - end if - end wedge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split this node by creating a right sibling rt and moving half the keys and - * references to that new node. 
Return the divider key and the right sibling node. - */ - def split (): (K, Node) = - val rt = new Node (isLeaf) - for j <- 0 until half do - rt.key(j) = key(j + half) - rt.ref(j) = ref(j + half) - end for - rt.ref(half) = ref(mxk) - if isLeaf then ref(mxk) = rt - rt.nKeys = half - nKeys = half - (key(half-1), rt) - end split - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show/print the contents of this node. - */ - def show (): Unit = - print ("[ . " ) - for j <- 0 until nKeys do print (s"${key(j)} . ") - println ("]" ) - end show - - end Node - - - private var keyCount = 0 // counter for total number of keys - - private var root = new Node () // root node of this B+Tree - private val first = root // first leaf node in this B+Tree - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `TreeIterator` inner class supports iterating over all the elements - * in a B+Tree by traversing through the leaf nodes of the tree. - * @param ns the starting leaf node (defaults to first) - * @param js the starting within node index (defaults to 0) - */ - class TreeIterator (ns: Node = first, js: Int = 0) extends Iterator [(K, V)]: - var (n, j) = (ns, js) - def hasNext: Boolean = j < n.nKeys - 1 || n.next != null - def next (): (K, V) = - if j < n.nKeys - 1 then j += 1 else { n = n.next; j = 0 } - (n.key(j), n.ref(j).asInstanceOf [V]) - end next - end TreeIterator - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the elements in this B+Tree. - * @see scala.collection.IterableOnce - */ - def iterator: Iterator [(K, V)] = new TreeIterator () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the elements in this B+Tree starting - * from key start. Returns null if all keys in tree are smaller than start. 
- * @see scala.collection.SortedMapOps - * @param start the key to start with - */ - def iteratorFrom (start: K): Iterator [(K, V)] = - val (ns, js) = findp (start, root) - if ns != null then new TreeIterator (ns, js) - else null - end iteratorFrom - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the keys in this B+Tree starting - * from key start. - * @see scala.collection.SortedMapOps - * @param start the key to start with - */ - def keysIteratorFrom (start: K): Iterator [K] = - throw new UnsupportedOperationException ("keysIteratorFrom not available, use iteratorFrom instead") - end keysIteratorFrom - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the ordering for this B+Tree. - * @see scala.collection.SortedOps - */ - def ordering: Ordering [K] = ord - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the the submap starting at from and ending before until. - * @see scala.collection.SortedOps - * @param from the starting key (inclusive) - * @param until the ending key (exclusive) - */ - def rangeImpl (from: Option [K], until: Option [K]): BpTreeMap [K, V] = - val subtree = new BpTreeMap [K, V] (order) - val it = if from.isDefined then iteratorFrom (from.get) - else iterator - var cont = true - while cont && it.hasNext do - val (k, v) = it.next () - if ! until.isDefined || ord.lt (k, until.get) then subtree.addOne ((k, v)) - else cont = false - end while - subtree - end rangeImpl - -// def rangeImpl (from: Option [K], until: Option [K]): SortedMap [K, V @uncheckedVariance] = ??? - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the value associated with the key by looking it up in this B+Tree. 
- * @param key the key used for look up - */ - def get (key: K): Option [V] = Option (find (key, root)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add one key-value pair into this B+Tree and return this (called by put). - * Note: it splits the node that overflows - * @param elem the key-value pair to add/insert - */ - def addOne (elem: (K, V)): this.type = - val (key, value) = elem - keyCount += 1 // increment the key count - insert (key, value, root) - this - end addOne - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Subtract/remove the one element (key-value pair) with the given key. - * Note: it merges home nodes upon underflow - * @param key the key whose element is to be removed - */ - def subtractOne (key: K): this.type = - keyCount -= 1 // decrement the key count - // FIX - to be implemented - this - end subtractOne - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show/print this B+Tree. - */ - def show (): Unit = - println ("BpTreeMap") - printT (root, 0) - println ("-" * 60) - end show - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Print this B+Tree map using a preorder traversal and indenting each level. - * @param n the current node to print - * @param level the current level in the B+Tree - */ - private def printT (n: Node, level: Int): Unit = - println ("-" * 60) - print ("\t" * level + "[ . ") - for i <- 0 until n.nKeys do print (s"${n.key(i)} . ") - println ("]") - if ! n.isLeaf then - for j <- 0 to n.nKeys do printT (n.asInstanceOf [Node].ref(j).asInstanceOf [Node], level + 1) - end if - end printT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the size of this B+Tree. 
- */ - override def size: Int = keyCount - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for finding key in this B+tree. - * @param key the key to find - * @param n the current node - */ - private def find (key: K, n: Node): V = - count += 1 - val ip = n.find (key) - println (s"find: ip = $ip"); n.show () - show () - if n.isLeaf then - if ip < n.nKeys && key == n.key(ip) then n.ref(ip).asInstanceOf [V] - else null.asInstanceOf [V] - else - find (key, n.ref(ip).asInstanceOf [Node]) - end if - end find - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for finding key in this B+tree. - * @param key the key to find - * @param n the current node - */ - private def findp (key: K, n: Node): (Node, Int) = - count += 1 - val ip = n.find (key) - if n.isLeaf then - if ip < n.nKeys && key == n.key(ip) then (n, ip) - else (null, -1) - else - findp (key, n.ref(ip).asInstanceOf [Node]) - end if - end findp - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for inserting key and ref into this B+tree. - * @param key the key to insert - * @param ref the value/node to insert - * @param n the current node - */ - private def insert (key: K, ref: V, n: Node): (K, Node) = - var kd_rt: (K, Node) = null - if n.isLeaf then // handle leaf node - kd_rt = add (n, key, ref) - if kd_rt != null then - if n != root then return kd_rt - root = new Node (root, kd_rt._1, kd_rt._2) - else // handle internal node - kd_rt = insert (key, ref, n.ref(n.find (key)).asInstanceOf [Node]) - if kd_rt != null then - kd_rt = addI (n, kd_rt._1, kd_rt._2) - if kd_rt != null && n == root then root = new Node (root, kd_rt._1, kd_rt._2) - end if - kd_rt - end insert - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add a new key k and value v into leaf node n. 
If it is already full, a split will - * be triggered, in which case the divider key and new right sibling node are returned. - * @param n the current node - * @param k the new key - * @param v the new left value - */ - private def add (n: Node, k: K, v: V): (K, Node) = - var kd_rt: (K, Node) = null // divider key, right sibling - var split = false - if n.nKeys == mxk then - split = true - debug ("add", s"before leaf split: n = $n") - kd_rt = n.split () // split n -> r & rt - debug ("add", s"after leaf split: n = $n \nkd_rt = $kd_rt") - if ord.gt (k, n.key(n.nKeys - 1)) then - kd_rt._2.wedge (k, v, kd_rt._2.find (k), true) // wedge into right sibling - return kd_rt - end if - n.wedge (k, v, n.find (k), true) // wedge into current node - if split then kd_rt else null - end add - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add a new key 'k' and value 'v' into internal node 'n'. If it is already - * full, a 'split' will be triggered, in which case the divider key and new - * right sibling node are returned. 
- * @param n the current node - * @param k the new key - * @param v the new left value - */ - private def addI (n: Node, k: K, v: Any): (K, Node) = - debug ("addI", s"k = $k"); n.show () - var kd_rt: (K, Node) = null // divider key, right sibling - var split = false - if n.nKeys == mxk then - split = true - debug ("addI", s"before internal split: n = $n") - kd_rt = n.split () - // split n -> n & rt - val promotedvalue = n.key(n.nKeys-1) - n.nKeys -= 1 // remove promoted smallest right key - debug ("addI", s"after internal split: n = $n \nkd_rt = $kd_rt") - if ord.gt (k, promotedvalue) then - kd_rt._2.wedge (k, v, kd_rt._2.find (k), false) // wedge into right sibling - return kd_rt - end if - n.wedge (k, v, n.find (k), false) // wedge into current node - if split then kd_rt else null - end addI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge node chain 'isplit-1' by merging this node chain with the last - * node chain. Deccrement 'isplit'. If 'isplit' becomes negative, - * set back to previous phase. - */ - private def merge (): Unit = - debug ("merge", s"node that underflows") - end merge - -end BpTreeMap - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest` main function used for testing the `BpTreeMap` class by - * inserting increasing key values. 
- * > runMain scalation.database.bpTreeMapTest - */ -@main def bpTreeMapTest (): Unit = - - val tree = new BpTreeMap [Int, Int] () - - val totKeys = 26 - for i <- 1 until totKeys by 2 do - tree.put (i, i * i) - tree.show () - println ("=" * 60) - end for - for i <- 0 until totKeys do println (s"key = $i, value = ${tree.get(i)}") - println ("-" * 60) - for it <- tree.iterator do println (it) - println ("-" * 60) - tree.foreach (println (_)) - println ("-" * 60) - println (s"size = ${tree.size}") - println (s"Average number of nodes accessed = ${tree.count / totKeys.toDouble}") - -end bpTreeMapTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest2` main function used for testing the `BpTreeMap` class by - * inserting random key values. - * > runMain scalation.database.bpTreeMapTest2 - */ -@main def bpTreeMapTest2 (): Unit = - - val tree = new BpTreeMap [Int, Int] () - - val totKeys = 50 - val mx = 10 * totKeys - - // for unique random integers - -// import scalation.random.RandiU0 // comment out due to package dependency -// val stream = 2 -// val rng = RandiU0 (mx, stream) -// for i <- 1 until totKeys do tree.put (rng.iigen (mx), i * i) - - // for random integers - import java.util.Random - val seed = 1 - val rng = new Random (seed) - for i <- 1 until totKeys do - val key = rng.nextInt (mx) - banner (s"put key = $key") - tree.put (key, i * i) - end for - - tree.show () - println ("-" * 60) - println (s"size = ${tree.size}") - println (s"Average number of nodes accessed = ${tree.count / totKeys.toDouble}") - -end bpTreeMapTest2 - diff --git a/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.bak3 b/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.bak3 deleted file mode 100644 index 40a731e05..000000000 --- a/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.bak3 +++ /dev/null @@ -1,529 +0,0 @@ - 
-//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Fri Aug 11 00:26:03 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @note Sorted Map Implemented Using B+Trees (Indexed and Sequential Access) - * - * Split Nodes on Overflow - * Structure for order = 5 (max of 4 keys), upon first split - * [ . k4 . -- . -- . -- . ] - * [ . k1 . k2 . k3 . -- . ] - * [ . k4 . k5 . -- . -- . ] - * Rules: divider key (k4 added to parent in this case) is the smallest key - * in the right subtree (SMALLEST RIGHT) - * split node n into (n, right_sibling_node) with larger half staying in n - * internal node split promotes middle key to parent as the divider key - * Borrow/Merge Nodes on Underflow - * Rules: try to borrow one key from an adjacent (left or right) rich sibling node - * otherwise merge with sibling node - */ - -package scalation -package database.bptree - -import scala.collection.mutable.AbstractMap // , SortedMap} -import scala.reflect.ClassTag - -import BpNode._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BpTreeMap` class provides sorted maps that use the B+Tree Data Structure. - * Inserts may cause the splitting of nodes, while deletes may cause borrowing - * if keys or merging of nodes. 
- * @tparam V the type of the values assigned to keys in this sorted map - */ -class BpTreeMap [V: ClassTag] () - extends AbstractMap [ValueType, V] -// with SortedMap [ValueType, V] - with Serializable: - - private val debug = debugf ("BpTreeMap", true) // debug function - private var kCount = 0 // counter for total number of keys - var count = 0 // count # nodes accessed (performance) - private var root = new BpNode (0, true) // root node of this B+Tree (initially empty) - private val first = root // first leaf node of this B+Tree - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `SortedMap` trait requires `Ordering` with a compare method to be defined. - * @see https://scala-lang.org/api/3.3.0/scala/math/Ordering.html - * @see ValueType.scala in `scalation.package` - */ -// def ordering: Ordering [ValueType] = ValueTypeOrd - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the size (number of keys) of this B+Tree. - */ - inline override def size: Int = kCount - -//------------------------------------------------------------------------------ -// Retrieve values or ranges (subtrees) -//------------------------------------------------------------------------------ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `TreeIterator` inner class supports iterating over all the elements - * in a B+Tree by traversing through the LEAF nodes of the tree. 
- * @param ns the starting leaf node (defaults to first) - * @param js the starting within node index (defaults to -1) - */ - class TreeIterator (ns: BpNode = first, js: Int = -1) extends Iterator [(ValueType, V)]: - var (n, j) = (ns, js) - def hasNext: Boolean = j < n.keys-1 || n.ref(0) != null - def next (): (ValueType, V) = - debug ("next", s"node n = $n") - if j < n.keys-1 then j += 1 else { n = n.ref(0).asInstanceOf [BpNode]; j = 0 } - (n(j), n.ref(j+1).asInstanceOf [V]) - end next - end TreeIterator - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the elements in this B+Tree. - * @see scala.collection.IterableOnce - */ - def iterator: Iterator [(ValueType, V)] = new TreeIterator () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the elements in this B+Tree starting - * from key start. Returns null if all keys in tree are smaller than start. - * @see scala.collection.SortedMapOps - * @param start the key to start with - def iteratorFrom (start: ValueType): Iterator [(ValueType, V)] = - val (ns, js) = findp (start, root) - if ns != null then new TreeIterator (ns, js) - else null - end iteratorFrom - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the keys in this B+Tree starting - * from key start. - * @see scala.collection.SortedMapOps - * @param start the key to start with - def keysIteratorFrom (start: ValueType): Iterator [ValueType] = - throw new UnsupportedOperationException ("keysIteratorFrom not available, use iteratorFrom instead") - end keysIteratorFrom - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the submap starting at from and ending before until. 
- * @see scala.collection.SortedOps - * @param from the starting key (inclusive) - * @param until the ending key (exclusive) - def rangeImpl (from: Option [ValueType], until: Option [ValueType]): BpTreeMap [V] = - val subtree = new BpTreeMap [V] () - val it = if from.isDefined then iteratorFrom (from.get) - else iterator - var cont = true - while cont && it.hasNext do - val (k, v) = it.next () - if ! until.isDefined || k < until.get then subtree.addOne ((k, v)) - else cont = false - end while - subtree - end rangeImpl - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the value associated with the key by looking it up in this B+Tree. - * @param key the key used for look up - */ - def get (key: ValueType): Option [V] = Option (find (key)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find the given key in this B+tree and return its corresponding value. - * Calls the recursive findp method. - * @param key the key to find - */ - inline def find (key: ValueType): V = - val (ln, ip) = findp (key, root) // leaf node, index position - if ip >= 0 then ln.ref(ip+1).asInstanceOf [V] else null.asInstanceOf [V] - end find - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for finding the position of the given key in this B+tree. - * @param key the key to find - * @param n the current node - */ - private def findp (key: ValueType, n: BpNode): (BpNode, Int) = - count += 1 - if n.isLeaf then (n, n.findEq (key)) - else findp (key, n.ref(n.find (key)).asInstanceOf [BpNode]) - end findp - -//------------------------------------------------------------------------------ -// Add key-value pairs into the B+Tree -//------------------------------------------------------------------------------ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add one key-value pair into this B+Tree and return this. 
- * Called by the put method in `AbstractMap`. - * Note: it splits the node that overflows - * @param elem the key-value pair to add/insert - */ - def addOne (elem: (ValueType, V)): this.type = - val (key, value) = elem - kCount += 1 // increment the key count - insert (key, value, root) // call the recursive insert - this - end addOne - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for inserting a key and ref into this B+tree. - * Returns the divider key and right sibling upon split, else null. - * @param key the key to insert - * @param ref the value/node to insert - * @param n the current node - */ - private def insert (key: ValueType, ref: V, n: BpNode): (ValueType, BpNode) = - var k_r: (ValueType, BpNode) = null - - if n.isLeaf then // handle LEAF node - k_r = add (n, key, ref) - if k_r != null then - if n != root then return k_r - root = new BpNode (root, k_r._1, k_r._2) // make a new root - end if - - else // handle INTERNAL node - k_r = insert (key, ref, n.ref(n.find (key)).asInstanceOf [BpNode]) - if k_r != null then - k_r = addI (n, k_r._1, k_r._2) - if k_r != null && n == root then root = new BpNode (root, k_r._1, k_r._2) - end if - end if - k_r - end insert - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add new key k and value v into LEAF node n. Upon overflow, split node n, - * in which case the divider key and new right sibling node are returned. - * @param n the current node - * @param k the new key - * @param v the new value - */ - private def add (n: BpNode, k: ValueType, v: V): (ValueType, BpNode) = - var k_r: (ValueType, BpNode) = null // divider key, right sibling - n.add (k, v) // add into node n - if n.overflow then k_r = n.split () // its full, must split - k_r - end add - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add new key k and value v into INTERNAL node n. 
Upon overflow, split node n, - * in which case the divider key and new right sibling node are returned. - * @param n the current node - * @param k the new key - * @param v the new left value (ref a node) - */ - private def addI (n: BpNode, k: ValueType, v: BpNode): (ValueType, BpNode) = - var k_r: (ValueType, BpNode) = null // divider key, right sibling - n.add (k, v) // add into node n - n.showRef () - if n.overflow then k_r = n.splitI () // its full, must split - k_r - end addI - -//------------------------------------------------------------------------------ -// Remove key-value pair from the B+Tree -//------------------------------------------------------------------------------ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Subtract/remove the one element (key-value pair) with the given key. - * Called by the remove method in `AbstractMap`. - * @param key the key whose element is to be removed - */ - def subtractOne (key: ValueType): this.type = - kCount -= 1 // decrement the key count - delete (key, root) // call the recursive delete - if ! root.isLeaf && root.keys == 0 then - root = root.ref(0).asInstanceOf [BpNode] // remove empty root by resetting the root reference - this - end subtractOne - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for deleting a key with its ref from this B+tree. 
- * @param key the key to delete - * @param n the current node - * @param par the parent node (null for root) - */ - private def delete (key: ValueType, n: BpNode, par: BpNode = null): Unit = - - if n.isLeaf then // handle LEAF node - val dp = n.findEq (key) // deletion position in LEAF node n - n.remove (dp) // remove key at index position dp - - // upon underflow do a leaf node borrow or merge - - if n != root && n.underflow then // unless root, check for underflow - println (s"delete: needs to handle underflow of LEAF node $n") - val j = par.find (key) // j-th index position in parent - val (sib, left) = richestSib (par, j) // richest sib and whether it's left - println (s"delete: leaf sib = $sib") - - if sib.rich then borrow (n, sib, left, par, j) // leaf borrow a key from rich sib - else merge (n, sib, left, par, j) // leaf merge nodes n and sib, may cause parent to underflow - end if - - else // handle INTERNAL node - val dp = n.find (key) // deletion position in INTERNAL node n - delete (key, n.ref(dp).asInstanceOf [BpNode], n) // recursive call to delete - - // upon underflow do an internal node borrow (borrowI) or merge (mergeI) - - if n != root && n.underflow then // unless root, check for underflow - println (s"delete: needs to handle underflow of INTERNAL node $n") - val j = par.find (key) // j-th index position in parent - val (sib, left) = richestSib (par, j) // richest sib and whether it's left - println (s"delete: internal sib = $sib") - - if sib.rich then borrowI (n, sib, left, par, j) // internal borrow a key from rich sib - else mergeI (n, sib, left, par, j - 1) // internal merge nodes n and sib, may cause parent to underflow - end if - - end if - end delete - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return node n's richest sibling (and whether it is left/true or right/false) - * found from parent node. 
- * @param par the parent node of node n that has underflowed - * @param i the position of node n as a child of node par - */ - private def richestSib (par: BpNode, i: Int): (BpNode, Boolean) = - debug ("richSib", s"return node n's (@ $i) richest sibling (left or right)") - val leftn = if i-1 >= 0 then par.ref(i-1).asInstanceOf [BpNode] else null - val rightn = if i+1 <= par.keys then par.ref(i+1).asInstanceOf [BpNode] else null - - if leftn == null then (rightn, false) - else if rightn == null then (leftn, true) - else if leftn.keys >= rightn.keys then (leftn, true) - else (rightn, false) - end richestSib - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Borrow a key-value pair from a rich sibling, so LEAF node n won't underflow. - * For borrow LEFT, last key in left sib k2 moves to n, then k2 replaces k3 in par. - * [ ... k3 ... ] [ ... k2 ... ] - * [ ... k1 k2 ] [ k3 ... ] TO [ ... k1 ] [ k2 k3 ... ] - * @param n the current node that has underflowed - * @param sib the rich sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def borrow (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Unit = - val i = if left then sib.keys-1 else 0 - val bkey = sib(i) - debug ("borrow", s"key $bkey from rich sib = $sib node for node n = $n having par = $par at j = $j") - val bref = sib.ref(i).asInstanceOf [V] - sib.remove (i) - add (n, bkey, bref) - par(j-1) = if left then bkey else sib(0) // the divider key for parent node - end borrow - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Borrow a key-value pair from a rich sibling, so INTERNAL node n won't underflow. - * For borrow LEFT, last key in left sib k2 rotates into par, whose key k3 rotates to n. - * [ ... k3 ... ] [ ... k2 ... ] - * [ ... k1 k2 ] [ k4 ... ] TO [ ... k1 ] [ k3 k4 ... 
] - * @param n the current node that has underflowed - * @param sib the rich sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def borrowI (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Unit = - val i = if left then sib.keys-1 else 0 -// val bkey = sib(i) - val bkey = par(j-1) - debug ("borrow", s"key $bkey from rich sib = $sib node for node n = $n having par = $par at j = $j") - val bref = sib.ref(i).asInstanceOf [V] - sib.remove (i) - add (n, bkey, bref) - par(j-1) = if left then bkey else sib(0) // the divider key for parent node - end borrowI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge LEAF node n with its sibling returning whether the parent node - * has underflowed. - * @param n the current node that has underflowed - * @param sib the sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def merge (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Boolean = - debug ("merge", s"LEAF node n = $n that underflows with sib = $sib having par = $par at j = $j") - if left then sib.merge (n) - else n.merge (sib) - par.remove (j) // true means parent underflowed - end merge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge INTERNAL node n with its sibling returning whether the parent node - * has underflowed. 
- * @param n the current node that has underflowed - * @param sib the sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def mergeI (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Boolean = - debug ("mergeI", s"INTERNAL node n = $n that underflows with sib = $sib having par = $par at j = $j") - if left then sib.mergeI (par.key(j), n) - else n.mergeI (par.key(j), sib) - par.remove (j) // true means parent underflowed - end mergeI - -//------------------------------------------------------------------------------ -// Print/show the B+Tree -//------------------------------------------------------------------------------ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show/print this B+Tree. - */ - def show (): Unit = - println ("BpTreeMap") - printT (root, 0) - println ("-" * 60) - end show - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Print this B+Tree map using a pre-order traversal and indenting each level. - * @param n the current node to print - * @param level the current level in the B+Tree - */ - private def printT (n: BpNode, level: Int): Unit = - if n != null then - println ("\t" * level + n) - if ! n.isLeaf then - for j <- 0 to n.keys do printT (n.ref(j).asInstanceOf [BpNode], level + 1) - end printT - -end BpTreeMap - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest` main function used for testing the `BpTreeMap` class by - * inserting increasing key values. 
- * > runMain scalation.database.bptree.bpTreeMapTest - */ -@main def bpTreeMapTest (): Unit = - - banner ("Insert Increasing Integer Keys") - val totKeys = 36 - val tree = new BpTreeMap [Int] () - - for i <- 1 until totKeys by 2 do - banner (s"put ($i, ${i * i})") - tree.put (i, i * i) - tree.show () - end for - - banner ("Find Keys") - for i <- 0 until totKeys do println (s"key = $i, value = ${tree.get(i)}") - println ("-" * 60) - - banner ("Iterate Through the B+Tree") - for it <- tree.iterator do println (it) - println ("-" * 60) - tree.foreach (println (_)) - - banner ("Print Statistics") - println (s"size = ${tree.size}") - println (s"Average number of nodes accessed = ${tree.count / totKeys.toDouble}") - - banner ("Delete Keys") - tree.show () - val toRemove = Array (29, 31, 33, 35, 27, 25) - for key <- toRemove do - banner (s"remove ($key)") - tree.remove (key) - tree.show () - end for - -end bpTreeMapTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest2` main function used for testing the `BpTreeMap` class by - * inserting random key values. - * > runMain scalation.database.bptree.bpTreeMapTest2 - */ -@main def bpTreeMapTest2 (): Unit = - - import java.util.Random - - banner ("Insert Random Integer Keys") - val totKeys = 60 - val mx = 10 * totKeys - val seed = 1 - val rng = new Random (seed) - val tree = new BpTreeMap [Int] () - - for i <- 1 to totKeys do - val key = rng.nextInt (mx) - banner (s"put ($key, ${2 * key})") - tree.put (key, 2 * key) - tree.show () - end for - - banner ("Print Statistics") - println (s"size = ${tree.size}") - println (s"Average number of nodes accessed = ${tree.count / totKeys.toDouble}") - -end bpTreeMapTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest3` main function used for testing the `BpTreeMap` class by - * inserting keys and values into B+Trees, one representing each of two lanes. 
- * > runMain scalation.database.bptree.bpTreeMapTest3 - */ -@main def bpTreeMapTest3 (): Unit = - - import java.util.Random - - case class Car (vin: Int, dist: Double) - - banner ("Insert Random Integer Keys") - val totKeys = 60 - val seed = 1 - val rng = new Random (seed) - val lane1 = new BpTreeMap [Car] () // index for lane1 - val lane2 = new BpTreeMap [Car] () // index for lane2 - - var dist = 0.0 // distance from end of lane - var ord = 0 // rank order from end of lane - for i <- 1 to totKeys do - dist += rng.nextInt (5) - val c_i = Car (i, dist) // the car being put into lane1's B+Tree - ord += 10 // rank order of car toward end of lane - banner (s"put ($ord, $c_i)") - lane1.put (ord, c_i) - lane1.show () - end for - - dist = 0.0 // distance from end of lane - ord = 0 - for i <- 1 to totKeys do - dist += rng.nextInt (5) - val c_i = Car (totKeys + i, dist) // the car being put into lane2's B+Tree - ord += 10 // rank order of car toward end of lane - banner (s"put ($ord, $c_i)") - lane2.put (ord, c_i) - lane2.show () - end for - - // find the j-th car in lane1 call it car1 - // find the corresponding j-th car in lane2 call it car2 - // check whether car2 is behind car1 in the other lane - // may need a doubly linked list of nodes at the leaf-level to search forward and backward - // find the closest car in the other lane that is behind you - // if its distance is large enough, make the lane change - // may need gaps in ord so lane changing car can get an ord without making all care reassign theirs - -end bpTreeMapTest3 - diff --git a/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.bak4 b/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.bak4 deleted file mode 100644 index 8e28f3904..000000000 --- a/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.bak4 +++ /dev/null @@ -1,765 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * 
@date Fri Aug 11 00:26:03 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @note Sorted Map Implemented Using B+Trees (Indexed and Sequential Access) - * - * Split Nodes on Overflow - * Structure for order = 5 (max of 4 keys), upon first split - * [ . k4 . -- . -- . -- . ] - * [ . k1 . k2 . k3 . -- . ] - * [ . k4 . k5 . -- . -- . ] - * Rules: divider key (k4 added to parent in this case) is the smallest key - in the right subtree (SMALLEST RIGHT) - * split node n into (n, right_sibling_node) with larger half staying in n - * internal node split promotes middle key to parent as the divider key - * - * Borrow/Merge Nodes on Underflow - * Rules: try to borrow one key from an adjacent (left or right) rich sibling node - * otherwise merge with sibling node - * - * Optionally supports bidirectional linkage of leaf nodes for Sequential Access - * forward via ref(0) [ n1 ] -> [ n2 ] -> [ n3 ] - * backward via pre [ n1 ] <- [ n2 ] <- [ n3 ] optional DLINK = true - */ - -package scalation -package database - -import scala.collection.mutable.{AbstractMap, SortedMap} -import scala.reflect.ClassTag - -import BpNode._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BpTreeMap` class provides sorted maps that use the B+Tree Data Structure. - * Inserts may cause the splitting of nodes, while deletes may cause borrowing - * if keys or merging of nodes. - * @see `BpNode` for the specification of the order of the B+Tree. 
- * @tparam V the type of the values assigned to keys in this sorted map - * @param name the name of the B+Tree (used for indentification) - */ -class BpTreeMap [V: ClassTag] (name: String = "BpTreeMap") - extends AbstractMap [ValueType, V] - with SortedMap [ValueType, V] - with Serializable: - - private val debug = debugf ("BpTreeMap", true) // debug function - private val flaw = flawf ("BpTreeMap") // flaw function - - private var kCount = 0 // counter for total number of keys - private [database] var count = 0 // count # nodes accessed (performance) - private var root = new BpNode (0, true) // root node of this B+Tree (initially empty) - private val first = root // first leaf node of this B+Tree - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the first value in the B+Tree (note ref(0) points at next leaf node). - */ - def getFirst: V = first.ref(1).asInstanceOf [V] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `SortedMap` trait requires `Ordering` with a compare method to be defined. - * @see https://scala-lang.org/api/3.3.0/scala/math/Ordering.html - * @see ValueType.scala in `scalation.package` - */ - def ordering: Ordering [ValueType] = ValueTypeOrd - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the size (number of keys) of this B+Tree. - */ - inline override def size: Int = kCount - - //-------------------------------------------------------------------------- - // Retrieve values or ranges (subtrees) - //-------------------------------------------------------------------------- - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `TreeIterator` inner class supports iterating over all the elements - * in a B+Tree by traversing through the LEAF nodes of the tree. 
- * @param ns the starting leaf node (defaults to first) - * @param js the starting within node index (defaults to -1) - */ - class TreeIterator (ns: BpNode = first, js: Int = -1) extends Iterator [(ValueType, V)]: - var (n, j) = (ns, js) - - def hasNext: Boolean = j < n.keys-1 || n.ref(0) != null - - def next (): (ValueType, V) = -// debug ("next", s"node n = $n, j = $j, n.keys = ${n.keys}") - if j < n.keys-1 then j += 1 - else { n = n.ref(0).asInstanceOf [BpNode]; j = 0 } - (n(j), n.ref(j+1).asInstanceOf [V]) - end next - end TreeIterator - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the elements in this B+Tree. - * @see scala.collection.IterableOnce - */ - def iterator: Iterator [(ValueType, V)] = new TreeIterator () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the elements in this B+Tree starting - * from key start. Returns null if all keys in tree are smaller than start. - * @see scala.collection.SortedMapOps - * @param start the key to start with (inclusive) - */ - def iteratorFrom (start: ValueType): Iterator [(ValueType, V)] = - val (ns, js) = findp (start, root) // find position: node ns and key index js - debug ("iteratorFrom", s"(ns, js) = ($ns, $js)") - val jjs = math.max (js-1, -1) - if ns != null then new TreeIterator (ns, jjs) - else null - end iteratorFrom - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the keys in this B+Tree starting - * from key start. 
- * @see scala.collection.SortedMapOps - * @param start the key to start with - */ - def keysIteratorFrom (start: ValueType): Iterator [ValueType] = - throw new UnsupportedOperationException ("keysIteratorFrom not available, use iteratorFrom instead") - end keysIteratorFrom - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the submap starting at from and ending before until. - * @see scala.collection.SortedOps - * @param from the starting key (inclusive) - * @param until the ending key (exclusive) - */ - def rangeImpl (from: Option [ValueType], until: Option [ValueType]): BpTreeMap [V] = - val subtree = new BpTreeMap [V] (name + from) - val it = if from.isDefined then iteratorFrom (from.get) - else iterator - var cont = true - while cont && it.hasNext do - val (k, v) = it.next () - if ! until.isDefined || k < until.get then - if ! from.isDefined || k >= from.get then subtree.addOne ((k, v)) - else - cont = false - subtree - end rangeImpl - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the value associated with the key by looking it up in this B+Tree. - * @param key the key used for look up - */ - def get (key: ValueType): Option [V] = Option (find (key)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find the given key in this B+tree and return its corresponding value. - * Calls the recursive findp method. - * @param key the key to find - */ - inline def find (key: ValueType): V = - val (ln, ip) = findp (key, root) // leaf node, index position - if ip >= 0 then ln.ref(ip+1).asInstanceOf [V] - else null.asInstanceOf [V] - end find - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for finding the position of the given key in this B+tree. 
- * @param key the key to find - * @param n the current node - */ - private def findp (key: ValueType, n: BpNode): (BpNode, Int) = - count += 1 - if n.isLeaf then (n, n.findEq (key)) - else findp (key, n.ref(n.find (key)).asInstanceOf [BpNode]) - end findp - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find the first (key, value) pair whose key is larger than (beyond) the search key (skey). - * @author Amily Chowdhury - * @param skey the search key - */ - def findFirstBeyond (skey: ValueType): Option [(ValueType, V)] = - val (ln, ip) = findp (skey, root) // locate leaf node and index position for skey - if ln != null then - if ip >= 0 && ip < ln.keys - 1 then - // Case 1: If the key is found and not the last one, return the next key in the leaf node - Some ((ln(ip + 1), ln.ref(ip + 2).asInstanceOf[V])) // use ip + 1 to get the next key - else - // Case 2: Move to the next leaf node and return the first key there - val nxLeaf = ln.ref(0).asInstanceOf [BpNode] // move to the next leaf node - if nxLeaf != null && nxLeaf.keys > 0 then - Some ((nxLeaf(0), nxLeaf.ref(1).asInstanceOf [V])) // get first key and reference - else - None - else - None - end findFirstBeyond - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find the last (key, value) pair whose key is smaller than (below) the search key (skey). 
- * @author Amily Chowdhury - * @param skey the search key - */ - def findLastBelow (skey: ValueType): Option [(ValueType, V)] = - val (ln, ip) = findp (skey, root) // locate leaf node and index position for skey - if ln != null then - if ip > 0 then - // Case 1: If the key is found and not the first one, return the previous key in the leaf node - Some ((ln.key(ip - 1), ln.ref(ip).asInstanceOf [V])) // use ip - 1 to get the previous key - else - // Case 2: Move to the previous leaf node and return the last key there - val pvLeaf = ln.pre // move to the previous leaf node - if pvLeaf != null && pvLeaf.keys > 0 then - val iLast = pvLeaf.keys - 1 - Some ((pvLeaf(iLast), pvLeaf.ref(pvLeaf.keys).asInstanceOf [V])) // get last key and reference - else - None - else - None - end findLastBelow - - //-------------------------------------------------------------------------- - // Add key-value pairs into the B+Tree - //-------------------------------------------------------------------------- - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add one key-value pair into this B+Tree and return this. - * Called by the put method in `AbstractMap`. - * Note: it splits the node that overflows - * @param elem the key-value pair to add/insert - */ - def addOne (elem: (ValueType, V)): this.type = - val (key, value) = elem - insert (key, value, root) // call the recursive insert - this - end addOne - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for inserting a key and ref into this B+tree. - * Returns the divider key and right sibling upon split, else null. 
- * @param key the key to insert - * @param ref the value/node to insert - * @param n the current node - */ - private def insert (key: ValueType, ref: V, n: BpNode): (ValueType, BpNode) = - var k_r: (ValueType, BpNode) = null - - if n.isLeaf then // handle LEAF node - k_r = add (n, key, ref) - if k_r != null then - if n != root then return k_r - root = new BpNode (root, k_r._1, k_r._2) // make a new root - - else // handle INTERNAL node - k_r = insert (key, ref, n.ref(n.find (key)).asInstanceOf [BpNode]) - if k_r != null then - k_r = addI (n, k_r._1, k_r._2) - if k_r != null && n == root then root = new BpNode (root, k_r._1, k_r._2) - end if - k_r - end insert - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add new key k and value v into LEAF node n. Upon overflow, split node n, - * in which case the divider key and new right sibling node are returned. - * @param n the current node - * @param k the new key - * @param v the new value - */ - private def add (n: BpNode, k: ValueType, v: V): (ValueType, BpNode) = - var k_r: (ValueType, BpNode) = null // divider key, right sibling - val duplicate = n.add (k, v) // add into node n - if duplicate == None then kCount += 1 // increment the key count unless its a duplicate - else flaw ("add", s"key $k is a duplicate, old value = $duplicate") - if n.overflow then k_r = n.split () // its full, must split - k_r - end add - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add new key k and value v into INTERNAL node n. Upon overflow, split node n, - * in which case the divider key and new right sibling node are returned. 
- * @param n the current node - * @param k the new key - * @param v the new left value (ref a node) - */ - private def addI (n: BpNode, k: ValueType, v: BpNode): (ValueType, BpNode) = - var k_r: (ValueType, BpNode) = null // divider key, right sibling - n.add (k, v) // add into node n -// n.showRef () - if n.overflow then k_r = n.splitI () // its full, must split - k_r - end addI - - //-------------------------------------------------------------------------- - // Remove key-value pair from the B+Tree - //-------------------------------------------------------------------------- - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove the one element (key-value pair) with the given key and return - * whether it matches the value expected. - * @param key the key whose element is to be removed - * @param value the value expected of the removed element - */ - def checkedRemove (key: ValueType, value: V): Boolean = remove (key) == value - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Subtract/remove the one element (key-value pair) with the given key. - * Called by the remove method in `AbstractMap`. - * @param key the key whose element is to be removed - */ - def subtractOne (key: ValueType): this.type = - kCount -= 1 // decrement the key count - delete (key, root) // call the recursive delete - if ! root.isLeaf && root.keys == 0 then - root = root.ref(0).asInstanceOf [BpNode] // remove empty root by resetting the root reference - this - end subtractOne - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for deleting a key with its ref from this B+tree. 
- * @param key the key to delete - * @param n the current node - * @param par the parent node (null for root) - */ - private def delete (key: ValueType, n: BpNode, par: BpNode = null): Unit = - - if n.isLeaf then // handle LEAF node - val dp = n.findEq (key) // deletion position in LEAF node n - n.remove (dp) // remove key at index position dp - - // upon underflow do a leaf node borrow or merge - - if n != root && n.underflow then // unless root, check for underflow - val j = par.find (key) // j-th index position in parent - val (sib, left) = richestSib (par, j) // richest sib and whether it's left - debug ("delete", s"needs to handle underflow of LEAF node n = $n, sib = $sib, left = $left") - - if sib.rich then borrow (n, sib, left, par, j) // leaf borrow a key from rich sib - else merge (n, sib, left, par, j) // leaf merge nodes n and sib, may cause parent to underflow - end if - - else // handle INTERNAL node - val dp = n.find (key) // deletion position in INTERNAL node n - delete (key, n.ref(dp).asInstanceOf [BpNode], n) // recursive call to delete - - // upon underflow do an internal node borrow (borrowI) or merge (mergeI) - - if n != root && n.underflow then // unless root, check for underflow - val j = par.find (key) // j-th index position in parent - val (sib, left) = richestSib (par, j) // richest sib and whether it's left - debug ("delete", s"needs to handle underflow of INTERNAL node n = $n, sib = $sib, left = $left") - - if sib.rich then borrowI (n, sib, left, par, j) // internal borrow a key from rich sib - else mergeI (n, sib, left, par, j) // internal merge nodes n and sib, may cause parent to underflow - end if - - end if - end delete - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return node n's richest sibling (and whether it is left/true or right/false) - * found from parent node. 
- * @param par the parent node of node n that has underflowed - * @param i the position of node n as a child of node par - */ - private def richestSib (par: BpNode, i: Int): (BpNode, Boolean) = - debug ("richSib", s"return node n's (@ $i) richest sibling (left or right)") - val leftn = if i-1 >= 0 then par.ref(i-1).asInstanceOf [BpNode] else null - val rightn = if i+1 <= par.keys then par.ref(i+1).asInstanceOf [BpNode] else null - - if leftn == null then (rightn, false) - else if rightn == null then (leftn, true) - else if leftn.keys >= rightn.keys then (leftn, true) - else (rightn, false) - end richestSib - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Borrow a key-value pair from a rich sibling, so LEAF node n won't underflow. - * For borrow LEFT, last key in left sib k2 moves to n, then k2 replaces k3 in par. - * [ ... k3 ... ] [ ... k2 ... ] - * [ ... k1 k2 ] [ k3 ... ] TO [ ... k1 ] [ k2 k3 ... ] - * @param n the current node that has underflowed - * @param sib the rich sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def borrow (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Unit = - val i = if left then sib.keys-1 else 0 - val bkey = sib(i) - debug ("borrow", s"key $bkey from rich sib = $sib node for node n = $n having par = $par at j = $j, left=$left") - val bref = sib.ref(i+1).asInstanceOf [V] - sib.remove (i) - add (n, bkey, bref); kCount -= 1 // key is moved, not really added (=> -= 1) - if left then par(j-1) = bkey else par(j) = sib(0) // the divider key for parent node - end borrow - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Borrow a key-value pair from a rich sibling, so INTERNAL node n won't underflow. - * For borrow LEFT, last key in left sib k2 rotates into par, whose key k3 rotates to n. - * [ ... k3 ... ] [ ... k2 ... ] - * [ ... 
k1 k2 ] [ k4 ... ] TO [ ... k1 ] [ k3 k4 ... ] - * @param n the current node that has underflowed - * @param sib the rich sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def borrowI (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Unit = - - if left then // borrow from LEFt sib - val ip = 0 // node n's insertion position is 0 - debug ("borrowI", s"LEFT sib = $sib, n = $n [ip = $ip], par = $par [j = $j]") - n.shiftR (ip) // shift right to make room in node n - n.key(ip) = par.key(j) // move par key @ j to node n - n.ref(ip) = sib.ref(sib.keys) // move sib's last ref to node n - n.keys += 1 - - par.key(j) = sib.key(sib.keys-1) // promote sib's last key to par @ j - sib.keys -= 1 // effectively removes last key in sib - - else // borrow from RIGHT sib - val ip = n.keys // node n's insertion position is n.keys - debug ("borrowI", s"n = $n [ip = $ip], RIGHT sib = $sib, par = $par [j = $j]") - n.key(ip) = par.key(j) // move par key @ j to node n - n.ref(ip+1) = sib.ref(0) // move sib's first ref to node n - n.keys += 1 - - par.key(j) = sib.key(0) // promote sib's first key to par @ j - sib.removeI (0) // remove sib's first key - end borrowI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge LEAF node n with its sibling returning whether the parent node - * has underflowed. 
- * @param n the current node that has underflowed - * @param sib the sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def merge (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Boolean = - debug ("merge", s"LEAF node n = $n that underflows with sib = $sib having par = $par at j = $j ,left=$left") - if left then {sib.merge (n); par.remove(j-1)} - else { n.merge (sib); par.remove (j) } // true means parent underflowed - end merge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge INTERNAL node n with its sibling returning whether the parent node - * has underflowed. - * @param n the current node that has underflowed - * @param sib the sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def mergeI (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Boolean = - if left then - debug ("mergeI", s"LEFT sib = $sib, n = $n, par = $par [j = $j]") - sib.mergeI (par.key(j-1), n) - par.remove(j-1) // true means parent underflowed - else - debug ("mergeI", s"n = $n, RIGHT sib = $sib, par = $par [j = $j]") - n.mergeI (par.key(j), sib) - par.remove (j) // true means parent underflowed - end mergeI - - //-------------------------------------------------------------------------- - // Print/show the B+Tree - //-------------------------------------------------------------------------- - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether this B+Tree has the same entries, (key, value) pairs, - * (in the same order) as another sorted map. - * @param that the other sorted map - */ - infix def equals (that: SortedMap [ValueType, V]): Boolean = - println (s"this.size = ${this.size} ==? 
that.size = ${that.size}") - if this.size != that.size then return false - var same = true - val it1 = this.iterator - val it2 = that.iterator - while same && it1.hasNext do - val (k1, v1) = it1.next () - val (k2, v2) = it2.next () - debug ("equals", s"($k1, $v1) ==? ($k2, $v2)") - same = k1 == k2 && v1 == v2 - same - end equals - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show/print this B+Tree. - */ - def show (): Unit = - println (s"BpTreeMap_$name") - printT (root, 0) - println ("-" * 60) - end show - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show/print this B+Tree's leaf node links. - */ - def showLink (): Unit = - println (s"BpTreeMap_$name Leaf Node Linkage") - println("=" * 60) - var n = first - while n != null do { n.show (); n = n.ref(0).asInstanceOf [BpNode]} - println("=" * 60) - end showLink - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Print this B+Tree map using a pre-order traversal and indenting each level. - * @param n the current node to print - * @param level the current level in the B+Tree - */ - private def printT (n: BpNode, level: Int): Unit = - if n != null then - println ("\t" * level + n) - if ! n.isLeaf then - for j <- 0 to n.keys do printT (n.ref(j).asInstanceOf [BpNode], level + 1) - end printT - -end BpTreeMap - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest` main function used for testing the `BpTreeMap` class by - * inserting increasing key values. 
- * > runMain scalation.database.bpTreeMapTest - */ -@main def bpTreeMapTest (): Unit = - - banner ("Insert Increasing Integer Keys") - val totKeys = 60 - val tree = new BpTreeMap [Int] ("Test") - - for i <- 1 until totKeys by 2 do - banner (s"put ($i, ${i * i})") - tree.put (i, i * i) - tree.show () - tree.showLink() - end for - - banner ("Find Keys") - for i <- 0 until totKeys do println (s"key = $i, value = ${tree.get(i)}") - println ("-" * 60) - - banner ("Iterate Through the B+Tree") - for it <- tree.iterator do println (it) - println ("-" * 60) - tree.foreach (println (_)) - - banner ("Iterate Through the B+Tree from Key = 11") - for it <- tree.iteratorFrom (11) do println (it) - println ("-" * 60) - tree.foreach (println (_)) - println ("-" * 60) - - banner ("Find Keys in Range") - println (s"Range Query 11 until 20: key-value pairs = ${tree.range (11, 20)}") - println ("-" * 60) - - banner ("Print Statistics") - println (s"size = ${tree.size}") - println (s"Average number of nodes accessed = ${tree.count / totKeys.toDouble}") - - banner ("Delete Keys") - tree.show () -// val toRemove = Array (7, 1, 3, 5, 9, 21, 17, 19, 21, 11, 15, 13, 29, 31, 27, 33, 35, 23, 39, 37, 41, 25) -// val toRemove = Array (29, 31, 27, 33, 35, 25, 23, 13, 7, 1, 3, 5, 9, 21, 17, 19, 21, 11, 15, 37, 39, 41, 43, 49, 47, 51, 45) - val toRemove = Array (29, 31,27, 33, 35, 25, 23, 13, 7, 1, 59, 55, 47, 53, 3, 5, 9, 21, 17, 19, 21, 11, 15, 37, 39, 41, 43, 49, 47, 51, 45) - - for key <- toRemove do - banner (s"remove ($key)") - tree.remove (key) - tree.show () - tree.showLink() - end for - -end bpTreeMapTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest2` main function used for testing the `BpTreeMap` class by - * inserting random key values. 
- * > runMain scalation.database.bpTreeMapTest2 - */ -@main def bpTreeMapTest2 (): Unit = - - import java.util.Random - - banner ("Insert Random Integer Keys") - val totKeys = 60 - val mx = 10 * totKeys - val seed = 1 - val rng = new Random (seed) - val tree = new BpTreeMap [Int] ("Test2") - - for i <- 1 to totKeys do - val key = rng.nextInt (mx) - banner (s"put ($key, ${2 * key})") - tree.put (key, 2 * key) - tree.show () - tree.showLink() - end for - - banner ("Print Statistics") - println (s"size = ${tree.size}") - println (s"Average number of nodes accessed = ${tree.count / totKeys.toDouble}") - -end bpTreeMapTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest3` main function used for testing the `BpTreeMap` class by - * inserting and removing keys and values into/from a B+Tree and a TreeMap. - * Performs AUTOMATED TESTING. - * > runMain scalation.database.bpTreeMapTest3 - */ -@main def bpTreeMapTest3 (): Unit = - - import java.util.Random - import scala.collection.mutable.TreeMap - - banner ("AutoTest: Insert Random Integer Keys into BpTreeMap and TreeMap") - val totKeys = 60 - val mx = 10 * totKeys - val seed = 1 - val rng = new Random (seed) - val tree = new BpTreeMap [Int] ("Test3") - val tree2 = new TreeMap [ValueType, Int] ()(ValueTypeOrd) - - for i <- 1 to totKeys do - val key = rng.nextInt (mx) - tree.put (key, 2 * key) - tree2.put (key, 2 * key) - end for - - var same = tree equals tree2 - println (s"tree equals tree2 = $same") - assert (same) - - tree.show () - - banner ("AutoTest: Remove All Keys from BpTreeMap and TreeMap") - val toRemove = tree2.keys - for key <- toRemove do - banner (s"remove ($key)") - tree.remove (key) - tree2.remove (key) - tree.show () - tree.showLink() - same = tree equals tree2 - println (s"tree equals tree2 = $same") - assert (same) - end for - -end bpTreeMapTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The 
`bpTreeMapTest4` main function used for testing the `BpTreeMap` class by - * inserting keys and values into B+Trees, and performing range queries. - * @author Amily Chowdhury - * > runMain scalation.database.bpTreeMapTest4 - */ -@main def bpTreeMapTest4 (): Unit = - - banner("Insert Increasing Integer Keys") - val tree = new BpTreeMap [String] ("Range_test") - - tree.put (147.4, "C1") - tree.put (153.4, "C2") - tree.put (163.4, "C3") - tree.put (173.4, "C4") - tree.put (180.4, "C5") - - tree.show () - tree.showLink () - - banner ("Find Keys in Range") -// val lb = 137.4 -// val ub = 180.4 - var lb = 147.4 - var ub = 163.4 - println (s"Range Query lb = $lb: until up = $ub: key-value pairs") - println (s"Range Query lb = $lb: until up = $ub: key-value pairs = ${tree.range (lb, ub)}") - println ("-" * 60) - - banner("Find First Key Beyond Upper Bound of Range") - println(s"Find First Key Beyond $ub") - var result = tree.findFirstBeyond (ub) - println(s"The first key-value pair beyond $ub is: $result") - - ub = 180.4 - println (s"Find First Key Beyond $ub") - result = tree.findFirstBeyond (ub) - println (s"The first key-value pair beyond $ub is: $result") - - banner("Find Last Key Below Lower Bound") - lb = 163.4 - println (s"Find Last Key Below $lb") - result = tree.findLastBelow (lb) - println(s"The last key-value pair below $lb is: $result") - - lb = 147.4 - println (s"Find Last Key Below $lb") - result = tree.findLastBelow (lb) - println(s"The last key-value pair below $lb is: $result") - -end bpTreeMapTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest5` main function used for testing the `BpTreeMap` class by - * inserting keys and values into B+Trees, one representing each of two lanes. - * Can be used for finding cars in a traffic simulation. 
- * > runMain scalation.database.bpTreeMapTest5 - */ -@main def bpTreeMapTest5 (): Unit = - - import java.util.Random - - case class Car (vin: Int, dist: Double) - - banner ("Insert Random Integer Keys") - val totKeys = 60 - val seed = 1 - val rng = new Random (seed) - val lane1 = new BpTreeMap [Car] ("lane1") // index for lane1 - val lane2 = new BpTreeMap [Car] ("lane2") // index for lane2 - - var dist = 0.0 // distance from end of lane - var ord = 0 // rank order from end of lane - for i <- 1 to totKeys do - dist += rng.nextInt (5) - val c_i = Car (i, dist) // the car being put into lane1's B+Tree - ord += 10 // rank order of car toward end of lane - banner (s"put ($ord, $c_i)") - lane1.put (ord, c_i) - lane1.show () - lane1.showLink() - end for - - dist = 0.0 // distance from end of lane - ord = 0 - for i <- 1 to totKeys do - dist += rng.nextInt (5) - val c_i = Car (totKeys + i, dist) // the car being put into lane2's B+Tree - ord += 10 // rank order of car toward end of lane - banner (s"put ($ord, $c_i)") - lane2.put (ord, c_i) - lane2.show () - lane2.showLink() - end for - -// find the j-th car in lane1 call it car1 -// find the corresponding j-th car in lane2 call it car2 -// check whether car2 is behind car1 in the other lane -// may need a doubly linked list of nodes at the leaf-level to search forward and backward -// find the closest car in the other lane that is behind you -// if its distance is large enough, make the lane change -// may need gaps in ord so lane changing car can get an ord without making all care reassign theirs - -end bpTreeMapTest5 - diff --git a/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.sav2 b/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.sav2 deleted file mode 100644 index 105646a49..000000000 --- a/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.sav2 +++ /dev/null @@ -1,562 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** 
@author John Miller - * @version 2.0 - * @date Fri Aug 11 00:26:03 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @note Sorted Map Implemented Using B+Trees (Indexed and Sequential Access) - * - * Split Nodes on Overflow - * Structure for order = 5 (max of 4 keys), upon first split - * [ . k4 . -- . -- . -- . ] - * [ . k1 . k2 . k3 . -- . ] - * [ . k4 . k5 . -- . -- . ] - * Rules: divider key (k4 added to parent in this case) is the smallest key - in the right subtree (SMALLEST RIGHT) - * split node n into (n, right_sibling_node) with larger half staying in n - * internal node split promotes middle key to parent as the divider key - * - * Borrow/Merge Nodes on Underflow - * Rules: try to borrow one key from an adjacent (left or right) rich sibling node - * otherwise merge with sibling node - * - * Optionally supports bidirectional linkage of leaf nodes for Sequential Access - * forward via ref(0) [ n1 ] -> [ n2 ] -> [ n3 ] - * backward via pre [ n1 ] <- [ n2 ] <- [ n3 ] optional DLINK = true - */ - -package scalation -package database - -import scala.collection.mutable.{AbstractMap, SortedMap} -import scala.reflect.ClassTag - -import BpNode._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BpTreeMap` class provides sorted maps that use the B+Tree Data Structure. - * Inserts may cause the splitting of nodes, while deletes may cause borrowing - * if keys or merging of nodes. 
- * @tparam V the type of the values assigned to keys in this sorted map - */ -class BpTreeMap [V: ClassTag] () - extends AbstractMap [ValueType, V] - with SortedMap [ValueType, V] - with Serializable: - - private val debug = debugf ("BpTreeMap", true) // debug function - private var kCount = 0 // counter for total number of keys - private [database] var count = 0 // count # nodes accessed (performance) - private var root = new BpNode (0, true) // root node of this B+Tree (initially empty) - private val first = root // first leaf node of this B+Tree - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the first value in the B+Tree (note ref(0) points at next leaf node). - */ - def getFirst: V = first.ref(1).asInstanceOf [V] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `SortedMap` trait requires `Ordering` with a compare method to be defined. - * @see https://scala-lang.org/api/3.3.0/scala/math/Ordering.html - * @see ValueType.scala in `scalation.package` - */ - def ordering: Ordering [ValueType] = ValueTypeOrd - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the size (number of keys) of this B+Tree. - */ - inline override def size: Int = kCount - - //------------------------------------------------------------------------------ - // Retrieve values or ranges (subtrees) - //------------------------------------------------------------------------------ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `TreeIterator` inner class supports iterating over all the elements - * in a B+Tree by traversing through the LEAF nodes of the tree. 
- * @param ns the starting leaf node (defaults to first) - * @param js the starting within node index (defaults to -1) - */ - class TreeIterator (ns: BpNode = first, js: Int = -1) extends Iterator [(ValueType, V)]: - var (n, j) = (ns, js) - def hasNext: Boolean = j < n.keys-1 || n.ref(0) != null - def next (): (ValueType, V) = - debug ("next", s"node n = $n") - if j < n.keys-1 then j += 1 else { n = n.ref(0).asInstanceOf [BpNode]; j = 0 } - (n(j), n.ref(j+1).asInstanceOf [V]) - end next - end TreeIterator - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the elements in this B+Tree. - * @see scala.collection.IterableOnce - */ - def iterator: Iterator [(ValueType, V)] = new TreeIterator () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the elements in this B+Tree starting - * from key start. Returns null if all keys in tree are smaller than start. - * @see scala.collection.SortedMapOps - * @param start the key to start with - */ - def iteratorFrom (start: ValueType): Iterator [(ValueType, V)] = - val (ns, js) = findp (start, root) - if ns != null then new TreeIterator (ns, js) - else null - end iteratorFrom - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the keys in this B+Tree starting - * from key start. - * @see scala.collection.SortedMapOps - * @param start the key to start with - */ - def keysIteratorFrom (start: ValueType): Iterator [ValueType] = - throw new UnsupportedOperationException ("keysIteratorFrom not available, use iteratorFrom instead") - end keysIteratorFrom - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the submap starting at from and ending before until. 
- * @see scala.collection.SortedOps - * @param from the starting key (inclusive) - * @param until the ending key (exclusive) - */ - def rangeImpl (from: Option [ValueType], until: Option [ValueType]): BpTreeMap [V] = - val subtree = new BpTreeMap [V] () - val it = if from.isDefined then iteratorFrom (from.get) - else iterator - var cont = true - while cont && it.hasNext do - val (k, v) = it.next () - if ! until.isDefined || k < until.get then subtree.addOne ((k, v)) - else cont = false - end while - subtree - end rangeImpl - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the value associated with the key by looking it up in this B+Tree. - * @param key the key used for look up - */ - def get (key: ValueType): Option [V] = Option (find (key)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find the given key in this B+tree and return its corresponding value. - * Calls the recursive findp method. - * @param key the key to find - */ - inline def find (key: ValueType): V = - val (ln, ip) = findp (key, root) // leaf node, index position - if ip >= 0 then ln.ref(ip+1).asInstanceOf [V] - else null.asInstanceOf [V] - end find - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for finding the position of the given key in this B+tree. - * @param key the key to find - * @param n the current node - */ - private def findp (key: ValueType, n: BpNode): (BpNode, Int) = - count += 1 - if n.isLeaf then (n, n.findEq (key)) - else findp (key, n.ref(n.find (key)).asInstanceOf [BpNode]) - end findp - - //------------------------------------------------------------------------------ - // Add key-value pairs into the B+Tree - //------------------------------------------------------------------------------ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add one key-value pair into this B+Tree and return this. 
- * Called by the put method in `AbstractMap`. - * Note: it splits the node that overflows - * @param elem the key-value pair to add/insert - */ - def addOne (elem: (ValueType, V)): this.type = - val (key, value) = elem - kCount += 1 // increment the key count - insert (key, value, root) // call the recursive insert - this - end addOne - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for inserting a key and ref into this B+tree. - * Returns the divider key and right sibling upon split, else null. - * @param key the key to insert - * @param ref the value/node to insert - * @param n the current node - */ - private def insert (key: ValueType, ref: V, n: BpNode): (ValueType, BpNode) = - var k_r: (ValueType, BpNode) = null - - if n.isLeaf then // handle LEAF node - k_r = add (n, key, ref) - if k_r != null then - if n != root then return k_r - root = new BpNode (root, k_r._1, k_r._2) // make a new root - end if - - else // handle INTERNAL node - k_r = insert (key, ref, n.ref(n.find (key)).asInstanceOf [BpNode]) - if k_r != null then - k_r = addI (n, k_r._1, k_r._2) - if k_r != null && n == root then root = new BpNode (root, k_r._1, k_r._2) - end if - end if - k_r - end insert - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add new key k and value v into LEAF node n. Upon overflow, split node n, - * in which case the divider key and new right sibling node are returned. - * @param n the current node - * @param k the new key - * @param v the new value - */ - private def add (n: BpNode, k: ValueType, v: V): (ValueType, BpNode) = - var k_r: (ValueType, BpNode) = null // divider key, right sibling - n.add (k, v) // add into node n - if n.overflow then k_r = n.split () // its full, must split - k_r - end add - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add new key k and value v into INTERNAL node n. 
Upon overflow, split node n, - * in which case the divider key and new right sibling node are returned. - * @param n the current node - * @param k the new key - * @param v the new left value (ref a node) - */ - private def addI (n: BpNode, k: ValueType, v: BpNode): (ValueType, BpNode) = - var k_r: (ValueType, BpNode) = null // divider key, right sibling - n.add (k, v) // add into node n - n.showRef () - if n.overflow then k_r = n.splitI () // its full, must split - k_r - end addI - - //------------------------------------------------------------------------------ - // Remove key-value pair from the B+Tree - //------------------------------------------------------------------------------ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Remove the one element (key-value pair) with the given key and return - * whether it matches the value expected. - * @param key the key whose element is to be removed - * @param value the value expected of the removed element - */ - def checkedRemove (key: ValueType, value: V): Boolean = remove (key) == value - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Subtract/remove the one element (key-value pair) with the given key. - * Called by the remove method in `AbstractMap`. - * @param key the key whose element is to be removed - */ - def subtractOne (key: ValueType): this.type = - kCount -= 1 // decrement the key count - delete (key, root) // call the recursive delete - if ! root.isLeaf && root.keys == 0 then - root = root.ref(0).asInstanceOf [BpNode] // remove empty root by resetting the root reference - this - end subtractOne - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for deleting a key with its ref from this B+tree. 
- * @param key the key to delete - * @param n the current node - * @param par the parent node (null for root) - */ - private def delete (key: ValueType, n: BpNode, par: BpNode = null): Unit = - - if n.isLeaf then // handle LEAF node - val dp = n.findEq (key) // deletion position in LEAF node n - n.remove (dp) // remove key at index position dp - - // upon underflow do a leaf node borrow or merge - - if n != root && n.underflow then // unless root, check for underflow - println (s"delete: needs to handle underflow of LEAF node $n") - val j = par.find (key) // j-th index position in parent - val (sib, left) = richestSib (par, j) // richest sib and whether it's left - println (s"delete: leaf sib = $sib") - - if sib.rich then borrow (n, sib, left, par, j) // leaf borrow a key from rich sib - else merge (n, sib, left, par, j) // leaf merge nodes n and sib, may cause parent to underflow - end if - - else // handle INTERNAL node - val dp = n.find (key) // deletion position in INTERNAL node n - delete (key, n.ref(dp).asInstanceOf [BpNode], n) // recursive call to delete - - // upon underflow do an internal node borrow (borrowI) or merge (mergeI) - - if n != root && n.underflow then // unless root, check for underflow - println (s"delete: needs to handle underflow of INTERNAL node $n") - val j = par.find (key) // j-th index position in parent - val (sib, left) = richestSib (par, j) // richest sib and whether it's left - println (s"delete: internal sib = $sib") - - if sib.rich then borrowI (n, sib, left, par, j) // internal borrow a key from rich sib - else mergeI (n, sib, left, par, j) // internal merge nodes n and sib, may cause parent to underflow - end if - - end if - end delete - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return node n's richest sibling (and whether it is left/true or right/false) - * found from parent node. 
- * @param par the parent node of node n that has underflowed - * @param i the position of node n as a child of node par - */ - private def richestSib (par: BpNode, i: Int): (BpNode, Boolean) = - debug ("richSib", s"return node n's (@ $i) richest sibling (left or right)") - val leftn = if i-1 >= 0 then par.ref(i-1).asInstanceOf [BpNode] else null - val rightn = if i+1 <= par.keys then par.ref(i+1).asInstanceOf [BpNode] else null - - if leftn == null then (rightn, false) - else if rightn == null then (leftn, true) - else if leftn.keys >= rightn.keys then (leftn, true) - else (rightn, false) - end richestSib - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Borrow a key-value pair from a rich sibling, so LEAF node n won't underflow. - * For borrow LEFT, last key in left sib k2 moves to n, then k2 replaces k3 in par. - * [ ... k3 ... ] [ ... k2 ... ] - * [ ... k1 k2 ] [ k3 ... ] TO [ ... k1 ] [ k2 k3 ... ] - * @param n the current node that has underflowed - * @param sib the rich sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def borrow (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Unit = - val i = if left then sib.keys-1 else 0 - val bkey = sib(i) - debug ("borrow", s"key $bkey from rich sib = $sib node for node n = $n having par = $par at j = $j, left=$left") - val bref = sib.ref(i).asInstanceOf [V] - sib.remove (i) - add (n, bkey, bref) - if left then par(j-1) = bkey else par(j) = sib(0) // the divider key for parent node - end borrow - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Borrow a key-value pair from a rich sibling, so INTERNAL node n won't underflow. - * For borrow LEFT, last key in left sib k2 rotates into par, whose key k3 rotates to n. - * [ ... k3 ... ] [ ... k2 ... ] - * [ ... k1 k2 ] [ k4 ... ] TO [ ... k1 ] [ k3 k4 ... 
] - * @param n the current node that has underflowed - * @param sib the rich sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def borrowI (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Unit = - val i = if left then sib.keys-1 else 0 - val bkey = sib(i) - debug ("borrowI", s"key $bkey from rich sib = $sib node for node n = $n having par = $par at j = $j") - val bref = sib.ref(i+1).asInstanceOf [BpNode] - sib.removeI (i) - addI (n, bkey, bref) - if left then par(j-1) = bkey else par(j) = bkey // the divider key for parent node - end borrowI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge LEAF node n with its sibling returning whether the parent node - * has underflowed. - * @param n the current node that has underflowed - * @param sib the sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def merge (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Boolean = - debug ("merge", s"LEAF node n = $n that underflows with sib = $sib having par = $par at j = $j ,left=$left") - if left then {sib.merge (n); par.remove(j-1)} - else { n.merge (sib); par.remove (j) } // true means parent underflowed - end merge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge INTERNAL node n with its sibling returning whether the parent node - * has underflowed. 
- * @param n the current node that has underflowed - * @param sib the sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def mergeI (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Boolean = - debug ("mergeI", s"INTERNAL node n = $n that underflows with sib = $sib having par = $par at j = $j, left =$left") - if left then sib.mergeI (par.key(j-1), n) - else n.mergeI (par.key(j), sib) - par.remove (j) // true means parent underflowed - end mergeI - - //------------------------------------------------------------------------------ - // Print/show the B+Tree - //------------------------------------------------------------------------------ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show/print this B+Tree. - */ - def show (): Unit = - println ("BpTreeMap") - printT (root, 0) - println ("-" * 60) - end show - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show/print this B+Tree's leaf node links. - */ - def showLink (): Unit = - println ("BpTreeMap Leaf Node Linkage") - println("=" * 60) - var n = first - while n != null do { n.show (); n = n.ref(0).asInstanceOf [BpNode]} - println("=" * 60) - end showLink - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Print this B+Tree map using a pre-order traversal and indenting each level. - * @param n the current node to print - * @param level the current level in the B+Tree - */ - private def printT (n: BpNode, level: Int): Unit = - if n != null then - println ("\t" * level + n) - if ! 
n.isLeaf then - for j <- 0 to n.keys do printT (n.ref(j).asInstanceOf [BpNode], level + 1) - end printT - -end BpTreeMap - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest` main function used for testing the `BpTreeMap` class by - * inserting increasing key values. - * > runMain scalation.database.bpTreeMapTest - */ -@main def bpTreeMapTest (): Unit = - - banner ("Insert Increasing Integer Keys") - val totKeys = 52 - val tree = new BpTreeMap [Int] () - - for i <- 1 until totKeys by 2 do - banner (s"put ($i, ${i * i})") - tree.put (i, i * i) - tree.show () - tree.showLink() - end for - - banner ("Find Keys") - for i <- 0 until totKeys do println (s"key = $i, value = ${tree.get(i)}") - println ("-" * 60) - - banner ("Iterate Through the B+Tree") - for it <- tree.iterator do println (it) - println ("-" * 60) - tree.foreach (println (_)) - - banner ("Print Statistics") - println (s"size = ${tree.size}") - println (s"Average number of nodes accessed = ${tree.count / totKeys.toDouble}") - - banner ("Delete Keys") - tree.show () - //val toRemove = Array (29, 31, 33, 35, 27, 25) - //val toRemove = Array (29, 31, 33, 35, 27, 25, 23, 13, 7, 1, 3, 5, 9, 21, 17, 19, 21, 11, 15 ) - val toRemove = Array(7, 1, 3, 5, 9, 21, 17, 19, 21, 11, 15, 13, 29, 31, 27, 33, 35, 23, 39, 37, 41, 25) - for key <- toRemove do - banner (s"remove ($key)") - tree.remove (key) - tree.show () - tree.showLink() - end for - -end bpTreeMapTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest2` main function used for testing the `BpTreeMap` class by - * inserting random key values. 
- * > runMain scalation.database.bpTreeMapTest2 - */ -@main def bpTreeMapTest2 (): Unit = - - import java.util.Random - - banner ("Insert Random Integer Keys") - val totKeys = 60 - val mx = 10 * totKeys - val seed = 1 - val rng = new Random (seed) - val tree = new BpTreeMap [Int] () - - for i <- 1 to totKeys do - val key = rng.nextInt (mx) - banner (s"put ($key, ${2 * key})") - tree.put (key, 2 * key) - tree.show () - tree.showLink() - end for - - banner ("Print Statistics") - println (s"size = ${tree.size}") - println (s"Average number of nodes accessed = ${tree.count / totKeys.toDouble}") - -end bpTreeMapTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest3` main function used for testing the `BpTreeMap` class by - * inserting keys and values into B+Trees, one representing each of two lanes. - * > runMain scalation.database.bpTreeMapTest3 - */ -@main def bpTreeMapTest3 (): Unit = - - import java.util.Random - - case class Car (vin: Int, dist: Double) - - banner ("Insert Random Integer Keys") - val totKeys = 60 - val seed = 1 - val rng = new Random (seed) - val lane1 = new BpTreeMap [Car] () // index for lane1 - val lane2 = new BpTreeMap [Car] () // index for lane2 - - var dist = 0.0 // distance from end of lane - var ord = 0 // rank order from end of lane - for i <- 1 to totKeys do - dist += rng.nextInt (5) - val c_i = Car (i, dist) // the car being put into lane1's B+Tree - ord += 10 // rank order of car toward end of lane - banner (s"put ($ord, $c_i)") - lane1.put (ord, c_i) - lane1.show () - end for - - dist = 0.0 // distance from end of lane - ord = 0 - for i <- 1 to totKeys do - dist += rng.nextInt (5) - val c_i = Car (totKeys + i, dist) // the car being put into lane2's B+Tree - ord += 10 // rank order of car toward end of lane - banner (s"put ($ord, $c_i)") - lane2.put (ord, c_i) - lane2.show () - end for - -// find the j-th car in lane1 call it car1 -// find the corresponding j-th car in lane2 
call it car2 -// check whether car2 is behind car1 in the other lane -// may need a doubly linked list of nodes at the leaf-level to search forward and backward -// find the closest car in the other lane that is behind you -// if its distance is large enough, make the lane change -// may need gaps in ord so lane changing car can get an ord without making all care reassign theirs - -end bpTreeMapTest3 - diff --git a/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.sav3 b/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.sav3 deleted file mode 100644 index 62e980ae8..000000000 --- a/target/scala-3.6.4/classes/scalation/database/old/BpTreeMap.scala.sav3 +++ /dev/null @@ -1,682 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Fri Aug 11 00:26:03 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @note Sorted Map Implemented Using B+Trees (Indexed and Sequential Access) - * - * Split Nodes on Overflow - * Structure for order = 5 (max of 4 keys), upon first split - * [ . k4 . -- . -- . -- . ] - * [ . k1 . k2 . k3 . -- . ] - * [ . k4 . k5 . -- . -- . 
] - * Rules: divider key (k4 added to parent in this case) is the smallest key - in the right subtree (SMALLEST RIGHT) - * split node n into (n, right_sibling_node) with larger half staying in n - * internal node split promotes middle key to parent as the divider key - * - * Borrow/Merge Nodes on Underflow - * Rules: try to borrow one key from an adjacent (left or right) rich sibling node - * otherwise merge with sibling node - * - * Optionally supports bidirectional linkage of leaf nodes for Sequential Access - * forward via ref(0) [ n1 ] -> [ n2 ] -> [ n3 ] - * backward via pre [ n1 ] <- [ n2 ] <- [ n3 ] optional DLINK = true - */ - -package scalation -package database - -import scala.collection.mutable.{AbstractMap, SortedMap} -import scala.reflect.ClassTag -import scala.runtime.ScalaRunTime.stringOf - -import BpNode._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BpTreeMap` class provides sorted maps that use the B+Tree Data Structure. - * Inserts may cause the splitting of nodes, while deletes may cause borrowing - * if keys or merging of nodes. - * @tparam V the type of the values assigned to keys in this sorted map - */ -class BpTreeMap [V: ClassTag] () - extends AbstractMap [ValueType, V] - with SortedMap [ValueType, V] - with Serializable: - - private val debug = debugf ("BpTreeMap", true) // debug function - private var kCount = 0 // counter for total number of keys - private [database] var count = 0 // count # nodes accessed (performance) - private var root = new BpNode (0, true) // root node of this B+Tree (initially empty) - private val first = root // first leaf node of this B+Tree - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `SortedMap` trait requires `Ordering` with a compare method to be defined. 
- * @see https://scala-lang.org/api/3.3.0/scala/math/Ordering.html - * @see ValueType.scala in `scalation.package` - */ - def ordering: Ordering [ValueType] = ValueTypeOrd - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the size (number of keys) of this B+Tree. - */ - inline override def size: Int = kCount - - //------------------------------------------------------------------------------ - // Retrieve values or ranges (subtrees) - //------------------------------------------------------------------------------ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `TreeIterator` inner class supports iterating over all the elements - * in a B+Tree by traversing through the LEAF nodes of the tree. - * @param ns the starting leaf node (defaults to first) - * @param js the starting within node index (defaults to -1) - */ - class TreeIterator (ns: BpNode = first, js: Int = -1) extends Iterator [(ValueType, V)]: - var (n, j) = (ns, js) - def hasNext: Boolean = j < n.keys-1 || n.ref(0) != null - def next (): (ValueType, V) = - debug ("next", s"node n = $n") - if j < n.keys-1 then j += 1 else { n = n.ref(0).asInstanceOf [BpNode]; j = 0 } - (n(j), n.ref(j+1).asInstanceOf [V]) - end next - end TreeIterator - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the elements in this B+Tree. - * @see scala.collection.IterableOnce - */ - def iterator: Iterator [(ValueType, V)] = new TreeIterator () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the elements in this B+Tree starting - * from key start. Returns null if all keys in tree are smaller than start. 
- * @see scala.collection.SortedMapOps - * @param start the key to start with - */ - def iteratorFrom (start: ValueType): Iterator [(ValueType, V)] = - val (ns, js) = findp (start, root) - if ns != null then new TreeIterator (ns, js) - else null - end iteratorFrom - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return an iterator for retrieving all the keys in this B+Tree starting - * from key start. - * @see scala.collection.SortedMapOps - * @param start the key to start with - */ - def keysIteratorFrom (start: ValueType): Iterator [ValueType] = - throw new UnsupportedOperationException ("keysIteratorFrom not available, use iteratorFrom instead") - end keysIteratorFrom - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the submap starting at from and ending before until. - * @see scala.collection.SortedOps - * @param from the starting key (inclusive) - * @param until the ending key (exclusive) - */ - def rangeImpl (from: Option [ValueType], until: Option [ValueType]): BpTreeMap [V] = - val subtree = new BpTreeMap [V] () - val it = if from.isDefined then iteratorFrom (from.get) - else iterator - var cont = true - while cont && it.hasNext do - val (k, v) = it.next () - if ! until.isDefined || k < until.get then subtree.addOne ((k, v)) - else cont = false - end while - subtree - end rangeImpl - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the value associated with the key by looking it up in this B+Tree. - * @param key the key used for look up - */ - def get (key: ValueType): Option [V] = Option (find (key)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find the given key in this B+tree and return its corresponding value. - * Calls the recursive findp method. 
- * @param key the key to find - */ - inline def find (key: ValueType): V = - val (ln, ip) = findp (key, root) // leaf node, index position - if ip >= 0 then ln.ref(ip+1).asInstanceOf [V] - else null.asInstanceOf [V] - end find - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for finding the position of the given key in this B+tree. - * @param key the key to find - * @param n the current node - */ - private def findp (key: ValueType, n: BpNode): (BpNode, Int) = - count += 1 - if n.isLeaf then (n, n.findEq (key)) - else findp (key, n.ref(n.find (key)).asInstanceOf [BpNode]) - end findp - - //------------------------------------------------------------------------------ - // Add key-value pairs into the B+Tree - //------------------------------------------------------------------------------ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add one key-value pair into this B+Tree and return this. - * Called by the put method in `AbstractMap`. - * Note: it splits the node that overflows - * @param elem the key-value pair to add/insert - */ - def addOne (elem: (ValueType, V)): this.type = - val (key, value) = elem - kCount += 1 // increment the key count - insert (key, value, root) // call the recursive insert - this - end addOne - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for inserting a key and ref into this B+tree. - * Returns the divider key and right sibling upon split, else null. 
- * @param key the key to insert - * @param ref the value/node to insert - * @param n the current node - */ - private def insert (key: ValueType, ref: V, n: BpNode): (ValueType, BpNode) = - var k_r: (ValueType, BpNode) = null - - if n.isLeaf then // handle LEAF node - k_r = add (n, key, ref) - if k_r != null then - if n != root then return k_r - root = new BpNode (root, k_r._1, k_r._2) // make a new root - end if - - else // handle INTERNAL node - k_r = insert (key, ref, n.ref(n.find (key)).asInstanceOf [BpNode]) - if k_r != null then - k_r = addI (n, k_r._1, k_r._2) - if k_r != null && n == root then root = new BpNode (root, k_r._1, k_r._2) - end if - end if - k_r - end insert - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add new key k and value v into LEAF node n. Upon overflow, split node n, - * in which case the divider key and new right sibling node are returned. - * @param n the current node - * @param k the new key - * @param v the new value - */ - private def add (n: BpNode, k: ValueType, v: V): (ValueType, BpNode) = - var k_r: (ValueType, BpNode) = null // divider key, right sibling - n.add (k, v) // add into node n - if n.overflow then k_r = n.split () // its full, must split - k_r - end add - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add new key k and value v into INTERNAL node n. Upon overflow, split node n, - * in which case the divider key and new right sibling node are returned. 
- * @param n the current node - * @param k the new key - * @param v the new left value (ref a node) - */ - private def addI (n: BpNode, k: ValueType, v: BpNode): (ValueType, BpNode) = - var k_r: (ValueType, BpNode) = null // divider key, right sibling - n.add (k, v) // add into node n - n.showRef () - if n.overflow then k_r = n.splitI () // its full, must split - k_r - end addI - - //------------------------------------------------------------------------------ - // Remove key-value pair from the B+Tree - //------------------------------------------------------------------------------ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Subtract/remove the one element (key-value pair) with the given key. - * Called by the remove method in `AbstractMap`. - * @param key the key whose element is to be removed - */ - def subtractOne (key: ValueType): this.type = - kCount -= 1 // decrement the key count - delete (key, root) // call the recursive delete - if ! root.isLeaf && root.keys == 0 then - root = root.ref(0).asInstanceOf [BpNode] // remove empty root by resetting the root reference - this - end subtractOne - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursive helper method for deleting a key with its ref from this B+tree. 
- * @param key the key to delete - * @param n the current node - * @param par the parent node (null for root) - */ - private def delete (key: ValueType, n: BpNode, par: BpNode = null): Unit = - - if n.isLeaf then // handle LEAF node - val dp = n.findEq (key) // deletion position in LEAF node n - n.remove (dp) // remove key at index position dp - - // upon underflow do a leaf node borrow or merge - - if n != root && n.underflow then // unless root, check for underflow - println (s"delete: needs to handle underflow of LEAF node $n") - val j = par.find (key) // j-th index position in parent - val (sib, left) = richestSib (par, j) // richest sib and whether it's left - println (s"delete: leaf sib = $sib") - - if sib.rich then borrow (n, sib, left, par, j) // leaf borrow a key from rich sib - else merge (n, sib, left, par, j) // leaf merge nodes n and sib, may cause parent to underflow - end if - - else // handle INTERNAL node - val dp = n.find (key) // deletion position in INTERNAL node n - delete (key, n.ref(dp).asInstanceOf [BpNode], n) // recursive call to delete - - // upon underflow do an internal node borrow (borrowI) or merge (mergeI) - - if n != root && n.underflow then // unless root, check for underflow - println (s"delete: needs to handle underflow of INTERNAL node $n") - val j = par.find (key) // j-th index position in parent - println(s"Before rich sib delete: internal key = $key, par=$par, j=$j, parKeys = ${par.keys}")// the fix - val (sib, left) = richestSib (par, j) // richest sib and whether it's left - println (s"After rich sib delete: internal sib = $sib, left=$left") - - if sib.rich then borrowI (n, sib, left, par, j) // internal borrow a key from rich sib - else mergeI (n, sib, left, par, j) // internal merge nodes n and sib, may cause parent to underflow - end if - - end if - end delete - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return node n's richest sibling (and whether it is left/true or right/false) 
- * found from parent node. - * @param par the parent node of node n that has underflowed - * @param i the position of node n as a child of node par - */ - private def richestSib (par: BpNode, i: Int): (BpNode, Boolean) = - debug ("richSib", s"return node n's (@ $i) richest sibling (left or right)") - val leftn = if i-1 >= 0 then par.ref(i-1).asInstanceOf [BpNode] else null - val rightn = if i+1 <= par.keys then par.ref(i+1).asInstanceOf [BpNode] else null - - println(s"@@@@ righNode=$rightn, leftNode=$leftn , i=$i, parRef(o) = ${par.ref(0)}") - - if leftn == null then (rightn, false) - else if rightn == null then (leftn, true) - else if leftn.keys >= rightn.keys then (leftn, true) - else (rightn, false) - end richestSib - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Borrow a key-value pair from a rich sibling, so LEAF node n won't underflow. - * For borrow LEFT, last key in left sib k2 moves to n, then k2 replaces k3 in par. - * [ ... k3 ... ] [ ... k2 ... ] - * [ ... k1 k2 ] [ k3 ... ] TO [ ... k1 ] [ k2 k3 ... ] - * @param n the current node that has underflowed - * @param sib the rich sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def borrow (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Unit = - val i = if left then sib.keys-1 else 0 - val bkey = sib(i) - debug ("borrow", s"key $bkey from rich sib = $sib node for node n = $n having par = $par at j = $j, left=$left") - val bref = sib.ref(i+1).asInstanceOf [V] - println(s"bref=$bref, sib.ref=${stringOf(sib.ref)}, i =$i") - sib.remove (i) - add (n, bkey, bref) - if left then par(j-1) = bkey else par(j) = sib(0) // the divider key for parent node - end borrow - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Borrow a key-value pair from a rich sibling, so INTERNAL node n won't underflow. 
- * For borrow LEFT, last key in left sib k2 rotates into par, whose key k3 rotates to n. - * [ ... k3 ... ] [ ... k2 ... ] - * [ ... k1 k2 ] [ k4 ... ] TO [ ... k1 ] [ k3 k4 ... ] - * @param n the current node that has underflowed - * @param sib the rich sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ -// private def borrowI (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Unit = -// val i = if left then sib.keys-1 else 0 -// val bkey = sib(i) -// val bref = sib.ref(i+1).asInstanceOf [BpNode] -// debug("borrowI", s"key $bkey and bref= $bref from rich sib = $sib node for node n = $n having par = $par at j = $j") -// sib.removeI (i) -// addI (n, bkey, bref) -// if left then par(j-1) = bkey else par(j) = bkey // the divider key for parent node -// end borrowI - - // this is a borrowI from the right - // Move the key from the parent into the left child - // In this case, that key is 17. - // We need to move the key from right child into the parent in this case, that key is 33 - private def borrowI(n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Unit = - //@@@@@@@@@@@@@@ a left borrow @@@@@@@@@@@@@@@@@@ - - // [ ... k4 ] [ ...k2 ] - // [ ... k1.k2 ] [ ... k5. ] TO [ ... k1. ] [ k4.k5 ... ] - - // [ ... 19. ... ] [ ... 18 ... ] - // [ ...17.18 ] [ 20 ... ] TO [ ... 17 ] [ 19.20 ... 
] - //sib //node - if left then - println("@@@@@@@This is from the left @@@@@@@@@@@@@@@@@@@@") - // Shift keys and references in n to make room for the new key and reference from the parent - val ip = 0 - for i <- n.keys until ip by -1 do - n.key(i) = n.key(i - 1) - n.ref(i + 1) = n.ref(i) - // Insert the parent's key and reference to the first position in n - n.keys += 1 - n.key(ip) = par.key(j) //move the par key to this node - par.key(j) = sib.key(sib.keys - 1) - - n.ref(ip) = sib.ref(j + 1) - //par.ref(j) = n.ref(n.keys - 1) - - sib.removeI(j) // remove the bkey from sib - // Update the parent with the last key and reference from the - else - - // [ ... k2.k5 ] [ ... k3.k5 ] - // [ ... k1 ] [ ... k3. k4 ] TO [ ... k1.k2 ] [ k4. ... ] - - // [ ... 17. ... ] [ ... 33 ... ] - // [ ... ] [ 33 41 ... ] TO [ ... 17 ] [ 41 ... ] - - - // [ ... k2 ... ] [ ... k3 ... ] - - // [ ... k2.k5 ] [ ... k3.k5 ] - // [ ... k1 ] [ ... k3. k4 ] TO [ ... k1.k2 ] [ k4. ... ] - - // [ ... 17. ... ] [ ... 33 ... ] - // [ ... ] [ 33 41 ... ] TO [ ... 17 ] [ 41 ... ] - //println(s"bkey = ${n.key(ip)},bref= ${n.ref(ip)}, sib(j) = ${sib.key(j)}, nKeys= ${n.keys}") - println("@@@@@@@This is from the right @@@@@@@@@@@@@@@@@@@@") - val ip = n.keys // the inserting point at the node - println(s"this is the ip = $ip") - - n.keys += 1 // I don't know why we are doing this incrementation - n.key(ip) = par.key(j) // insert k2 to the n @ the insertion point - par.key(j) = sib.key(0) // promote k3 up to the parent node - - //n.ref(ip) = par.ref(j + 1).asInstanceOf[BpNode] // copy the ref along. the ref is j + 1, for 33, it is 39 - //par.ref(j+1) = sib.ref(1).asInstanceOf[BpNode] // promote the ref of k3 - n.ref(ip+1) = sib.ref(0) - - sib.removeI(0) // remove k2 from the right, sib = [.k3.] 
- println(s"new par=${par.key(j)}, new par ref = ${par.ref(j+1)}") - end borrowI - - -// private def borrowI(n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Unit = -// if left then -// println("@@@@@@@This is frm the left @@@@@@@@@@@@@@@@@@@@") -// val ip = 0 -// for i <- n.keys until ip by -1 do // make room by shifting keys right -// n.key(i) = n.key(i - 1) -// n.ref(i + 1) = n.ref(i) -// // key(ip) = key() // insert new key -// // ref(ip+1) = ref() // insert new value (right of key) -// n.keys += 1 -// n.key(ip) = par.key(j) -// n.ref(ip) = par.ref(j) -// par.key(j) = sib.key(sib.keys - 1) -// par.ref(j) = sib.ref(sib.keys - 1) -// sib.keys -= 1 -// else -// println("@@@@@@@This is frm the right @@@@@@@@@@@@@@@@@@@@") -// val ip = n.keys -// // key(ip) = k // insert new key -// // ref(ip + 2) = v // insert new value (right of key) -// n.keys += 1 -// n.key(ip) = par.key(j) -// n.ref(ip) = par.ref(j) -// par.key(j) = sib.key(0) -// par.ref(j) = sib.ref(0) -// sib.remove(0) -// assert(1 == 2) -// end borrowI - - - - - - - - - - - - - - - - - - - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge LEAF node n with its sibling returning whether the parent node - * has underflowed. - * @param n the current node that has underflowed - * @param sib the sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def merge (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Boolean = - debug ("merge", s"LEAF node n = $n that underflows with sib = $sib having par = $par at j = $j ,left=$left") - if left then {sib.merge (n); par.remove(j-1)} - else { n.merge (sib); par.remove (j) } // true means parent underflowed - end merge - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Merge INTERNAL node n with its sibling returning whether the parent node - * has underflowed. 
- * @param n the current node that has underflowed - * @param sib the sibling node - * @param left the whether the sib is left or right - * @param par the parent node - * @param j the index position in the parent node - */ - private def mergeI (n: BpNode, sib: BpNode, left: Boolean, par: BpNode, j: Int): Boolean = - println("@@@@Show@@@@@@@@@@@@@@@") - show() - println("@@@@Show@@@@@@@@@@@@@@@") - debug ("mergeI", s"INTERNAL node n = $n that underflows with sib = $sib having par = $par at j = $j, left =$left") - if left then - println("mergeI from the left") - sib.mergeI (par.key(j-1), n) - par.remove(j-1) - else - println("mergeI from the right") - //n.mergeI (par.key(j), sib) - n.mergeI (par.key(j), sib) - par.remove (j) // fix removin g the wrong parent -// sib.key(sib.keys) = par.key(j) -// sib.ref(sib.keys + 1) = n.ref(0) // node corresponding to divider key -// for i <- 0 until n.keys do // move keys from rt into this node -// sib.key(sib.keys + i + 1) = n.key(i) -// sib.ref(sib.keys + i + 2) = n.ref(i + 1) -// sib.keys += n.keys + 1 -// par.remove (j) // fix removing the wrong parent // true means parent underflowed - end mergeI - - - //------------------------------------------------------------------------------ - // Print/show the B+Tree - //------------------------------------------------------------------------------ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show/print this B+Tree. - */ - def show (): Unit = - println ("BpTreeMap") - printT (root, 0) - println ("-" * 60) - end show - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show/print this B+Tree's leaf node links. 
- */ - def showLink (): Unit = - println ("BpTreeMap Leaf Node Linkage") - println("=" * 60) - var n = first - while n != null do { n.show (); n = n.ref(0).asInstanceOf [BpNode]} - println("=" * 60) - end showLink - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Print this B+Tree map using a pre-order traversal and indenting each level. - * @param n the current node to print - * @param level the current level in the B+Tree - */ - private def printT (n: BpNode, level: Int): Unit = - if n != null then - println ("\t" * level + n) - if ! n.isLeaf then - for j <- 0 to n.keys do printT (n.ref(j).asInstanceOf [BpNode], level + 1) - end printT - -end BpTreeMap - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest` main function used for testing the `BpTreeMap` class by - * inserting increasing key values. - * > runMain scalation.database.bpTreeMapTest - */ -@main def bpTreeMapTest (): Unit = - - banner ("Insert Increasing Integer Keys") - val totKeys = 60 - val tree = new BpTreeMap [Int] () - - for i <- 1 until totKeys by 2 do - banner (s"put ($i, ${i * i})") - tree.put (i, i * i) - tree.show () - tree.showLink() - end for - - banner ("Find Keys") - for i <- 0 until totKeys do println (s"key = $i, value = ${tree.get(i)}") - println ("-" * 60) - - banner ("Iterate Through the B+Tree") - for it <- tree.iterator do println (it) - println ("-" * 60) - tree.foreach (println (_)) - - banner ("Print Statistics") - println (s"size = ${tree.size}") - println (s"Average number of nodes accessed = ${tree.count / totKeys.toDouble}") - - banner ("Delete Keys") - tree.show () - //val toRemove = Array (29, 31, 33, 35, 27, 25) - val toRemove = Array (29, 31,27, 33, 35, 25, 23, 13, 7, 1, 3, 5, 9, 21, 17, 19, 21, 11, 15, 37, 39,41,43,49,47,51,45) - //val toRemove = Array(7, 1, 3, 5, 9, 21, 17, 19, 21, 11, 15, 13, 29, 31, 27, 33, 35, 23, 39, 37, 41, 25) - for key <- toRemove do - banner (s"remove 
($key)") - tree.remove (key) - tree.show () - tree.showLink() - end for - -end bpTreeMapTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest2` main function used for testing the `BpTreeMap` class by - * inserting random key values. - * > runMain scalation.database.bpTreeMapTest2 - */ -@main def bpTreeMapTest2 (): Unit = - - import java.util.Random - - banner ("Insert Random Integer Keys") - val totKeys = 60 - val mx = 10 * totKeys - val seed = 1 - val rng = new Random (seed) - val tree = new BpTreeMap [Int] () - - for i <- 1 to totKeys do - val key = rng.nextInt (mx) - banner (s"put ($key, ${2 * key})") - tree.put (key, 2 * key) - tree.show () - tree.showLink() - end for - - banner ("Print Statistics") - println (s"size = ${tree.size}") - println (s"Average number of nodes accessed = ${tree.count / totKeys.toDouble}") - -end bpTreeMapTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bpTreeMapTest3` main function used for testing the `BpTreeMap` class by - * inserting keys and values into B+Trees, one representing each of two lanes. 
- * > runMain scalation.database.bpTreeMapTest3 - */ -@main def bpTreeMapTest3 (): Unit = - - import java.util.Random - - case class Car (vin: Int, dist: Double) - - banner ("Insert Random Integer Keys") - val totKeys = 60 - val seed = 1 - val rng = new Random (seed) - val lane1 = new BpTreeMap [Car] () // index for lane1 - val lane2 = new BpTreeMap [Car] () // index for lane2 - - var dist = 0.0 // distance from end of lane - var ord = 0 // rank order from end of lane - for i <- 1 to totKeys do - dist += rng.nextInt (5) - val c_i = Car (i, dist) // the car being put into lane1's B+Tree - ord += 10 // rank order of car toward end of lane - banner (s"put ($ord, $c_i)") - lane1.put (ord, c_i) - lane1.show () - lane1.showLink() - end for - - dist = 0.0 // distance from end of lane - ord = 0 - for i <- 1 to totKeys do - dist += rng.nextInt (5) - val c_i = Car (totKeys + i, dist) // the car being put into lane2's B+Tree - ord += 10 // rank order of car toward end of lane - banner (s"put ($ord, $c_i)") - lane2.put (ord, c_i) - lane2.show () - lane2.showLink() - end for - -// find the j-th car in lane1 call it car1 -// find the corresponding j-th car in lane2 call it car2 -// check whether car2 is behind car1 in the other lane -// may need a doubly linked list of nodes at the leaf-level to search forward and backward -// find the closest car in the other lane that is behind you -// if its distance is large enough, make the lane change -// may need gaps in ord so lane changing car can get an ord without making all care reassign theirs - -end bpTreeMapTest3 diff --git a/target/scala-3.6.4/classes/scalation/database/old/MinSpanningTree.scala.bak b/target/scala-3.6.4/classes/scalation/database/old/MinSpanningTree.scala.bak deleted file mode 100644 index 91c7c545e..000000000 --- a/target/scala-3.6.4/classes/scalation/database/old/MinSpanningTree.scala.bak +++ /dev/null @@ -1,217 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** 
@author John Miller, Peng Hao - * @version 2.0 - * @date Sat Nov 7 21:01:31 EST 2015 - * @see LICENSE (MIT style license file). - * - * @title Minimum Spanning Tree Implementing Prim's Algorithm - */ - -package scalation -package database - -import scala.collection.mutable.{Map, Set => SET} -//import scala.collection.mutable.PriorityQueue // lacks decreaseKey method -import scala.runtime.ScalaRunTime.stringOf - -import graph_pm.Graph -import scalation.PriorityQueue // ScalaTion's extesnion add increaseKey, printInOrder - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `MinSpanningTree` class is used to build minimum cost spanning trees - * from graphs. Edge cost/weights are given by edge labels. `MinSpanningTree` - * implements Prim's algorithm. - * @see www.cse.ust.hk/~dekai/271/notes/L07/L07.pdf - * @param g the digraph to build the spanning tree from - * @param min whether to create a minimum (true) or maximum (false) spanning tree - * @param undirected whether the graph is already undirected - */ -class MinSpanningTree (g: Graph, min: Boolean = true, undirected: Boolean = true): - - private val debug = debugf ("MinSpanningTree", true) // debug flag - private var stree: Tree = null // spanning tree built by calling span - private val size = g.size // the number of nodes for the spanning tree - private val root = new TreeNode (0, 0, 0.0) // for vertex 0 in g, create a root node - private val key = if min then Array.fill (size)(MAX_VALUE) - else Array.fill (size)(-MAX_VALUE) // cost/key array - private val out = Array.fill (size)(true) // status of outside spanning tree - private val qu = PriorityQueue ()(if min then NodeOrder else NodeOrder2) // priority queue of vertices - for i <- 0 until size do qu.enqueue (Elem (i, key(i))) // put all vertices in priority queue - - debug ("init", s"size = $size, min = $min, undirected = $undirected") - - if ! 
undirected then g.makeUndirected () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Print the spanning tree. - */ - def printSTree (): Unit = stree.printTree () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a minimum cost spanning tree for the given graph, returning true - * if a complete spanning tree connecting all of g's vertices can be created. - */ - def span (): Tree = - val pred = makeITree () // make an inverted tree - val el = Array.ofDim [ValueType] (pred.length) // copy elabel value from g into a pred elabel array - for i <- 1 until el.length do el(i) = g.elabel(pred(i), i).toDouble // skipping root node (0) - stree = Tree (pred, el, 4, "st") // build spanning tree from pred array - stree - end span - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `Elem` class is used for ordering elements on a priority queue. - * @param idx the index of a node - * @param key the ordering key (based on cost) for a node - */ - case class Elem (idx: Int, key: Double) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `NodeOrder` object defines the order of node indices based on - * their 'key' value. Using -key to get "smallest first" in priority queue. - * This is for minimum spanning trees ('min' = true) - */ - object NodeOrder extends Ordering [Elem]: - def compare (e1: Elem, e2: Elem): Int = -e1.key compare -e2.key - end NodeOrder - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The `NodeOrder` object defines the order of node indices based on - * their 'key' value. Using +key to get "largest first" in priority queue. 
- * This is for maximum spanning trees ('min' = false) - */ - object NodeOrder2 extends Ordering [Elem]: - def compare (e1: Elem, e2: Elem): Int = e1.key compare e2.key - end NodeOrder2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Make an inverted tree by recording the predecessor/parent array. - * Each node except the root will have one parent. See pseudo-code on p. 28 - * @see www.cse.ust.hk/~dekai/271/notes/L07/L07.pdf - */ - def makeITree (): Array [Int] = - val pred = Array.fill (size)(-1) // predecessor node array - key(0) = null.asInstanceOf [Double] // start at the root (node index 0) - pred(0) = -1 // it has no predecessor/parent - - while qu.nonEmpty do // until all vertices in spanning tree - qu.printInOrder // print qu in order, comment out to reducing printing - val i = qu.dequeue ().idx // return and remove least cost vertex - debug ("makeITree", s"dequeued i = $i") - for j <- g.ch(i) if out(j) do // iterate through its outside children - val cost = g.elabel (i, j).toDouble // get cost from edge label - - if (min && cost < key(j)) || (!min && cost > key(j)) then - qu.increaseKey (Elem (j, key(j)), Elem (j, cost)) // reposition j toward front in priority queue - key(j) = cost // lower the cost for node index j - pred(j) = i // set pred of j to parent i - end if - end for - out(i) = false // now finished with i - end while - debug ("makeITree", s"pred = ${stringOf (pred)}") - pred - end makeITree - -end MinSpanningTree - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `minSpanningTreeTest` main function tests the `MinSpanningTree` class. 
- * @see www.cse.ust.hk/~dekai/271/notes/L07/L07.pdf - * > runMain scalation.database.minSpanningTreeTest - */ -@main def minSpanningTreeTest: Unit = - - val g = new Graph (Array (SET (1, 3, 4), // ch(0) - SET (2, 3), // ch(1) - SET (3, 5), // ch(2) - SET (4, 5), // ch(3) - SET (), // ch(4) - SET ()), // ch(5) - Array.fill (6)(-1.0), // vertex labels - Map ((0, 1) -> 1.0, // edge labels - (0, 3) -> 10.0, - (0, 4) -> 3.0, - (1, 2) -> 2.0, - (1, 3) -> 3.0, - (2, 3) -> 4.0, - (2, 5) -> 5.0, - (3, 4) -> 4.0, - (3, 5) -> 1.0)) - g.printG () - - val st = new MinSpanningTree (g) - st.span () - println ("spanning tree for graph " + g.name) - println ("-" * 60) - st.printSTree () - -end minSpanningTreeTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `minSpanningTreeTest2` main function tests the `MinSpanningTree` class. - * This test the Maximum Spanning Tree option. - * @see www.cse.ust.hk/~dekai/271/notes/L07/L07.pdf - * > runMain scalation.database.minSpanningTreeTest2 - */ -@main def minSpanningTreeTest2 (): Unit = - - val g = new Graph (Array (SET (1, 3, 4), // ch(0) - SET (2, 3), // ch(1) - SET (3, 5), // ch(2) - SET (4, 5), // ch(3) - SET (), // ch(4) - SET ()), // ch(5) - Array.fill (6)(-1.0), // vertex labels - Map ((0, 1) -> 1.0, // edge labels - (0, 3) -> 10.0, - (0, 4) -> 3.0, - (1, 2) -> 2.0, - (1, 3) -> 3.0, - (2, 3) -> 4.0, - (2, 5) -> 5.0, - (3, 4) -> 4.0, - (3, 5) -> 1.0)) - g.printG () - - val st = new MinSpanningTree (g, false) - st.span () - println ("spanning tree for graph " + g.name) - println ("-" * 60) - st.printSTree () - -end minSpanningTreeTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `minSpanningTreeTest3` main function tests the `MinSpanningTree` class by - * building a graph from an adjacency matrix called cmi for Conditional Mutual - * Information and creating a maximum spanning tree from it. 
- * @see scalation.modeling.classifying.BayesClassifier - * > runMain scalation.database.minSpanningTreeTest3 - */ -@main def minSpanningTreeTest3 (): Unit = - - import scalation.mathstat.MatrixD - - val cmi = MatrixD ((4, 4), 0.00000, 0.419593, 0.222815, 0.311752, - 0.00000, 0.00000, 0.419593, 0.168895, - 0.00000, 0.00000, 0.00000, 0.0610538, - 0.00000, 0.00000, 0.00000, 0.00000) - println (s"cmi = $cmi") - - banner ("Graph from Adjacency Matrix") - val g = Graph.fromMatrix (cmi) - g.printG () - - banner ("Maximum Spanning Tree from Graph") - val st = new MinSpanningTree (g, false, false) - st.span () - println ("-" * 60) - st.printSTree () - -end minSpanningTreeTest3 - diff --git a/target/scala-3.6.4/classes/scalation/database/old/Tabular.scala.bak b/target/scala-3.6.4/classes/scalation/database/old/Tabular.scala.bak deleted file mode 100644 index 62b3fd811..000000000 --- a/target/scala-3.6.4/classes/scalation/database/old/Tabular.scala.bak +++ /dev/null @@ -1,602 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Fri Jun 17 11:19:14 EDT 2022 - * @see LICENSE (MIT style license file). 
- * - * @title Base Trait for Row-Oriented Relational DBMS - * - * RA Operators: rename, project, select, union, minus, intersect, product, join, - * leftJoin, divide, groupBy, aggregate, orderBy - * - * Most of the RA Operators have Unicode versions: @see `scalation.UnicodeTest` - * - * inline def ρ (newName: String): Tabular = rename (newName) - * inline def π (x: String): Tabular = project (splitTrim (x)) - * inline def σ (predicate: Predicate): Tabular = select (predicate) - * inline def σ (condition: String): Tabular = select (condition) - * inline def σ (pkey: KeyType): Tabular = select (pkey) - * inline def ⋃ (r2: Tabular): Tabular = union (r2) - * inline def - (r2: Tabular): Tabular = minus (r2) - * inline def ⋂ (r2: Tabular): Tabular = intersect (r2) - * inline def × (r2: Tabular): Tabular = product (r2) - * inline def ⋈ (predicate: Predicate2, r2: Tabular): Tabular = join (predicate, r2) - * inline def ⋈ (condition: String, r2:Tabular): Tabular = join (condition, r2) - * inline def ⋈ (x: String, y: String, r2: Tabular): Tabular = join (splitTrim (x), splitTrim (y), r2) - * inline def ⋈ (fkey: (String, Tabular)): Tabular = join (fkey) - * inline def ⋈ (r2: Tabular): Tabular = join (r2) - * inline def ⋉ (x: Schema, y: Schema, r2: Tabular): Tabular = leftJoin (x, y, r2) - * inline def / (r2: Tabular): Tabular = divide (r2) - * inline def γ (g_atr: String): Tabular = groupBy (g_atr) - * inline def ϙ (atr: String*)(rev: Boolean = false): Tabular = orderBy (atr :_*)(rev) - */ - -package scalation -package database - -import scala.collection.mutable.{ArrayBuffer => Bag, Map} -import scala.runtime.ScalaRunTime.stringOf - -import scalation.mathstat.{MatrixD, VectorD, VectorS, VectorT} - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Type definitions for components of the relational model. 
- * @see `ValueType` in scalation package - */ -type Domain = Array [Char] -type Schema = Array [String] -type Tuple = Array [ValueType] -type Predicate = Tuple => Boolean -type Predicate2 = (Tuple, Tuple) => Boolean -type AggFunction = Array [ValueType] => ValueType - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Definitions of comparison operators for `ValueType`. - */ -def == (x: ValueType, y: ValueType): Boolean = x == y -def != (x: ValueType, y: ValueType): Boolean = x != y -def ne (x: ValueType, y: ValueType): Boolean = x != y -def < (x: ValueType, y: ValueType): Boolean = x < y -def <= (x: ValueType, y: ValueType): Boolean = x <= y -def > (x: ValueType, y: ValueType): Boolean = x > y -def >= (x: ValueType, y: ValueType): Boolean = x >= y - -def equ (x: ValueType, y: ValueType): Boolean = x == y -def neq (x: ValueType, y: ValueType): Boolean = x != y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Split and trim the comma-separated names contained in the given string str. - * @param str the string to split and trim - * @param sep the separation character - */ -def splitTrim (str: String, sep: Char = ','): Array [String] = - (str split sep).map (_.trim) -end splitTrim - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Split a condition string into three tokens: "atr op value". 
- * e.g., "cname == 'John Doe'", "accno == 123", "balance > 1000.00", "cname == cname" - * @param condition the simple condition string to parse - */ -def parseCond (condition: String): (Array [String], Boolean) = - val token = splitTrim (condition, '\'') - if token.size > 1 then - val prefix = token(0) - val part = splitTrim (prefix, ' ') - (Array (part(0), part(1), token(1)), false) - else - val part = splitTrim (condition, ' ') - val twoAtrs = java.lang.Character.isUnicodeIdentifierStart (part(0)(0)) - (part, twoAtrs) - end if -end parseCond - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Combine two schemas and disambiguate any repeated names by appending "2". - * @param sch1 the first schema - * @param sch2 the second schema - */ -def disambiguate (sch1: Schema, sch2: Schema): Schema = - val sch = Bag.from (sch1) - for s <- sch2 do sch += (if sch1 contains s then s + "2" else s) - sch.toArray -end disambiguate - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Tabular` trait defines relational algebra operators. - * Supported domains/data-types are 'D'ouble, 'I'nt, 'L'ong, 'S'tring, and 'T'imeNum. 
- * 'D' - `Double` - `VectorD` - 64 bit double precision floating point number - * 'I' - `Int` - `VectorI` - 32 bit integer - * 'L' - `Long` - `VectorL` - 64 bit long integer - * 'S' - `String` - `VectorS` - variable length numeric string - * 'T' - `TimeNum` - `VectorT` - time numbers for date-time - * @param name the name of the table - * @param schema the attributes for the table - * @param domain the domains/data-types for attributes ('D', 'I', 'L', 'S', 'T') - * @param key the attributes forming the primary key - */ -trait Tabular [T <: Tabular [T]] (val name: String, val schema: Schema, val domain: Domain, val key: Schema) - extends Serializable: - - private val flaw = flawf ("Tabular") // flaw function - - val on = Map [String, Int] () // map from attribute name to column number - for j <- schema.indices do on += schema(j) -> j - - if schema.size != domain.size then flaw ("init", "size mismatch between attributes and domains") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the size in terms of number of rows in the table. - */ - def rows: Int - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the size in terms of number of columns in the table. - */ - def cols: Int = schema.size - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the cardinality (number of tuples) and arity (number of attributes). - */ - inline def dims: (Int, Int) = (rows, cols) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the j-th column in this table (or the passed in tuples) as an array of value-type. - * @param j the column to return - * @param tups the collection of tuples to use (defaults to all tuples in this table) - */ - def col (j: Int, tups: Bag [Tuple]): Array [ValueType] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the range of columns numbers. 
- */ - def colIndices: Range = 0 until schema.size - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether this table contains tuple u. - * @param u the tuple to look for - */ - def contains (u: Tuple): Boolean - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Check the size of the tuple (number of elements) as well as the type of each - * value to ensure it is from the right domain (satisfies the DOMAIN CONSTRAINTS). - * @param t the tuple to be type checked - */ - def typeCheck (t: Tuple): Boolean = - if t.size != domain.size then - flaw ("typeCheck", s"the size of tuple ${stringOf (t)} != ${domain.size} (the domain size)") - return false - end if - for j <- t.indices if typeOf (t(j)).head != domain(j) do - flaw ("typeCheck", s"domain constraint violation: tuple ${stringOf (t)} has wrong type for $j-th domain") - return false - end for - true - end typeCheck - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add a FOREIGN KEY CONSTRAINT to this table by specifying the foreign key - * attribute fkey and the table it references refTab. - * Caveat: a foreign key may not be composite. - * @param fkey the foreign key attribute - * @param refTab the table being referenced (to its primary key) - */ - def addForeignKey (fkey: String, refTab: Tabular): Unit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Check that all the foreign keys values in tuple t satisfy their - * REFERENTIAL INTEGRITY CONSTRAINTS. - * @param t the tuple being checked for referential integrity - */ - def referenceCheck (t: Tuple): Boolean - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the i-th primary key. 
- * @param i the index in the tuples/row index - */ - def getPkey (i: Int): KeyType - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** CREATE/recreate the primary INDEX that maps the primary key to the tuple - * containing it. Warning, creating an index will remove DUPLICATES based - * on maintaining UNIQUENESS CONSTRAINT of primary key values. - */ - def create_index (): Unit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** DROP the primary INDEX that maps the primary key to the tuple containing it. - */ - def drop_index (): Unit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the table restricted to the given range of rows. - * @param r the given range of rows - */ - def apply (r: Range): Tabular - - // ================================================================== RENAME - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** RENAME this table, returning a shallow copy of this table. - * @param newName the new name for the table. - */ - def rename (newName: String): Tabular - - inline def ρ (newName: String): Tabular = rename (newName) - - // ================================================================= PROJECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** PROJECT the tuples in this table onto the given attribute names. - * @param x the schema/attribute names to project onto - */ - def project (x: Schema): T - - inline def project (x: String): T = project (splitTrim (x)) - - inline def π (x: String): T - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** PROJECT onto the columns with the given column positions. 
- * @param cPos the column positions to project onto - * @param cName the optional new names for the columns to project onto - */ - def project (cPos: IndexedSeq [Int], cName: Schema = null): Tabular - - inline def π (cPos: IndexedSeq [Int], cName: Schema = null): Tabular = project (cPos, cName) - - // ================================================================== SELECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SELECT the tuples in this table that satisfy the predicate. - * @param predicate the predicate (`Boolean` function) to be satisfied - */ - def select (predicate: Predicate): Tabular - - inline def σ (predicate: Predicate): Tabular = select (predicate) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SELECT the tuples in this table that satisfy the given simple (3 token) condition. - * @param condition the simple condition string "a1 op a2" to be satisfied, where - * a1 is attribute, op is comparison operator, a2 is attribute or value - */ - def select (condition: String): Tabular - - inline def σ (condition: String): Tabular = select (condition) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SELECT via the INDEX the tuple with the given primary key value pkey. - * Returns an empty table if the primary index has not been created. - * @param pkey the primary key value - */ - def select (pkey: KeyType): Tabular - - inline def σ (pkey: KeyType): Tabular = select (pkey) - - // =========================================================== SET OPERATORS - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** UNION this table and r2. Check that the two tables are compatible. - * If they are not, return the first table. - * Caveat: Assumes the key from the first table still works (@see create_index) - * Acts like union-all, so to remove duplicates call create_index after union. 
- * @param r2 the second table - */ - def union (r2: Tabular): Tabular - - inline def ⋃ (r2: Tabular): Tabular = union (r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute this table MINUS (set difference) table r2 (this - r2). Check that - * the two tables are compatible. If they are not, return the first table. - * @param r2 the second table - */ - def minus (r2: Tabular): Tabular - - inline def - (r2: Tabular): Tabular = minus (r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** INTERSECT this table and r2. Check that the two tables are compatible. - * If they are not, return the first table. - * @param r2 the second table - */ - def intersect (r2: Tabular): Tabular - - inline def ⋂ (r2: Tabular): Tabular = intersect (r2) - - // ================================================================= PRODUCT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the CARTESIAN PRODUCT of this table and r2 (this × r2). - * @param r2 the second table - */ - def product (r2: Tabular): Tabular - - inline def × (r2: Tabular): Tabular = product (r2) - - // ==================================================================== JOIN - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** JOIN this table and r2 keeping concatenated tuples that satisfy the predicate. - * Caveat: Assumes both keys are needed for the new key (depending on the - * predicate both may not be required). - * @param predicate the join predicate to be satisfied - * @param r2 the second table - */ - def join (predicate: Predicate2, r2: Tabular): Tabular - - inline def ⋈ (predicate: Predicate2, r2: Tabular): Tabular = join (predicate, r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the THETA-JOIN of this table and r2 keeping concatenated tuples that - * satisfy the given simple (3 token) condition. 
- * @param condition the simple condition "a1 op a2" - * @param r2 the second table - */ - def join (condition: String, r2: Tabular): Tabular - - inline def ⋈ (condition: String, r2:Tabular): Tabular = join (condition, r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the EQUI-JOIN of this table and r2 keeping concatenated tuples that - * are equal on specified attributes. - * @param x the subschema/attributes for the first/this table - * @param y the subschema/attributes for the second table - * @param r2 the second table - */ - def join (x: Schema, y: Schema, r2: Tabular): Tabular - - inline def join (x: String, y: String, r2: Tabular): Tabular = - join (splitTrim (x), splitTrim (y), r2) - end join - - inline def ⋈ (x: String, y: String, r2: Tabular): Tabular = join (splitTrim (x), splitTrim (y), r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the EQUI-JOIN via the INDEX of this table and the referenced table keeping - * concatenated tuples that are equal on the primary key and foreign key attributes. - * Caveat: Requires the foreign key table to be first [ fkey_table join ((fkey, pkey_table) ]. - * Usage: deposit join (("cname", customer)) - * @param ref the foreign key reference (foreign key attribute, referenced table) - */ - def join (ref: (String, Tabular)): Tabular - - inline def ⋈ (fkey: (String, Tabular)): Tabular = join (fkey) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the NATURAL JOIN of this table and r2 keeping concatenated tuples - * that agree on the common attributes. 
- * @param r2 the second table - */ - def join (r2: Tabular): Tabular - - inline def ⋈ (r2: Tabular): Tabular = join (r2) - - // ============================================================== OUTER JOIN - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the LEFT-EQUI-JOIN of this table and r2 keeping concatenated tuples - * that are equal on specified attributes. Also, keep all tuples in the left - * table padding the missing attributes with null. - * For right-join swap table1 and table2, e.g., table1.leftJoin (... table2) - * @param x the subschema/attributes for the left/first/this table - * @param y the subschema/attributes for the right/second table - * @param r2 the second table - */ - def leftJoin (x: Schema, y: Schema, r2: Tabular): Tabular - - // Note: although this is the semi-join symbol, due to Unicode limitations, it is used for left-join. - - inline def ⋉ (x: Schema, y: Schema, r2: Tabular): Tabular = leftJoin (x, y, r2) - - inline def rightJoin (x: Schema, y: Schema, r2: Tabular): Tabular = r2.leftJoin (y, x, this) - - inline def ⋊ (x: Schema, y: Schema, r2: Tabular): Tabular = r2.leftJoin (y, x, this) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a tuple with missing values for each column according to the given - * domains. This method is used by leftJoin. 
- * @param domain the domains of the table for which a null tuple is required - */ - def nullTuple (domain: Domain): Tuple = - val v = Array.ofDim [ValueType] (domain.size) - for j <- v.indices do - v(j) = domain(j) match - case 'D' => NO_DOUBLE - case 'I' => NO_INT - case 'L' => NO_LONG - case 'S' => NO_STRING - case 'T' => NO_TIMENUM - case _ => { flaw ("nullTuple", s"does not support domain type ${domain(j)}"); null } - end for - v - end nullTuple - - // ================================================================== DIVIDE - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** DIVIDE this table by table r2. Requires a tuple in the quotient part of - * this table to be paired with all tuples in table r2. - * @param r2 the second table - */ - def divide (r2: Tabular): Tabular - - inline def / (r2: Tabular): Tabular = divide (r2) - - // ================================================================ GROUP BY - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** GROUP this table BY the specified attribute, returning this table. - * Each value for attribute atr will be mapped to a collection of tuples. - * @param g_atr the attribute to group by - */ - def groupBy (g_atr: String): Tabular - - inline def γ (g_atr: String): Tabular = groupBy (g_atr) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Assuming this table has been grouped by attribute g_atr, create a table - * where the first column is g_atr and the rest are AGGREGATE FUNCTIONs applied - * to their corresponding attributes. 
- * @param g_atr the attribute the table has been grouped on - * @param f_as the aggregate function and the attribute to apply it to (as varargs) - */ - def aggregate (g_atr: String, f_as: (AggFunction, String)*): Tabular - - // ================================================================ ORDER BY - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** ORDER-BY the given attributes, i.e., reorder the tuples in this table into - * ascending order. A stable sorting is used to allow sorting on multiple attributes. - * @param atr the subschema/attributes to order by - * @param rev whether to reverse the sorting order (defaults to false => ascending) - */ - def orderBy (atr: String*)(rev: Boolean = false): Tabular - - inline def ϙ (atr: String*)(rev: Boolean = false): Tabular = orderBy (atr :_*)(rev) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** ORDER (descending) the rows in the table BY the selected columns cName. - * A stable sorting is used to allow sorting on multiple columns. - * @param atr the subschema/attributes to order by - */ - inline def reverseOrderBy (atr: String*): Tabular = orderBy (atr :_*)(true) - - inline def ω (atr: String*): Tabular = orderBy (atr :_*)(true) - - // ================================================================= UPDATES - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** ADD (insert) tuple t into this table checking to make sure the domains are correct. - * Also, checks referential integrity for any foreign keys in the tuple. - * Return true iff the tuple passes the type check and reference check. - * @param t the tuple to be inserted - */ - def add (t: Tuple): Tabular - - def add (v: ValueType*): Tabular = add (v.toArray) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** UPDATE the column with attribute name atr using newVal for elements with value - * matchVal. 
Return true iff at least one tuple is updated. - * @param atr the attribute name for the column to be updated - * @param newVal the value used to assign updated values - * @param matchVal the value to be matched to elements - */ - def update (atr: String, newVal: ValueType, matchVal: ValueType): Boolean - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** DELETE all tuples in this table satisfying the deletion predicate. - * If there is an index, remove those tuples from the index as well. - * Return true iff at least one tuple is deleted. - * @param predicate the predicate that specifies which tuples to delete - */ - def delete (predicate: Predicate): Boolean - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the tuples in tups into a string, e.g., for displaying a collection - * of tuples. - * @param tups the tuples to be converted to a string - */ - def showT (tups: Bag [Tuple]): String = - val sb = StringBuilder () - for t <- tups do - sb.append ("( ") - for v <- t do sb.append (s"$v ") - sb.append ("), ") - end for - sb.append ("\n") - sb.toString - end showT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SHOW/print this table, one tuple per row. - * @param rng the range of tuples to show, defaults to 0 until 10 - */ - def show (rng: Range = 0 until 10): Unit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SHOW/print this table's primary index. - */ - def show_index (): Unit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SHOW/print this table's foreign keys. - */ - def show_foreign_keys (): Unit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SAVE this table in a file using serialization. 
- * @see load in `Tabular` object - */ - def save (): Unit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Write this table into a Comma-Separated-Value (CSV) file with each tuple - * written to a line. - * @param fileName the file name of the data file - */ - def writeCSV (fileName: String): Unit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this table to a matrix of doubles by making the necessary - * type transformations. - * @param cols the column position to use for forming the matrix. - */ - def toMatrix (cols: Array [Int] = Array.range (0, schema.size)): MatrixD - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether this table and r2 are incompatible by having differing domains. - * @param r2 the second table - */ - def incompatible (r2: Tabular): Boolean = - val (dom, dom2) = (stringOf (domain), stringOf (r2.domain)) - if dom != dom2 then - flaw ("incompatible", s"$name and ${r2.name} have differing domains $dom vs. ${dom2}") - true - else - false - end if - end incompatible - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether schema x is a subset-of schema y. - * @param x the first schema (array/set of attributes) - * @param y the schema schema (array/set of attributes) - */ - inline def subset (x: Schema, y: Schema): Boolean = x.forall (y contains _) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Pull the values out of tuple t for the attributes in subschema x. 
- * @param t the given tuple to pull values out of - * @param x the subschema/attributes to be collected - */ - def pull (t: Tuple, x: Schema): Tuple = - val u = Array.ofDim [ValueType] (x.size) - for i <- x.indices do u(i) = t(on(x(i))) - u - end pull - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Pull a value out of tuple t for attribute a. - * @param t the given tuple to pull value out of - * @param a the attribute to be collected - */ - def pull (t: Tuple, a: String): ValueType = t(on(a)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Pull the domains out of this table for the attributes in subschema x. - * @param x the subschema/attributes to be collected - */ - def pull (x: Schema): Domain = - val dom = Array.ofDim [Char] (x.size) - for i <- x.indices do dom(i) = domain(on(x(i))) - dom - end pull - -end Tabular - diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Days$.class b/target/scala-3.6.4/classes/scalation/database/relation/Ex_Days$.class deleted file mode 100644 index fa8c987b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Days$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Days.class b/target/scala-3.6.4/classes/scalation/database/relation/Ex_Days.class deleted file mode 100644 index fdaaaeba7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Days.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Days.tasty b/target/scala-3.6.4/classes/scalation/database/relation/Ex_Days.tasty deleted file mode 100644 index ad62169c0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Days.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Ex_ProductSales$.class 
b/target/scala-3.6.4/classes/scalation/database/relation/Ex_ProductSales$.class deleted file mode 100644 index 73d5c380f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Ex_ProductSales$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Ex_ProductSales.class b/target/scala-3.6.4/classes/scalation/database/relation/Ex_ProductSales.class deleted file mode 100644 index 870c7577f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Ex_ProductSales.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Ex_ProductSales.tasty b/target/scala-3.6.4/classes/scalation/database/relation/Ex_ProductSales.tasty deleted file mode 100644 index 3c9463bd4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Ex_ProductSales.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching$.class b/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching$.class deleted file mode 100644 index 79e21ed3b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching$package$.class b/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching$package$.class deleted file mode 100644 index 6489039e6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching$package.class b/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching$package.class deleted file mode 100644 index 1cd7e8487..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching$package.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching$package.tasty b/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching$package.tasty deleted file mode 100644 index 042734a59..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching.class b/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching.class deleted file mode 100644 index 23f9287cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching.tasty b/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching.tasty deleted file mode 100644 index dd91369df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Ex_Teaching.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Relation$.class b/target/scala-3.6.4/classes/scalation/database/relation/Relation$.class deleted file mode 100644 index 706822bb6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Relation$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Relation$package$.class b/target/scala-3.6.4/classes/scalation/database/relation/Relation$package$.class deleted file mode 100644 index 7709578c4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Relation$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Relation$package.class b/target/scala-3.6.4/classes/scalation/database/relation/Relation$package.class deleted file mode 100644 index f7f01100e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Relation$package.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/relation/Relation$package.tasty b/target/scala-3.6.4/classes/scalation/database/relation/Relation$package.tasty deleted file mode 100644 index fd32a93fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Relation$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Relation.class b/target/scala-3.6.4/classes/scalation/database/relation/Relation.class deleted file mode 100644 index d13204e3d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Relation.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Relation.tasty b/target/scala-3.6.4/classes/scalation/database/relation/Relation.tasty deleted file mode 100644 index 702c297e3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Relation.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/TableGen$.class b/target/scala-3.6.4/classes/scalation/database/relation/TableGen$.class deleted file mode 100644 index d748c78d0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/TableGen$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/TableGen$package$.class b/target/scala-3.6.4/classes/scalation/database/relation/TableGen$package$.class deleted file mode 100644 index 26c37ff2c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/TableGen$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/TableGen$package.class b/target/scala-3.6.4/classes/scalation/database/relation/TableGen$package.class deleted file mode 100644 index e7b32c030..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/TableGen$package.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/relation/TableGen$package.tasty b/target/scala-3.6.4/classes/scalation/database/relation/TableGen$package.tasty deleted file mode 100644 index 50cde2e2e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/TableGen$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/TableGen.class b/target/scala-3.6.4/classes/scalation/database/relation/TableGen.class deleted file mode 100644 index 54ae0c163..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/TableGen.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/TableGen.tasty b/target/scala-3.6.4/classes/scalation/database/relation/TableGen.tasty deleted file mode 100644 index 4be1f4e72..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/TableGen.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Vectr$package$.class b/target/scala-3.6.4/classes/scalation/database/relation/Vectr$package$.class deleted file mode 100644 index f5f6fde6a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Vectr$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Vectr$package.class b/target/scala-3.6.4/classes/scalation/database/relation/Vectr$package.class deleted file mode 100644 index 4eab34c22..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Vectr$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/Vectr$package.tasty b/target/scala-3.6.4/classes/scalation/database/relation/Vectr$package.tasty deleted file mode 100644 index c9e3d6b07..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/Vectr$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/relation/index.html b/target/scala-3.6.4/classes/scalation/database/relation/index.html deleted file mode 100644 index 255e7653a..000000000 --- a/target/scala-3.6.4/classes/scalation/database/relation/index.html +++ /dev/null @@ -1,12 +0,0 @@ - - -

    Source files in relation Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/database/relation/old/Relation.scala.bak b/target/scala-3.6.4/classes/scalation/database/relation/old/Relation.scala.bak deleted file mode 100644 index c2aa700d1..000000000 --- a/target/scala-3.6.4/classes/scalation/database/relation/old/Relation.scala.bak +++ /dev/null @@ -1,3087 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Yang Fan, Vinay Bingi, Santosh Uttam Bobade - * @version 1.6 - * @date Sun Aug 23 15:42:06 EDT 2015 - * @see LICENSE (MIT style license file). - * - * An implementation supporting columnar relational databases facilitating easy - * and rapid analytics. The columns in a relation are vectors from the - * `scalation.linalgebra` package. Vectors and matrices may be readily extracted - * from a relation and feed into any of the numerous analytics techniques provided - * in `scalation.analytics`. The implementation provides most of the columnar - * relational algebra operators given in the following paper: - * @see db.csail.mit.edu/projects/cstore/vldb.pdf - * - * Some of the operators have unicode versions: @see `scalation.util.UnicodeTest` - * - * Supports Time Series Databases (TSDB) via `TimeNum` domain/datatype and 'leftJoinApx' - * 'rightJoinApx' methods. 
- */ - -package scalation -package columnar_db - -import java.io._ - -import scala.collection.mutable.{ArrayBuffer, HashMap, IndexedSeq, Map} -import scala.concurrent.{Await, Future} -import scala.concurrent.ExecutionContext.Implicits.global -import scala.concurrent.duration._ -import scala.math.{min => MIN} -import scala.io.Source.fromInputStream -import scala.reflect.ClassTag - -import scalation.linalgebra._ -import scalation.linalgebra.Vec_Elem.{<, =~, !=~} -import scalation.linalgebra.MatrixKind._ -import scalation.math.{Complex, Rational, Real} -import scalation.math.StrO.StrNum -import scalation.math.TimeO.{TimeNum, setThreshold} -import scalation.math.{noComplex, noDouble, noInt, noLong, noRational, noReal, noStrNum, noTimeNum} -import scalation.util.{banner, Error, getFromURL_File, MergeSortIndirect, ReArray, removeAt, time} - -import TableObj._ -import columnar_db._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Relation` companion object provides additional functions for the `Relation` - * class. - * FIX - apply methods - make compatible with RelationSQL - */ -object Relation - extends Error -{ - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an unpopulated relation. - * @param name the name of the relation - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'StrNum', 'Double') - * @param colName the names of columns - */ - def apply (name: String, key: Int, domain: String, colName: String*): Relation = - { - val n = colName.length - val colName_ = ArrayBuffer (colName :_* ) - new Relation (name, colName_, Vector.fill [Vec] (n)(null), key, domain) - } // apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an unpopulated relation. 
- * @param name the name of the relation - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'StrNum', 'Double') - * @param colName the names of columns - */ - def apply (name: String, key: Int, domain: String, colName: ArrayBuffer [String]): Relation = - { - val n = colName.length - new Relation (name, colName, Vector.fill [Vec] (n)(null), key, domain) - } // apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a relation from a sequence of row/tuples. These rows must be converted - * to columns. - * @param name the name of the relation - * @param colName the names of columns - * @param row the sequence of rows to be converted to columns for the columnar relation - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'StrNum', 'Double') - */ - def apply (name: String, colName: ArrayBuffer [String], row: ArrayBuffer [Row], key: Int, domain: String): Relation = - { - val equivCol = Vector.fill [Vec] (colName.length)(null) - val r2 = new Relation (name, colName, equivCol, key, domain) - for (tuple <- row) r2.add (tuple) - r2.materialize () - } // apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a relation from a sequence of row/tuples. These rows must be converted - * to columns. 
- * @param name the name of the relation - * @param colName the names of columns - * @param row the sequence of rows to be converted to columns for the columnar relation - * @param key the column number for the primary key (< 0 => no primary key) - */ - def apply (name: String, colName: ArrayBuffer [String], row: ArrayBuffer [Row], key: Int): Relation = - { - val equivCol = Vector.fill [Vec] (colName.length)(null) - val r2 = new Relation (name, colName, equivCol, key, null) - for (tuple <- row) r2.add (tuple) - r2.materialize () - } // apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the relation with the given 'name' into memory using serialization. - * @param name the name of the relation to load - */ - def apply (name: String): Relation = - { - val ois = new ObjectInputStream (new FileInputStream (STORE_DIR + name + SER)) - val obj = ois.readObject () - ois.close () - val res = obj.asInstanceOf [Relation] - res - } // apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the relation with the given 'name' into memory loading its columns - * with data from the CSV file named 'fileName'. 
- * Note: "ln.split (eSep, -1)" will keep all values even if empty "one,,three" -> "one","",three" - * @param fileName the file name of the data file - * @param name the name of the relation - * @param colName the names of columns - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'StrNum', 'Double') - * @param skip the number of lines in the CSV file to skip (e.g., header line(s)) - * @param eSep the element separation string/regex (e.g., "," ";" " +") - */ - def apply (fileName: String, name: String, colName: ArrayBuffer [String], key: Int, - domain: String, skip: Int, eSep: String): Relation = - { - var cnt = skip - val lines = getFromURL_File (fileName) - val newCol = Vector.fill [Vec] (colName.length)(null) - val r3 = new Relation (name, colName, newCol, key, domain) - for (ln <- lines) { - val buf = ArrayBuffer.from (ln.split (eSep, -1)) - if (cnt <= 0) r3.add (r3.row (buf, domain)) else cnt -= 1 - } // for - r3.materialize () - } // apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the relation with the given 'name' into memory loading its columns - * with data from the CSV file named 'fileName'. In this version, the column - * names are read from the first line of the file. 
- * @param fileName the file name of the data file - * @param name the name of the relation - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'StrNum', 'Double') - * @param eSep the element separation string/regex (e.g., "," ";" " +") - * @param cPos the sequence of column positions in the input file to be used (null => select all) - */ - def apply (fileName: String, name: String, key: Int, domain: String, eSep: String, - cPos: ArrayBuffer [Int]): Relation = - { - val lines = getFromURL_File (fileName) - var first = true - var colBuffer: Array [ArrayBuffer [String]] = null - var colName: ArrayBuffer [String] = null - var newCol: Vector [Vec] = null - - if (cPos == null) { // select all columns - for (ln <- lines) { - if (first) { - colName = ArrayBuffer.from (ln.split (eSep, -1).map (_.trim)) - colBuffer = Array.fill (colName.length)(new ArrayBuffer ()) - first = false - } else { - val values = ln.split (eSep, -1).map (_.trim) - for (i <- colName.indices) colBuffer(i) += values(i) - } // if - } // for - } else { // select cPos columns - if (domain.length != cPos.length) { - flaw ("apply", "cPos length should be same as domain length") - } // if - for (ln <- lines) { - if (first) { - val name = ln.split (eSep, -1).map (_.trim) - colName = ArrayBuffer [String] () - colBuffer = Array.fill (cPos.length)(new ArrayBuffer ()) - for (i <- colBuffer.indices) colName += name(cPos(i)) - first = false - } else { - val values = ln.split (eSep, -1).map (_.trim) - for (i <- colName.indices) colBuffer(i) += values(cPos(i)) - } // if - } // for - } // if - new Relation (name, colName, makeCol (colBuffer, domain), key, domain) - } // apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Make the columns for the columnar table from data stored in 'colBuffer'. 
- * @param colBuffer the column buffer holding the data - * @param domain the domains/datatypes for the columns - */ - private def makeCol (colBuffer: Array [ArrayBuffer [String]], domain: String): Vector [Vec] = - { - colBuffer.indices.map (i => - if (domain == null || domain == "") VectorS (colBuffer(i).toArray) - else domain(i) match { - case 'C' => VectorC (colBuffer(i).toArray) // dense vectors - case 'D' => VectorD (colBuffer(i).toArray) - case 'I' => VectorI (colBuffer(i).toArray) - case 'L' => VectorL (colBuffer(i).toArray) - case 'Q' => VectorQ (colBuffer(i).toArray) - case 'R' => VectorR (colBuffer(i).toArray) - case 'S' => VectorS (colBuffer(i).toArray) - case 'T' => VectorT (colBuffer(i).toArray) - - case 'c' => RleVectorC (colBuffer(i).toArray) // compressed vectors - case 'd' => RleVectorD (colBuffer(i).toArray) - case 'i' => RleVectorI (colBuffer(i).toArray) - case 'l' => RleVectorL (colBuffer(i).toArray) - case 'q' => RleVectorQ (colBuffer(i).toArray) - case 'r' => RleVectorR (colBuffer(i).toArray) - case 's' => RleVectorS (colBuffer(i).toArray) - case 't' => RleVectorT (colBuffer(i).toArray) - - case 'χ' => SparseVectorC (colBuffer(i).toArray) // sparse vectors - case 'δ' => SparseVectorD (colBuffer(i).toArray) - case 'ι' => SparseVectorI (colBuffer(i).toArray) - case 'λ' => SparseVectorL (colBuffer(i).toArray) - case 'ϟ' => SparseVectorQ (colBuffer(i).toArray) - case 'ρ' => SparseVectorR (colBuffer(i).toArray) - case 'σ' => SparseVectorS (colBuffer(i).toArray) - case 'τ' => SparseVectorT (colBuffer(i).toArray) - - case _ => flaw ("makeCol", s"domain type ${domain(i)} not supported") - null.asInstanceOf [Vec] - }).toVector - } // makeCol - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the relation with the given 'name' into memory loading its columns - * with data from the CSV file named 'fileName'. In this version, the column - * names are read from the first line of the file. 
It uses 'col2' which is a - * temporary ReArray, and maintains indices. - * @param fileName the file name of the data file - * @param name the name of the relation - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'StrNum', 'Double') - * @param eSep the element separation string/regex (e.g., "," ";" " +") - */ - def apply (fileName: String, name: String, domain: String, key: Int, eSep: String = ","): Relation = - { - var first = true - val lines = getFromURL_File (fileName) - var r3: Relation = null - var currentlineno = 0 - - for (ln <- lines) { - if (first) { - val colName = ArrayBuffer.from (ln.split (eSep, -1)) - val newCol = Vector.fill [Vec] (colName.length)(null) - r3 = new Relation (name, colName, newCol, key, domain) - first = false - } else { - if (currentlineno % 1000 == 0) println (s"$currentlineno") - val buf = ArrayBuffer.from (ln.split (eSep, -1)) - r3.add (r3.row (buf, domain)) - currentlineno += 1 - } // if - } // for - r3.materialize () - } // apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the relation with the given 'name' into memory loading its columns - * with data from the CSV file named 'fileName'. This version assumes - * defaults for 'eSep' and 'skip' of ("," and 0). 
- * @param fileName the file name of the data file - * @param name the name of the relation - * @param colName the names of columns - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'StrNum', 'Double') - */ - def apply (fileName: String, name: String, colName: ArrayBuffer [String], key: Int, - domain: String): Relation = - { - val eSep = "," - val lines = getFromURL_File (fileName) - val newCol = Vector.fill [Vec] (colName.length)(null) - val r3 = new Relation (name, colName, newCol, key, domain) - for (ln <- lines) { - val buf = ArrayBuffer.from (ln.split (eSep, -1)) - r3.add (r3.row (buf, domain)) - } // for - r3.materialize () - } // apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the relation with the given 'name' into memory loading its columns - * with data from the '.arff' file named 'fileName'. - * @param fileName the file name of the data file - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'StrNum', 'Double') - */ - def apply (fileName: String, key: Int, domain: String): Relation = - { - val eSep = "[, ]" - val lines = getFromURL_File (fileName) - var name: String = null - var colBuffer: Array [ArrayBuffer [String]] = null - var colName = ArrayBuffer [String]() - var newCol: Vector [Vec] = null - var foundData = false - for (ln <- lines) { - if (ln.indexOf ("%") == 0) { - // skip comment - } else if (ln.indexOf ("@relation") == 0) { - name = ln.split (eSep, -1)(1) - } else if (ln.indexOf ("@attribute") == 0) { - colName += ln.split(eSep, -1)(1) - } else if (ln.indexOf ("@data") == 0) { - foundData = true - colBuffer = Array.ofDim (colName.length) - for (i <- colBuffer.indices) colBuffer (i) = new ArrayBuffer () - } else if (foundData) { - val values = ln.split (eSep, -1) - 
values.indices.foreach (i => { colBuffer (i) += values (i) }) - } // if - } // for - new Relation (name, colName, colBuffer.indices.map (i => VectorS (colBuffer(i).toArray)).toVector, key, domain) - } // apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the relation with the given 'name' into memory from a JSON file. - * @see https://github.com/FasterXML/jackson-databind - * @author Shubham Vasant Shingate - * FIX - does not work Scala 2.13 - * @param fileName the file name of the JSON file - * @param name the name of the relation to load - * - def apply (fileName: String, name: String): Relation = - { - import scala.jdk.CollectionConverters.asScalaIteratorConverter -// import scala.collection.JavaConverters.asScalaIteratorConverter - - import com.fasterxml.jackson.databind.ObjectMapper - type JSON_ELEM = java.util.LinkedHashMap [String, String] - type JSON_TYPE = java.util.List [JSON_ELEM] - - var jsonList: java.util.List [JSON_ELEM] = null - try { - val objMapper = new ObjectMapper () - val jsonStr = fromInputStream (new FileInputStream (fileName)).mkString - jsonList = objMapper.readValue (jsonStr, classOf [JSON_TYPE]) - } catch { - case e: FileNotFoundException => flaw ("apply", s"file $fileName not found") - case e: IOException => flaw ("apply", s"unable to read $fileName: $e") - } // try - - var splitStr = jsonList.get(0).toString - var arrSize = 0 - var flag = true - val colNames = ArrayBuffer [String] () - while (arrSize != 1) { - val subStr = splitStr.split ("=", 2) - arrSize = subStr.length - if (subStr(0).startsWith ("{") && arrSize != 1) subStr(0) = subStr(0).substring(1) - if (! 
flag && arrSize != 1) subStr(0) = subStr(0).split (", ", 2)(1) - if (arrSize != 1) { - colNames += subStr(0) - splitStr = subStr(1) - flag = false - } // if - } // while - - val rel = new Relation (name, colNames, Vector.fill [Vec] (colNames.length)(null), 0) - for (jsonData <- asScalaIteratorConverter (jsonList.iterator()).asScala) { - val tuple = ArrayBuffer (asScalaIteratorConverter (jsonData.values().iterator()).asScala.toSeq :_*) - rel.add (rel.row (tuple, null)) - } // for - rel.materialize () - rel - } // apply - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a relation from the 'xy' matrix of doubles. - * @param xy the matrix containing the data - * @param name the name of the relation - * @param colName the names of columns - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'StrNum', 'Double') - */ - def fromMatriD (xy: MatriD, name: String, colName: ArrayBuffer [String], key: Int = -1, - domain: String = null): Relation = - { - val newCol = for (j <- 0 until xy.dim2) yield xy.col (j).asInstanceOf [Vec] - new Relation (name, colName, newCol.toVector, key, domain) - } // fromMatriD - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a relation from the 'x' matrix of doubles and 'y' vector of doubles - * or integers. 
- * @param x the matrix containing the data - * @param y the vector containing the data - * @param name the name of the relation - * @param colName the names of columns - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'StrNum', 'Double') - */ - def fromMatriD_ (x: MatriD, y: Vec, name: String, colName: ArrayBuffer [String], key: Int = -1, - domain: String = null): Relation = - { - val newCol = for (j <- 0 until x.dim2) yield x.col (j).asInstanceOf [Vec] - new Relation (name, colName, newCol.toVector :+ y, key, domain) - } // fromMatriD_ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a relation from the 'xy' matrix of integers. - * @param xy the matrix containing the data - * @param name the name of the relation - * @param colName the names of columns - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'StrNum', 'Double') - */ - def fromMatriI (xy: MatriI, name: String, colName: ArrayBuffer [String], key: Int = -1, - domain: String = null): Relation = - { - val newCol = for (j <- 0 until xy.dim2) yield xy.col (j).asInstanceOf [Vec] - new Relation (name, colName, newCol.toVector, key, domain) - } // fromMatriI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a relation from the 'xy' matrix of integers and 'y' vector of integers. 
- * @param x the matrix containing the data - * @param y the vector containing the data - * @param name the name of the relation - * @param colName the names of columns - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'StrNum', 'Double') - */ - def fromMatriII (x: MatriI, y: VectorI, name: String, colName: ArrayBuffer [String], key: Int = -1, - domain: String = null): Relation = - { - val newCol = for (j <- 0 until x.dim2) yield x.col (j).asInstanceOf [Vec] - new Relation (name, colName, newCol.toVector :+ y, key, domain) - } // fromMatriII - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the count (number of elements) of each of the columns of columnar - * relation 'r'. - * @param r the given relation - */ - def count (r: Relation): IndexedSeq [Int] = ArrayBuffer (r.col.map (_.size) :_*) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the minimum of each of the columns of columnar relation 'r'. - * @param r the given relation - */ - def min (r: Relation): IndexedSeq [Any] = ArrayBuffer (r.col.map (Vec.min (_))) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the maximum of each of the columns of columnar relation 'r'. - * @param r the given relation - */ - def max (r: Relation): IndexedSeq [Any] = ArrayBuffer (r.col.map (Vec.max (_))) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the mean of each of the columns of columnar relation 'r'. - * @param r the given relation - */ - def sum (r: Relation): IndexedSeq [Any] = ArrayBuffer (r.col.map (Vec.sum (_))) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the mean of each of the columns of columnar relation 'r'. 
- * @param r the given relation - */ - def mean (r: Relation): IndexedSeq [Any] = ArrayBuffer (r.col.map (Vec.mean (_))) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the mean of each of the columns of columnar relation 'r'. - * @param r the given relation - */ - def Ɛ (r: Relation): IndexedSeq [Any] = ArrayBuffer (r.col.map (Vec.mean (_))) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the variance of each of the columns of columnar relation 'r'. - * @param r the given relation - */ - def variance (r: Relation): IndexedSeq [Any] = ArrayBuffer (r.col.map (Vec.variance (_))) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the variance of each of the columns of columnar relation 'r'. - * @param r the given relation - */ - def Ʋ (r: Relation): IndexedSeq [Any] = ArrayBuffer (r.col.map (Vec.variance (_))) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the correlation of column 'i' and 'j' within columnar relation 'r'. - * @param r the given relation - * @param i the first column vector - * @param j the second column vector - */ - def corr (r: Relation, i: Int = 0, j: Int = 1): Double = Vec.corr (r.col(i), r.col(j)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get a Vec of sum of the 'cName' column for the 'r' relation base on each group, - * the result will be the same size. 
- * @param r the relation to operate on - * @param cName sum on column "cName" - */ - def sum (r: Relation, cName: String): Vec = - { - val cPos = r.colMap.get(cName).get - val domainc = r.domain(cPos) - var columnlist:Vec = null - var count = 0 - var pointer = 0 - var sumlist: Vec = null - for (idx <- r.orderedIndex) { -// columnlist = Vec.:+ (columnlist,r.index(idx)(cPos),r.domain,cPos) - columnlist = Vec.:+ (columnlist,r.index(idx)(cPos)) - if (count +1 == r.grouplist(pointer)) { - val thisroundsum = Vec.sum(columnlist) -// sumlist = Vec.:+ (sumlist, thisroundsum, r.domain, cPos) - sumlist = Vec.:+ (sumlist, thisroundsum) - columnlist = null - pointer += 1 - } // if - count += 1 - } // for - sumlist - } // sum - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get a Vec of max of the 'cName' column for the 'r' relation. - * @param r the relation you want to operate on - * @param cName max on column "cName" - */ - def max (r: Relation, cName: String): Vec = - { - val cPos = r.colMap.get(cName).get - val domainc = r.domain(cPos) - var columnlist:Vec = null - var count = 0 - var pointer = 0 - var maxlist: Vec=null - for(idx <- r.orderedIndex) { -// columnlist = Vec.:+ (columnlist,r.index(idx)(cPos),r.domain,cPos) - columnlist = Vec.:+ (columnlist,r.index(idx)(cPos)) - if (count +1 == r.grouplist(pointer)) { - val thisroundsum = Vec.max(columnlist) -// maxlist = Vec.:+ (maxlist, thisroundsum, r.domain, cPos) - maxlist = Vec.:+ (maxlist, thisroundsum) - columnlist = null - pointer += 1 - } // if - count += 1 - } // for - maxlist - } // max - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get a Vec of min of the 'cName' column for the 'r' relation - * @param r the relation you want to operate on - * @param cName min on column "cName" - */ - def min (r: Relation, cName: String): Vec = - { - val cPos = r.colMap.get(cName).get - val domainc = r.domain(cPos) - var columnlist:Vec = null - var count = 
0 - var pointer = 0 - var minlist:Vec=null - for (idx <- r.orderedIndex) { -// columnlist = Vec.:+ (columnlist,r.index(idx)(cPos),r.domain,cPos) - columnlist = Vec.:+ (columnlist,r.index(idx)(cPos)) - if (count +1 == r.grouplist(pointer)) { - val thisroundsum = Vec.min(columnlist) -// minlist = Vec.:+ (minlist, thisroundsum, r.domain, cPos) - minlist = Vec.:+ (minlist, thisroundsum) - columnlist = null - pointer += 1 - } // if - count += 1 - } // for - minlist - } // min - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get a Vec of average of the 'cName' column for the 'r' relation. - * @param r the relation you want to operate on - * @param cName average on column "cName" - */ - def avg (r: Relation, cName: String): Vec = - { - val cPos = r.colMap.get(cName).get - val domainc = r.domain(cPos) - var columnlist: Vec = null - var count = 0 - var pointer = 0 - var avglist: Vec = null - for (idx <- r.orderedIndex) { -// columnlist = Vec.:+ (columnlist, r.index(idx)(cPos), r.domain, cPos) - columnlist = Vec.:+ (columnlist, r.index(idx)(cPos)) - if (count + 1 == r.grouplist(pointer)) { - val thisroundsum = Vec.mean(columnlist) -// avglist = Vec.:+ (avglist, thisroundsum, r.domain, cPos) - avglist = Vec.:+ (avglist, thisroundsum) - columnlist = null - pointer += 1 - } // if - count += 1 - } // for - avglist - } // avg - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get a Vec of count of the 'cName' column for the 'r' relation. 
- * @param r the relation you want to operate on - * @param cName the column name for the column to be counted - */ - def count (r: Relation, cName: String): Vec = - { - val cPos = r.colMap.get(cName).get - var countlist: Vec = null - var i = 0 - for (p <- r.grouplist) { - val count = p - i -// countlist = Vec.:+ (countlist, count, r.domain, cPos) - countlist = Vec.:+ (countlist, count) - i = p - } // for - countlist - } // count - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** From function return cartesian product of all the relations. - * @param relations the relations making up the from clause - */ - def from (relations: Relation*): Relation = - { - var result = relations(0) - for (i <- 1 until relations.size) result = result product relations(i) - result - } // from - -} // Relation object - -import Relation._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Relation` class stores and operates on vectors. The vectors form the - * columns of the columnar relational datastore. Columns may have any of the - * following types: - *

    - * C - `Complex` - `VectorC` - 128 bit complex number a + bi - * D - `Double` - `VectorD` - 64 bit double precision floating point number - * I - `Int` - `VectorI` - 32 bit integer - * L - `Long` - `VectorL` - 64 bit long integer - * Q - `Rational` - `VectorQ` - 128 bit ratio of two long integers - * R - `Real` - `VectorR` - 128 bit quad precision floating point number - * S - `StrNum` - `VectorS` - variable length numeric string - * T - `TimeNum` - `VectorT` - 96 bit time Instant = (Long, Int) - *

    - * FIX - (1) don't allow (public) var - (2) avoid unchecked or incomplete .asInstanceOf [T] - *------------------------------------------------------------------------------ - * @param name the name of the relation - * @param colName the names of columns - * @param col the Scala Vector of columns making up the columnar relation - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'StrNum', 'Double') - * @param fKeys an optional sequence of foreign keys - ArrayBuffer (column name, ref table name, ref column position) - * @param enter whether to enter the newly created relation into the `Catalog` - */ -class Relation (val name: String, val colName: ArrayBuffer [String], var col: Vector [Vec] = null, - val key: Int = 0, val domain: String = null, var fKeys: ArrayBuffer [(String, String, Int)] = null, - enter: Boolean = true) - extends Table with Error with Serializable -{ - private val DEBUG = true // debug flag - private [columnar_db] val colMap = Map [String, Int] () // map column name -> column number - @transient -// private val col2 = Vector.fill (colName.size)(new ReArray [Any]) // efficient holding area for building columns -// private val col2 = Vector [ReArray [Any]] () // efficient holding area for building columns - private var grouplist = Vector [Int] () // rows in group - protected val index = Map [KeyType, Row] () // index that maps a key into row - protected val indextoKey = HashMap [Int, KeyType] () // map index -> key - private var keytoIndex = HashMap [KeyType, Int] () // map key -> index - protected var orderedIndex = Vector [KeyType] () // re-ordering of the key column - - if (col == null) col = Vector.fill [Vec] (colName.length)(null) - if (colName.length != col.length) flaw ("constructor", "incompatible sizes for 'colName' and 'col'") - if (enter) Catalog.add (name, colName, key, domain) - - for (j <- colName.indices) colMap += 
colName(j) -> j - private val col2 = - if (domain == null) (for (j <- colName.indices) yield new ReArray [Any] ()).toVector - else (for (j <- colName.indices) yield - domain(j) match { - case 'C' => new ReArray [Complex] () - case 'D' => new ReArray [Double] () - case 'I' => new ReArray [Int] () - case 'L' => new ReArray [Long] () - case 'Q' => new ReArray [Rational] () - case 'R' => new ReArray [Real] () - case 'S' => new ReArray [StrNum] () - case 'T' => new ReArray [TimeNum] () - case _ => { flaw ("constructor", s"unsupported column type ${domain(j)} for column $j"); null } - }).toVector - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The 'generateIndex' method helps, e.g., the 'popTable', methods to generate - * an index for the table. - * @param reset if reset is true, use old index to build new index; otherwise, create new index - */ - def generateIndex (reset: Boolean = false): Unit = - { - if (! reset) { // create new index - for (i <- 0 until rows) { - val mkey = if (key != -1) new KeyType (row(i)(key)) // key column is specified - else new KeyType(i) // key column is not specified - val tuple = row(i) - index += mkey -> tuple - indextoKey += i -> mkey - keytoIndex += mkey -> i - orderedIndex = orderedIndex :+ mkey - } // for - } else { // use old index to build - val newoderedIndex = new ReArray [KeyType] () - val newkeytoIndex = new HashMap [KeyType, Int] () - for (i <- orderedIndex.indices) { - val mkey = if (key != -1) orderedIndex(i) else new KeyType (i) - val tuple = row(keytoIndex(mkey)) - index += mkey -> tuple - newkeytoIndex += mkey -> i - newoderedIndex.update (newoderedIndex.length, mkey) - } // for - orderedIndex = newoderedIndex.toVector // map old keytoIndex to rowIndex to - keytoIndex = newkeytoIndex - } // if - } // generateIndex - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the size in terms of number of columns in the relation. 
- */ - def cols: Int = col.length - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return all of the columns in the relation. - */ - def columns: Vector [Vec] = col - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the column in the relation with column name 'cName'. - * @param cName column name used to retrieve the column vector - */ - def column (cName: String): Vec = col(colMap (cName)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the names of columns in the relation. - */ - def colNames: ArrayBuffer [String] = colName - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the mapping from column names to column positions. - */ - def colsMap: Map [String, Int] = colMap - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the domains for the columns in the relation. - */ - def domains: String = domain - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a row by pulling values from all columns at position 'i'. - * @param i the 'i'th position - */ - def row (i: Int): Row = col.map (Vec (_, i)).toVector - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the size in terms of number of rows in the relation. - */ - def rows: Int = if (col(0) == null) 0 else col(0).size - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether 'this' relation contains a row matching the given 'tuple'. - * @param tuple an aggregation of columns values (potential row) - */ - def contains (tuple: Row): Boolean = - { - for (i <- 0 until rows if row(i) sameElements tuple) return true - false - } // contains - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Rename 'this' table, returning a shallow copy of 'this' table. 
- * @param newName the new name for the table. - */ - def rename (newName: String): Relation = - { - new Relation (newName, colName, col, key, domain, fKeys) - } // rename - - // ================================================================= PROJECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Project onto the columns with the given column names. - * @param cName the names of the columns to project onto - */ - def project (cName: String*): Relation = project (ArrayBuffer (cName.map (colMap (_)) :_*), - ArrayBuffer (cName :_*)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Project onto the columns with the given column positions using the given - * column names. - * @param cPos the positions of the columns to project onto - * @param cName the names of the columns to project onto - */ - def project (cPos: IndexedSeq [Int], cName: ArrayBuffer [String] = null): Relation = - { - val newCName = if (cName == null) { - val cn = ArrayBuffer [String] () - for (i <- cPos) cn += colName(i) - cn - } else cName - val newCol = cPos.map (col(_)).toVector - val newKey = if (cPos contains key) cPos.indexOf (key) else -1 - val newDomain = projectD (domain, cPos) - new Relation (name + "_p_" + ucount (), newCName, newCol, newKey, newDomain) - } // project - - // ======================================================== EXTENDED PROJECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Aggregate/project on the given columns (an extended projection operator that - * applies aggregate operators to aggregation columns and regular projection - * to projection columns). 
- * @see en.wikipedia.org/wiki/Relational_algebra - * @param aggCol the columns to aggregate on: (aggregate function, new column name, old column name)* - * @param cName the other columns to project on - */ - def eproject (aggCol: AggColumn*)(cName: String*): Relation = - { - val aRange = 0 until aggCol.size - val nCols = aggCol.size + cName.size - val funName = ArrayBuffer [String] () - for (c <- aggCol) { - if (! (colName contains c._3)) throw new IllegalArgumentException (s"column ${c._3} to aggregate on does not exist") - else funName += c._2 - } // for - - if (grouplist.isEmpty) groupBy (colName(key)) - val newCol = Vector.fill [Vec] (nCols)(null) - val newCName = ArrayBuffer ((cName ++ funName) :_*) - var newDomain = cName.map (n => colMap(n)).map (i => domain(i)) - for (i <- aRange) { - newDomain = if (funName(i) contains "count") newDomain :+ 'I' // aggregate's result domain is based on aggregate column - else newDomain :+ domain(colMap(aggCol(i)._3)) - } // for - val r2 = new Relation (name + "_e_" + ucount (), newCName, newCol, key, newDomain.mkString ("")) - if (rows == 0) return r2 // no rows means early return - - val agglist = for (i <- aRange) yield aggCol(i)._1(this, aggCol(i)._3) - if (cName.size != 0) { - val cPos = ArrayBuffer (cName.map (colMap(_)) :_*) // position of cName - val cPos2 = aggCol.map ((a: AggColumn) => colMap(a._3)) // position of aggregate columns - val shrinkR = pi(cPos, null) // projected relation - var row_i = 0 - var group_j = 0 - orderedIndex.foreach (idx => { - var thisrow = shrinkR.row(keytoIndex(idx)) - for (aggf <- agglist.indices) thisrow = thisrow :+ Vec (agglist(aggf), group_j) - r2.add_ni (thisrow) - row_i += 1 - if (row_i == grouplist(group_j)) group_j += 1 - }) // foreach - r2.materialize () - } else { // only project on the aggregate column - for (i <- aRange) { - r2.col = if (i == 0) Vector (agglist(i)) else r2.col :+ agglist(i) - } // for - } // if - r2 - } // eproject - - 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Method 'epiAny' is a special case of epi. When the projected columns can not be - * decided by the group by columns, only one representative will be shown for each group. - * FIX - change name - * @param aggF the aggregate functions you want to use - * @param funName the newly created aggregate columns'names - * @param aggFAttr the columns you want to use of correspondent aggregate functions - * @param cName the columns you want to project on - */ - def epiAny (aggF: ArrayBuffer [AggFunction], funName: ArrayBuffer [String], aggFAttr: ArrayBuffer [String], cName: String*): Relation = - { - aggFAttr.foreach (a => - if (! colName.contains(a)) throw new IllegalArgumentException("the attribute you want to aggregate on does not exists")) - cName.foreach (a => - if (! colName.contains(a)) throw new IllegalArgumentException("the attribute you want to project on does not exists")) - - if (grouplist.isEmpty) groupBy (colName(key)) - val newCol = Vector.fill [Vec](aggFAttr.size + cName.size)(null) - val colNamenew = ArrayBuffer ((cName ++ funName) :_*) - var newDomain = cName.map (n => colMap(n)).map (i => domain(i)) - for (i <- funName.indices) { - newDomain = if (funName(i) contains "count") newDomain :+ 'I' - else newDomain :+ domain(colMap(aggFAttr(i))) - } // for - val r2 = new Relation (name + "_e_" + ucount (), colNamenew, newCol, key, newDomain.mkString ("")) - if (rows == 0) return r2 - - val agglist = for (i <- aggF.indices) yield aggF(i)(this, aggFAttr(i)) - var group_j = 0 - if (cName.size != 0) { - val cPos = ArrayBuffer (cName.map (colMap(_)) :_*) - val shrinkR = pi(cPos, null) - grouplist.foreach (idx => { - var newrow: Vector[Any] = null - val rownumber = keytoIndex(orderedIndex(idx-1)) - newrow = shrinkR.row(rownumber) - for (i<- aggF.indices) { - val aggtemp = Vec (agglist(0), group_j) - newrow = newrow:+ aggtemp - } // for - r2.add_ni (newrow) - group_j += 1 - }) // foreach - 
} // if - r2.materialize () - } // epiAny - - // ========================================================== PROJECT-SELECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Select elements from column 'cName' in 'this' relation that satisfy the - * predicate 'p' and project onto that column. - * @param cName the name of the column used for selection - * @param p the predicate (`Boolean` function) to be satisfied - */ - def pisigmaC (cName: String, p: Complex => Boolean): Relation = - { - val nu = getMeta (cName) - val newCol = Vector (col (nu._1).asInstanceOf [VectorC].filter (p)) - new Relation (name + "_s_" + ucount (), nu._2, newCol, nu._3, nu._4) - } // pisigmaC - - def pisigmaD (cName: String, p: Double => Boolean): Relation = - { - val nu = getMeta (cName) - val newCol = Vector (col (nu._1).asInstanceOf [VectorD].filter (p)) - new Relation (name + "_s_" + ucount (), nu._2, newCol, nu._3, nu._4) - } // pisigmaD - - def pisigmaI (cName: String, p: Int => Boolean): Relation = - { - val nu = getMeta (cName) - val newCol = Vector (col (nu._1).asInstanceOf [VectorI].filter (p)) - new Relation (name + "_s_" + ucount (), nu._2, newCol, nu._3, nu._4) - } // pisigmaI - - def pisigmaL (cName: String, p: Long => Boolean): Relation = - { - val nu = getMeta (cName) - val newCol = Vector (col (nu._1).asInstanceOf [VectorL].filter (p)) - new Relation (name + "_s_" + ucount (), nu._2, newCol, nu._3, nu._4) - } // pisigmaL - - def pisigmaQ (cName: String, p: Rational => Boolean): Relation = - { - val nu = getMeta (cName) - val newCol = Vector (col (nu._1).asInstanceOf [VectorQ].filter (p)) - new Relation (name + "_s_" + ucount (), nu._2, newCol, nu._3, nu._4) - } // pisigmaQ - - def pisigmaR (cName: String, p: Real => Boolean): Relation = - { - val nu = getMeta (cName) - val newCol = Vector (col (nu._1).asInstanceOf [VectorR].filter (p)) - new Relation (name + "_s_" + ucount (), nu._2, newCol, nu._3, nu._4) - } // pisigmaR - - def pisigmaS 
(cName: String, p: StrNum => Boolean): Relation = - { - val nu = getMeta (cName) - val newCol = Vector (col (nu._1).asInstanceOf [VectorS].filter (p)) - new Relation (name + "_s_" + ucount (), nu._2, newCol, nu._3, nu._4) - } // pisigmaS - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get meta-data about the column with name 'cName'. - * @param cName the name of the column - */ - private def getMeta (cName: String): (Int, ArrayBuffer [String], Int, String) = - { - val cn = colMap (cName) // column position - (cn, ArrayBuffer (cName), if (cn == key) key else -1, projectD (domain, IndexedSeq (cn))) - } // getMeta - - // ================================================================== SELECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Select elements from columns in 'cName' in 'this' relation that satisfy - * the predicate 'p'. - * @param cName the name of the column used for selection - * @param p the predicate (`Boolean` function) to be satisfied - */ - def select [T : ClassTag] (cName: String, p: T => Boolean): Relation = - { - if (domain != null) { - domain(colMap (cName)) match { - case 'C' | 'c' | 'χ' => selectAt (selectC (cName, p.asInstanceOf [Complex => Boolean])) - case 'D' | 'd' | 'δ' => selectAt (selectD (cName, p.asInstanceOf [Double => Boolean])) - case 'I' | 'i' | 'ι' => selectAt (selectI (cName, p.asInstanceOf [Int => Boolean])) - case 'L' | 'l' | 'λ' => selectAt (selectL (cName, p.asInstanceOf [Long => Boolean])) - case 'Q' | 'q' | 'ϟ' => selectAt (selectQ (cName, p.asInstanceOf [Rational => Boolean])) - case 'R' | 'r' | 'ρ' => selectAt (selectR (cName, p.asInstanceOf [Real => Boolean])) - case 'S' | 's' | 'σ' => selectAt (selectS (cName, p.asInstanceOf [StrNum => Boolean])) - case 'T' | 't' | 'τ' => selectAt (selectT (cName, p.asInstanceOf [TimeNum => Boolean])) - case _ => { flaw ("select", "predicate type not supported"); null } - } // match - } else { - flaw 
("select", "optional domains not given - use type specific sigma?") - null - } // if - } // select - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the relation whose rows are equal to 'value' in the column with the given name. - * @param cv the (column-name, value) pair, e.g., ("time", 5.00) - */ - def == [T : ClassTag] (cv: (String, T)): Relation = select [T] (cv._1, (x: T) => x == cv._2) - def != [T : ClassTag] (cv: (String, T)): Relation = select [T] (cv._1, (x: T) => x != cv._2) - def < [T <: Ordered [T] : ClassTag] (cv: (String, T)): Relation = select [T] (cv._1, (x: T) => x < cv._2) - def <= [T <: Ordered [T] : ClassTag] (cv: (String, T)): Relation = select [T] (cv._1, (x: T) => x <= cv._2) - def > [T <: Ordered [T] : ClassTag] (cv: (String, T)): Relation = select [T] (cv._1, (x: T) => x > cv._2) - def >= [T <: Ordered [T] : ClassTag] (cv: (String, T)): Relation = select [T] (cv._1, (x: T) => x >= cv._2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Select elements from columns in 'cName' in 'this' relation that satisfy - * the predicate 'p'. - * @param cName the name of the column used for selection - * @param p the predicate (`Boolean` function) to be satisfied - */ - def sigmaC (cName: String, p: Complex => Boolean): Relation = selectAt (selectC (cName, p)) - - def sigmaD (cName: String, p: Double => Boolean): Relation = selectAt (selectD (cName, p)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The parellel version of 'selectD'. 
- * FIX - move to .par package - * @param cName column to select on - * @param p predicate to select - * - def sigmaDpar (cName: String, p: Double => Boolean): Relation = - { - val filtercol = new scalation.linalgebra.par.VectorD (col (colMap (cName)).asInstanceOf [VectorD].toArray) - selectAt (filtercol.filterPos (p)) - } // sigmaDpar - */ - - def sigmaI (cName: String, p: Int => Boolean): Relation = selectAt (selectI (cName, p)) - - def sigmaL (cName: String, p: Long => Boolean): Relation = selectAt (selectL (cName, p)) - - def sigmaQ (cName: String, p: Rational => Boolean): Relation = selectAt (selectQ (cName, p)) - - def sigmaR (cName: String, p: Real => Boolean): Relation = selectAt (selectR (cName, p)) - - def sigmaS (cName: String, p: StrNum => Boolean): Relation = selectAt (selectS (cName, p)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Select the positions of elements from columns in 'cName' in 'this' relation - * that satisfy the predicate 'p'. 
- * @param cName the name of the column used for selection - * @param p the predicate (`Boolean` function) to be satisfied - */ - def selectC (cName: String, p: Complex => Boolean): IndexedSeq [Int] = - { - col (colMap (cName)).asInstanceOf [VectorC].filterPos (p) - } // selectC - - def selectD (cName: String, p: Double => Boolean): IndexedSeq [Int] = - { - col (colMap (cName)).asInstanceOf [VectorD].filterPos (p) - } // selectD - - def selectI (cName: String, p: Int => Boolean): IndexedSeq [Int] = - { - col (colMap (cName)).asInstanceOf [VectorI].filterPos (p) - } // selectI - - def selectL (cName: String, p: Long => Boolean): IndexedSeq [Int] = - { - col (colMap (cName)).asInstanceOf [VectorL].filterPos (p) - } // selectL - - def selectQ (cName: String, p: Rational => Boolean): IndexedSeq [Int] = - { - col (colMap (cName)).asInstanceOf [VectorQ].filterPos (p) - } // selectQ - - def selectR (cName: String, p: Real => Boolean): IndexedSeq [Int] = - { - col (colMap (cName)).asInstanceOf [VectorR].filterPos (p) - } // selectR - - def selectS (cName: String, p: StrNum => Boolean): IndexedSeq [Int] = - { - col (colMap (cName)).asInstanceOf [VectorS].filterPos (p) - } // selectS - - def selectT (cName: String, p: TimeNum => Boolean): IndexedSeq [Int] = - { - col (colMap (cName)).asInstanceOf [VectorT].filterPos (p) - } // selectT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Select across all columns at the specified column positions. - * @param pos the specified column positions - */ - def selectAt (pos: IndexedSeq [Int]): Relation = - { - val newCol = (for (j <- col.indices) yield Vec.select (col(j), pos)).toVector - new Relation (name + "_s_" + ucount (), colName, newCol, key, domain) - } // selectAt - - // =========================================================== SET OPERATORS - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Union 'this' relation and 'r2'. 
Check that the two relations are compatible. - * If they are not, return the first 'this' relation. - * @param r2 the other relation - */ - def union (r2: Table): Relation = - { - if (incompatible (r2)) return this // take only this relation - -// if (col(0) == null) return if (r2.col(0) == null) null else r2 -// else if (r2.col(0) == null) return this - - val newCol = (for (j <- col.indices) yield Vec.++ (col(j), r2.columns(j))) - new Relation (name + "_u_" + ucount (), colName, newCol.toVector, -1, domain) - } // union - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Intersect 'this' relation and 'r2'. Check that the two relations are compatible. - * Use index to finish intersect operation. - * @param _r2 the other relation - */ - def intersect (_r2: Table): Relation = - { - val r2 = _r2.asInstanceOf [Relation] - if (incompatible (r2)) return null - - val newCol = Vector.fill [Vec] (colName.length) (null) - val r3 = new Relation (name + "_u_" + ucount (), colName, newCol, -1, domain) - - for (i <- orderedIndex.indices) { - if (r2.keytoIndex isDefinedAt orderedIndex(i)) { - if (row(i) sameElements r2.row(r2.keytoIndex (orderedIndex(i)))) r3.add_ni (row(i)) - } // if - } // for - r3.materialize () - } // intersect - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Intersect 'this' relation and 'r2'. Check that the two relations are compatible. - * Slower and only to be used if there is no index. 
- * @param r2 the other relation - */ - def intersect2 (r2: Table): Relation = - { - if (incompatible (r2)) return null - - val newCol = Vector.fill [Vec] (colName.length)(null) - val r3 = new Relation (name + "_u_" + ucount (), colName, newCol.toVector, -1, domain) - for (i <- 0 until rows if r2 contains row(i)) r3.add (row(i)) - r3.materialize () - } // intersect2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Take the difference of 'this' relation and 'r2' ('this - r2'). Check that - * the two relations are compatible. - * @param r2 the other relation - */ - def minus (r2: Table): Relation = - { - if (incompatible (r2)) return null - - val newCol = Vector.fill [Vec] (colName.length)(null) - val r3 = new Relation (name + "_m_" + ucount (), colName, newCol, key, domain) - for (i <- 0 until rows if ! (r2 contains row(i))) r3.add (row(i)) - r3.materialize () - } // minus - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Take the difference of 'this' relation and 'r2' ('this - r2'). Check that - * the two relations are compatible. Indexed based minus. - * @param _r2 the other relation - */ - def minus2 (_r2: Table): Relation = - { - val r2 = _r2.asInstanceOf [Relation] - if (incompatible (r2)) return null - - val newCol = Vector.fill [Vec] (colName.length)(null) - val r3 = new Relation (name + "_m_" + ucount (), colName, newCol, key, domain) - for (i <- orderedIndex.indices) { - if (r2.keytoIndex isDefinedAt orderedIndex(i)) { - if (! (row(i) sameElements r2.row(r2.keytoIndex (orderedIndex(i))))) r3.add_ni (row(i)) - } else { - r3.add_ni (row(i)) - } // if - } // for - r3.materialize () - } // minus2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether any rows/tuples exist in 'this' relation. 
- */ - def exists: Boolean = rows > 0 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether 'this' relation and 'r2' are incompatible by having - * differing numbers of columns or differing domain strings. - * @param r2 the other relation/table - */ - def incompatible (r2: Table): Boolean = - { - if (cols != r2.cols) { - flaw ("incompatible", s"${this.name} and r2 have differing number of columns") - true - } else if (domains != r2.domains) { - flaw ("incompatible", "${this.name} and r2 have differing domain strings") - true - } else { - false - } // if - } // incompatible - - // ================================================================= PRODUCT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the Cartesian product of this' relation and 'r2' ('this × r2'). - * @param r2 the second relation - */ - def product (r2: Table): Relation = - { - val ncols = cols + r2.cols - val newCName = disambiguate (colName, r2.colNames) - val newCol = Vector.fill [Vec] (ncols) (null) - val newKey = key // FIX - val newDomain = domain + r2.domains - val r3 = new Relation (name + "_j_" + ucount (), newCName, newCol, newKey, newDomain) - - for (i <- 0 until rows) { - val t = row(i) - for (j <- 0 until r2.rows) r3.add (t ++ r2.row(j)) - } // for - r3.materialize () - } // product - - // ==================================================================== JOIN - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing an "equi-join". Rows from both - * relations are compared requiring 'cName1' values to equal 'cName2' values. - * Disambiguate column names by appending "2" to the end of any duplicate column name. 
- * @param cName1 the join column names of this relation (e.g., the Foreign Key) - * @param cName2 the join column names of relation r2 (e.g., the Primary Key) - * @param r2 the rhs relation in the join operation - */ - def join (cName1: ArrayBuffer [String], cName2: ArrayBuffer [String], r2: Table): Relation = - { - val ncols = cols + r2.cols - val cp1 = cName1.map (colMap (_)) // get column positions in 'this' - val cp2 = cName2.map (r2.colsMap (_)) // get column positions in 'r2' - if (cp1.length != cp2.length) flaw ("join", "incompatible sizes on match columns") - - val newCName = disambiguate (colName, r2.colNames) - val newCol = Vector.fill [Vec] (ncols) (null) - val newKey = key // FIX - val newDomain = domain + r2.domains - val r3 = new Relation (name + "_j_" + ucount (), newCName, newCol, newKey, newDomain) - - for (i <- 0 until rows) { - val t = row(i) - for (j <- 0 until r2.rows) { - val u = r2.row(j) - if (sameOn (t, u, cp1, cp2)) r3.add (t ++ u) - } // for - } // for - r3.materialize () - } // join - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing an "equi-join", use index to join - * @param cName1 the join column names of this relation (e.g., the Foreign Key) - * @param cName2 the join column names of relation r2 (e.g., the Primary Key) - * @param _r2 the rhs relation in the join operation - */ - def joinindex (cName1: ArrayBuffer [String], cName2: ArrayBuffer [String], _r2: Table): Relation = - { - val r2 = _r2.asInstanceOf [Relation] - val ncols = cols + r2.cols - val cp1 = cName1.map (colMap (_)) // get column positions in 'this' - val cp2 = cName2.map (r2.colMap (_)) // get column positions in 'r2' - if (cp1.length != cp2.length) flaw ("join", "incompatible sizes on match columns") - - val newCName = disambiguate (colName, r2.colName) - val newCol = Vector.fill [Vec] (ncols)(null) - val newKey = if (r2.key == cp2(0)) key // foreign key in this relation - else if (key 
== cp1(0)) r2.key // foreign key in r2 table - else -1 // key not in join and composite keys not allowed - - val newDomain = domain + r2.domains - val r3 = new Relation (name + "_j_" + ucount (), newCName, newCol, newKey, newDomain) - - if (cp1.size == 1 && cp2.size == 1) { - if (key == cp1(0) && r2.key == cp2(0)) { - for (k <- orderedIndex) { - val t = index(k) - val u = r2.index.getOrElse (k, null) - if (u != null) r3.add_ni (t ++ u) - } // for - } else if (key == cp1(0)) { - for (idx <- r2.orderedIndex) { - val u = r2.index(idx) - val t = index.getOrElse (new KeyType (u(cp2(0))), null) - if (t != null) r3.add_ni (t ++ u) - r3.add_ni(t ++ u) - } // for - } else if (r2.key == cp2(0)) { - for (idx <- orderedIndex) { - val t = index(idx) - val u = r2.index.getOrElse (new KeyType (t(cp1(0))), null) - if (u != null) r3.add_ni (t ++ u) - } // for - } // if - } // if - r3.materialize () - } // joinindex - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing a "natural-join". Rows from both - * relations are compared requiring 'cName' values to be equal. 
- * @param cName the common join column names for both relation - * @param _r2 the rhs relation in the join operation - */ - def join (cName: ArrayBuffer [String], _r2: Table): Relation = - { - val r2 = _r2.asInstanceOf [Relation] - val ncols = cols + r2.cols - cName.length - val cp1 = cName.map (colMap (_)) // get column positions in 'this' - val cp2 = cName.map (r2.colMap (_)) // get column positions in 'r2' - var newDomain2 = r2.domain - for (i <- cp1.length - 1 to 0 by -1) { - val (cp1_i, cp2_i) = (cp1(i), cp2(i)) - if (domain(cp1_i) != r2.domain(cp2_i)) flaw ("join", s"column types do not match: $cp1, $cp2") - newDomain2 = removeAt (newDomain2, cp2_i) - } // for - val cp3 = r2.colName.map (r2.colMap (_)) diff cp2 // 'r2' specific columns - - val newCName = uniq_union (colName, r2.colName) - val newCol = Vector.fill [Vec] (ncols) (null) - val newKey = key // FIX - val newDomain = domain + newDomain2 - val r3 = new Relation (name + "_j_" + ucount (), newCName, newCol, newKey, newDomain) - - for (i <- 0 until rows) { - val t = row(i) - for (j <- 0 until r2.rows) { - val u = r2.row(j) - if (sameOn (t, u, cp1, cp2)) { val u3 = TableObj.project (u, cp3); r3.add (t ++ u3) } - } // for - } // for - r3.materialize () - } // join - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The theta join, handle the predicates in where are connect by "and" (where a....and b....). 
- * @param _r2 the second relation - * @param p0 the first theta join predicate (r1 cName, r2 cName, predicate to compare these two column) - * @param p the rest of theta join predicates (r1 cName, r2 cName, predicates to compare these two column) - */ - def join [T] (_r2: Table, p0: Predicate2 [T], p: Predicate2 [T]*): Relation = - { - val r2 = _r2.asInstanceOf [Relation] - val ncols = cols + r2.cols - val newCName = disambiguate (colName, r2.colName) - val newCol = Vector.fill [Vec] (ncols) (null) - val newKey = key // FIX - val newDomain = domain + r2.domain - val r3 = new Relation (name + "_j_" + ucount (), newCName, newCol, newKey, newDomain, null) - - var resultlist = IndexedSeq [(Int, Int)] () - for (i <- 0 to p.size) { - var result = IndexedSeq [(Int, Int)] () - val p_i = if (i == 0) p0 else p(i-1) - val cp1 = colMap (p_i._1) - val cp2 = r2.colMap (p_i._2) - if (domain.charAt (cp1) != r2.domain.charAt (cp2)) flaw ("join", "differing domain strings") - val psingle = p_i._3 // single predicate - - domain (colMap(p_i._1)) match { - case 'C' => - val cols1 = col(cp1).asInstanceOf [VectorC] - val cols2 = r2.col(cp2).asInstanceOf [VectorC] - result = cols1.filterPos2 (cols2, psingle.asInstanceOf [(Complex, Complex) => Boolean]) - case 'D' => result = col(cp1).asInstanceOf [VectorD].filterPos2 (r2.col (cp2).asInstanceOf [VectorD], - psingle.asInstanceOf [(Double, Double) => Boolean]) - case 'I' => result = col(cp1).asInstanceOf [VectorI].filterPos2 (r2.col (cp2).asInstanceOf [VectoI], - psingle.asInstanceOf [(Int, Int) => Boolean]) - case 'L' => result = col(cp1).asInstanceOf [VectorL].filterPos2 (r2.col (cp2).asInstanceOf [VectorL], - psingle.asInstanceOf [(Long, Long) => Boolean]) - case 'Q' => result = col(cp1).asInstanceOf [VectorQ].filterPos2 (r2.col (cp2).asInstanceOf [VectorQ], - psingle.asInstanceOf [(Rational, Rational) => Boolean]) - case 'R' => result = col(cp1).asInstanceOf [VectorR].filterPos2 (r2.col (cp2).asInstanceOf [VectorR], - 
psingle.asInstanceOf [(Real, Real) => Boolean]) - case 'S' => result = col(cp1).asInstanceOf [VectorS].filterPos2 (r2.col (cp2).asInstanceOf [VectorS], - psingle.asInstanceOf [(StrNum, StrNum) => Boolean]) - case 'T' => result = col(cp1).asInstanceOf [VectorT].filterPos2 (r2.col (cp2).asInstanceOf [VectorT], - psingle.asInstanceOf [(TimeNum, TimeNum) => Boolean]) - - case 'c' => result = col(cp1).asInstanceOf [RleVectorC].filterPos2 (r2.col (cp2).asInstanceOf [RleVectorC], - psingle.asInstanceOf [(Complex, Complex) => Boolean]) - case 'd' => result = col(cp1).asInstanceOf [RleVectorD].filterPos2 (r2.col (cp2).asInstanceOf [RleVectorD], - psingle.asInstanceOf [(Double, Double) => Boolean]) - case 'i' => result = col(cp1).asInstanceOf [RleVectorI].filterPos2 (r2.col (cp2).asInstanceOf [RleVectorI], - psingle.asInstanceOf [(Int, Int) => Boolean]) - case 'l' => result = col(cp1).asInstanceOf [RleVectorL].filterPos2 (r2.col (cp2).asInstanceOf [RleVectorL], - psingle.asInstanceOf [(Long, Long) => Boolean]) - case 'q' => result = col(cp1).asInstanceOf [RleVectorQ].filterPos2 (r2.col (cp2).asInstanceOf [RleVectorQ], - psingle.asInstanceOf [(Rational, Rational) => Boolean]) - case 'r' => result = col(cp1).asInstanceOf [RleVectorR].filterPos2 (r2.col (cp2).asInstanceOf [RleVectorR], - psingle.asInstanceOf [(Real, Real) => Boolean]) - case 's' => result = col(cp1).asInstanceOf [RleVectorS].filterPos2 (r2.col (cp2).asInstanceOf [RleVectorS], - psingle.asInstanceOf [(StrNum, StrNum) => Boolean]) - case 't' => result = col(cp1).asInstanceOf [RleVectorT].filterPos2 (r2.col (cp2).asInstanceOf [RleVectorT], - psingle.asInstanceOf [(TimeNum, TimeNum) => Boolean]) - - case 'χ' => result = col(cp1).asInstanceOf [SparseVectorC].filterPos2 (r2.col (cp2).asInstanceOf [SparseVectorC], - psingle.asInstanceOf [(Complex, Complex) => Boolean]) - case 'δ' => result = col(cp1).asInstanceOf [SparseVectorD].filterPos2 (r2.col (cp2).asInstanceOf [SparseVectorD], - psingle.asInstanceOf [(Double, 
Double) => Boolean]) - case 'ι' => result = col(cp1).asInstanceOf [SparseVectorI].filterPos2 (r2.col (cp2).asInstanceOf [SparseVectorI], - psingle.asInstanceOf [(Int, Int) => Boolean]) - case 'λ' => result = col(cp1).asInstanceOf [SparseVectorL].filterPos2 (r2.col (cp2).asInstanceOf [SparseVectorL], - psingle.asInstanceOf [(Long, Long) => Boolean]) - case 'ϟ' => result = col(cp1).asInstanceOf [SparseVectorQ].filterPos2 (r2.col (cp2).asInstanceOf [SparseVectorQ], - psingle.asInstanceOf [(Rational, Rational) => Boolean]) - case 'ρ' => result = col(cp1).asInstanceOf [SparseVectorR].filterPos2 (r2.col (cp2).asInstanceOf [SparseVectorR], - psingle.asInstanceOf [(Real, Real) => Boolean]) - case 'σ' => result = col(cp1).asInstanceOf [SparseVectorS].filterPos2 (r2.col (cp2).asInstanceOf [SparseVectorS], - psingle.asInstanceOf [(StrNum, StrNum) => Boolean]) - case 'τ' => result = col(cp1).asInstanceOf [SparseVectorT].filterPos2 (r2.col (cp2).asInstanceOf [SparseVectorT], - psingle.asInstanceOf [(TimeNum, TimeNum) => Boolean]) - - case _ => flaw ("join", "domain string is missing"); null - } // match - - if (DEBUG) println (s"join: after predicate $i: result = $result") - resultlist = if (i == 0) result else resultlist intersect result - } // for - - val smallmapbig = resultlist.groupBy (_._1) - for (i <- smallmapbig.keySet.toVector.sorted) { - val t = if (key < 0) index(KeyType(i)) else index(indextoKey(i)) - val bigindexs = smallmapbig (i).map (x => x._2) - for (j <- bigindexs) { - val u = if (r2.key < 0) r2.index(KeyType (j)) else r2.index(r2.indextoKey(j)) - r3.add (t ++ u) - } // for - } // for - r3.materialize () - } // join - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing an "left-join". Rows from both - * relations are compared requiring 'cName1' values to equal 'cName2' values. - * Disambiguate column names by appending "2" to the end of any duplicate column name. 
- * All rows from the left table are maintained with missing values indicators used - * where needed. - * @param cName1 the join column names of this relation (e.g., the Foreign Key) - * @param cName2 the join column names of relation r2 (e.g., the Primary Key) - * @param r2 the rhs relation in the join operation - */ - def leftJoin (cName1: String, cName2: String, r2: Table): Relation = - { - leftJoin (colMap (cName1), colMap (cName2), r2.asInstanceOf [Relation]) - } // leftJoin - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing a "left join". Rows from both - * relations are compared requiring 'cp1' values to equal 'cp2' values. - * This method returns all the rows from 'this' relation, and the matched rows - * from relation 'r2'. It adds a 'null' tuples for the unmatched rows of relation 'r2' - * FIX: It requires relations 'this' and 'r2' to be sorted on column 'cp1' and 'cp2' resp., as it uses Sort-Merge join - * @param cp1 the position of the join column of this relation - * @param cp2 the position of the join column of 'r2' relation - * @param r2 the rhs relation in the join operation - */ - def leftJoin (cp1: Int, cp2: Int, r2: Relation): Relation = - { - val r3 = Relation (name + "_leftJoin_" + r2.name, key, domain + r2.domain, colName ++ r2.colName) - val absentTuple = nullTuple (r2.domain) - var j = 0 - for (i <- 0 until rows) { - val t = row(i) - val t_cp1 = t(cp1) - while (j < r2.rows-1 && Vec_Elem.<(Vec (r2.col(cp2), j), t_cp1)) j += 1 - val j_aux = j - if (t_cp1 == r2.row(j)(cp2)) { - while (j < r2.rows && Vec (r2.col(cp2), j) == t_cp1) { - val u = r2.row(j) - r3.add_ni (t ++ u) - j += 1 - } // while - j = j_aux - } else r3.add_ni (t ++ absentTuple) - } // for - r3.materialize () - } // leftJoin - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing a "right join". 
Rows from both - * relations are compared requiring 'cp1' values to equal 'cp2' values. - * This method returns all the rows from 'this' relation, and the matched rows - * from relation 'r2'. It adds a 'null' tuples for the unmatched rows of relation 'r2' - * @param cp1 the position of the join column of this relation - * @param cp2 the position of the join column of 'r2' relation - * @param r2 the rhs relation in the join operation - */ - def rightJoin (cp1: Int, cp2: Int, r2: Relation): Relation = - { - r2.leftJoin (cp2, cp1, this) - } // rightJoin - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing an "apprimate left-join". Rows from both - * relations are compared requiring 'cName1' values to apprximately equal 'cName2' values. - * Disambiguate column names by appending "2" to the end of any duplicate column name. - * All rows from the left table are maintained with missing values indicators used - * where needed. - * @param thres the approximate equality threshold - * @param cName1 the join column names of this relation (e.g., the Foreign Key) - * @param cName2 the join column names of relation r2 (e.g., the Primary Key) - * @param r2 the rhs relation in the join operation - */ - def leftJoin (thres: Double = 0.001) (cName1: String, cName2: String, r2: Table): Relation = - { - setThreshold (thres) - leftJoinApx (colMap (cName1), colMap (cName2), r2.asInstanceOf [Relation]) - } // leftJoin - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing a "left join". Rows from both - * relations are compared requiring 'cp1' values to approximately equal 'cp2' values. - * This method returns all the rows from 'this' relation, and the matched rows - * from relation 'r2'. 
It adds a 'null' tuples for the unmatched rows of relation 'r2' - * FIX: It requires relations 'this' and 'r2' to be sorted on column 'cp1' and 'cp2' resp., - * as it uses Sort-Merge join - * @param cp1 the position of the join column of this relation - * @param cp2 the position of the join column of 'r2' relation - * @param r2 the rhs relation in the join operation - */ - def leftJoinApx (cp1: Int, cp2: Int, r2: Relation): Relation = - { - val r3 = Relation (name + "_leftJoinApx_" + r2.name, 1, domain + r2.domain, colName ++ r2.colName) - val absentTuple = nullTuple (r2.domain) - var j = 0 - - for (i <- 0 until rows) { - val t = row(i) - val t_cp1 = t(cp1) - while (j < r2.rows-1 && !=~ (Vec (r2.col(cp2), j), t_cp1) && Vec_Elem.<(Vec (r2.col(cp2), j), t_cp1)) j += 1 - val j_aux = j - if (=~ (t_cp1, r2.row(j)(cp2))) { - while (j < r2.rows && =~ (Vec (r2.col(cp2), j), t_cp1)) { - val u = r2.row(j) - r3.add_ni (t ++ u) - j += 1 - } // while - j = j_aux - } else r3.add_ni (t ++ absentTuple) - } // for - r3.materialize () - } // leftJoinApx - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing a "right join". Rows from both - * relations are compared requiring 'cp1' values to approximately equal 'cp2' values. - * This method returns all the rows from 'this' relation, and the matched rows - * from relation 'r2'. 
It adds a 'null' tuples for the unmatched rows of relation 'r2' - * @param cp1 the position of the join column of this relation - * @param cp2 the position of the join column of 'r2' relation - * @param r2 the rhs relation in the join operation - */ - def rightJoinApx (cp1: Int, cp2: Int, r2: Relation): Relation = - { - r2.leftJoinApx (cp2, cp1, this) - } // rightJoinApx - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Parallel Join 'this' relation and 'r2' by performing an equi join on cName1 = cName2 and into k -threads - * seperate the lhs into k part join with rhs. - * FIX - move to .par package - * @param cName1 the join column names of lhs relation - * @param cName2 the join column names of rhs relation - * @param _r2 the rhs relation in the join operation - * @param k parallel run into k parts - * - def parjoin (cName1: ArrayBuffer [String], cName2: ArrayBuffer [String], _r2: Relation, k: Int): Relation = - { - // make the join into k parts of outer table join with inner table - // outer table is the one without index, partition on the outer table, inner table use index to loop through - - val r2 = _r2.asInstanceOf [Relation] - val cp1 = cName1.map (colMap (_)) // get column positions in 'this' - val cp2 = cName2.map (r2.colMap (_)) // get column positions in 'r2' - var futurelist: IndexedSeq [Future [Relation]] = null - - if (key == cp1(0)) futurelist = - // use left table as inner table (partition on outer table) - for (i <- 1 to k) yield Future { r2.parjoinsmall (cName1, cName2, this, i, k) } - else if (r2.key == cp2(0)) futurelist = - for (i <- 1 to k) yield Future { parjoinsmall (cName1, cName2, r2, i, k) } - - val waitduration = 190.millisecond // Need to be FIXED, should be partial to (rows /k) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - def recur (r1: Relation, kth: Int, relationList: ArrayBuffer [Relation]): Relation = - { - if (kth == relationList.size - 1) r1 union 
relationList(kth) - else if (r1 != null) recur (r1 union relationList(kth), kth + 1, relationList) - else recur (relationList(kth), kth + 1, relationList) - } // recur - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - // union the tables together 2 as a group from result of the futurelist - def foldhalf (fl: IndexedSeq [Future [Relation]]): Relation = - { - val relationList = ArrayBuffer [Relation] () - for (i <- 0 until fl.size) relationList += Await.result(fl(i), waitduration) - recur (relationList(0), 1, relationList) - } // foldhalf - - foldhalf (futurelist) - } // parjoin - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The 'parjoinsmall' method serves as the core part for parjoin function, do the - * nth part of the parellel join returning the relation of join of two tables. - * FIX - move to .par package - * @param cName1 the join column names of lhs relation - * @param cName2 the join column names of rhs relation - * @param _r2 the rhs relation in the join operation - * @param nth the nth part of the parallel join - * @param n parallel run into k parts - */ - private def parjoinsmall (cName1: ArrayBuffer [String], cName2: ArrayBuffer [String], _r2: Table, nth: Int, n: Int): Relation = - { - val r2 = _r2.asInstanceOf [Relation] - val ncols = cols + r2.cols - val cp1 = cName1.map (colMap (_)) // get column positions in 'this' - val cp2 = cName2.map (r2.colMap (_)) // get column positions in 'r2' - if (cp1.length != cp2.length) flaw ("join", "incompatible sizes on match columns") - - val newCName = disambiguate (colName, r2.colName) - val newCol = Vector.fill [Vec] (ncols) (null) - val newKey = key // FIX - val newDomain = domain + r2.domain - val r3 = new Relation (name + "_j_" + ucount (), newCName, newCol, newKey, newDomain) - val start = rows/n * (nth-1) - val end = if (nth == n) rows-1 else rows/n*nth - 1 - - if (cp1.size == 1 && cp2.size == 1) { - if (key == cp1(0) && r2.key == 
cp2(0)) { - for (k <- orderedIndex.slice (start, end + 1)) { - val t = index(k) - val u = r2.index.getOrElse(k, null) - if (u != null) r3.add_ni (t ++ u) - } //for - } // if - // partition the left table - for (i <- start until end+1) { - val t = row(i) - val u = r2.index.getOrElse(new KeyType (t(cp1(0))), null) - if (u != null) r3.add_ni (t ++ u) - } // for - } // if - r3.materialize () - } // parjoinsmall - - // ================================================================ GROUP BY - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Group 'this' relation by the specified column names, returning 'this' relation. - * @param cName the group column names - */ - def groupBy (cName: String*): Relation = - { - if (! cName.map (c => colName contains(c)).reduceLeft (_ && _)) - flaw ("groupBy", "groupbyName used to groupby doesn't exist in the cName") - val equivCol = Vector.fill [Vec] (colName.length)(null) - if (rows == 0) return this - - val cPos = cName.map (colMap (_)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Sort on the given columns. 
- * @param sortColumn the set of columns to sort on - */ - def sortcol (sortColumn: Set [Any]): Vec = - { - println (s"sortCol: sortColumn = $sortColumn") - var colcol: Vec = null - val domain = null -// for (x <- sortColumn) colcol = Vec.:+ (colcol, x, domain, 0) - for (x <- sortColumn) colcol = Vec.:+ (colcol, x) - - colcol match { - case _: VectoC => val sortcol = colcol.asInstanceOf [VectoC]; sortcol.sort (); sortcol - case _: VectoD => val sortcol = colcol.asInstanceOf [VectoD]; sortcol.sort (); sortcol - case _: VectoI => val sortcol = colcol.asInstanceOf [VectoI]; sortcol.sort (); sortcol - case _: VectoL => val sortcol = colcol.asInstanceOf [VectoL]; sortcol.sort (); sortcol - case _: VectoQ => val sortcol = colcol.asInstanceOf [VectoQ]; sortcol.sort (); sortcol - case _: VectoR => val sortcol = colcol.asInstanceOf [VectoR]; sortcol.sort (); sortcol - case _: VectoS => val sortcol = colcol.asInstanceOf [VectoS]; sortcol.sort (); sortcol - case _: VectoT => val sortcol = colcol.asInstanceOf [VectoT]; sortcol.sort (); sortcol -// case _ => flaw ("sortcol", s"vector type ${colcol.getClass} not supported"); null.asInstanceOf [Vec] - case _ => flaw ("sortcol", s"vector type $colcol not supported"); null.asInstanceOf [Vec] - } // match - } // sortcol - - var groupIndexMap = Map [Any, Vector [KeyType]] () - val tempIndexMap = Map [Any, Vector [KeyType]] () - var sortlst: Vec = null - - for (i <- cPos.indices) { - if (i == 0) { - index.foreach (indexmap => { - val key = StrNum (indexmap._2(cPos(i)).toString) - val value = indexmap._1 - if (groupIndexMap contains key) groupIndexMap += key -> (groupIndexMap(key) :+ value) - else groupIndexMap += key -> Vector(value) - }) // foreach - } else { - tempIndexMap.clear () - groupIndexMap.foreach (groupindexmap => { - val tempidxlist = groupindexmap._2 - for (idx <- tempidxlist) { - val key = StrNum(groupindexmap._1.toString + "," + index(idx)(cPos(i))) - val value = idx - if (tempIndexMap.contains(key)) tempIndexMap += key 
-> (tempIndexMap(key) :+ value) - else tempIndexMap += key -> Vector(value) - } // for - }) // for each - groupIndexMap = tempIndexMap - } // if - - if (i == cPos.size - 1) { - orderedIndex = Vector () - grouplist = Vector [Int] () - sortlst = sortcol (groupIndexMap.keySet.toSet) - for (k <- 0 until sortlst.size) { - val indexes = groupIndexMap(Vec(sortlst, k)) - orderedIndex = orderedIndex ++ indexes - grouplist = grouplist :+ orderedIndex.length - } // for - } // if - } // for - this - } // groupby - - // ================================================================= ORDER BY - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Order (ascending) the rows in the relation by the selected columns '_cName'. - * A stable sorting is used to allow sorting on multiple columns. - * @param _cName the column names that are to be sorted - */ - def orderBy (_cName: String*): Relation = - { - val cName = _cName.distinct - if (! cName.map (c => colName contains (c)).reduceLeft (_ && _)) - flaw ("orderBy", "cName used to orderBy does not exist in relation") - - val newCol = Vector.fill [Vec] (cols)(null) - val r2 = new Relation (name + "_j_" + ucount (), colName, newCol, key, domain) - - val perm = orderByHelper (ArrayBuffer (cName.map (colMap (_)) :_*), rows) - for (i <- perm) r2.add (row(i)) - r2.materialize () - } // orderBy - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Order (descending) the rows in the relation by the selected columns '_cName'. - * A stable sorting is used to allow sorting on multiple columns. - * @param _cName the column names that are to be sorted - */ - def reverseOrderBy (_cName: String*): Relation = - { - val cName = _cName.distinct - if (! 
cName.map (c => colName contains (c)).reduceLeft (_ && _)) - flaw ("orderBy", "cName used to orderBy does not exist in relation") - - val newCol = Vector.fill [Vec] (cols) (null) - val r2 = new Relation (name + "_j_" + ucount (), colName, newCol, key, domain) - - val perm = orderByHelper (ArrayBuffer (cName.map (colMap (_)) :_*), rows) - for (i <- perm.reverse) r2.add (row(i)) - r2.materialize () - } // reverseOrderBy - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Helper method for 'orderBy' and 'reverseOrderBy'. Performs indirect merge sort. - * @param cPos sequence of column positions to sort - * @param n total number of rows in this Relation - */ - private def orderByHelper (cPos: ArrayBuffer [Int], n: Int = rows): Array [Int] = - { - var perm: Array [Int] = null - - for (i <- cPos.indices) { - val col_i: Array [Any] = col (cPos(i)) match { - case _: VectorC => col(cPos(i)).asInstanceOf [VectorC]().toArray - case _: VectorD => col(cPos(i)).asInstanceOf [VectorD]().toArray - case _: VectorI => col(cPos(i)).asInstanceOf [VectorI]().toArray - case _: VectorL => col(cPos(i)).asInstanceOf [VectorL]().toArray - case _: VectorQ => col(cPos(i)).asInstanceOf [VectorQ]().toArray - case _: VectorR => col(cPos(i)).asInstanceOf [VectorR]().toArray - case _: VectorS => col(cPos(i)).asInstanceOf [VectorS]().toArray - case _: VectorT => col(cPos(i)).asInstanceOf [VectorT]().toArray - } // match - - perm = if (i == 0) (new MergeSortIndirect (col_i)()).isort () - else (new MergeSortIndirect (col_i)(perm)).isort () - }// for - perm - } // orderByHelper - - // ================================================================ COMPRESS - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compress the selected columns 'cName' in 'this' table. 
- * @param cName the names of the columns to be compressed - */ - def compress (cName: String*): Unit = - { - for (c <- cName) { - val i = colMap (c) -// col(i).compress () // FIX - add compress to Vec - } // for - } // compress - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Uncompress the selected columns 'cName' in 'this' table. - * @param cName the names of the columns to be uncompressed - */ - def uncompress (cName: String*): Unit = - { - for (c <- cName) { - val i = colMap (c) -// col(i).uncompress () // FIX - add uncompress to Vec - } // for - } // uncompress - - // ================================================================= UPDATES - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add 'tuple' to 'this' relation as a new row. - * FIX: want an efficient, covariant, mutable data structure, but `Array` is invariant. - * @param tuple an aggregation of columns values (new row) - * - def add (tuple: Row): Unit = - { - col = (for (j <- tuple.indices) yield - try { - Vec.:+ (col(j), tuple(j)) - } catch { - case cce: ClassCastException => - println (s"add: for column $j of tuple $tuple"); throw cce - } // try - ).toVector - } // add - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add 'tuple' to 'this' relation as a new row. It uses 'col2' as a temp 'col' - * to improve performance. 
- * @param tuple an aggregation of columns values (new row) - */ - @throws (classOf [Exception]) - def add (tuple: Row): Unit = - { - try { - if (tuple == null) throw new Exception ("add function: tuple is null") - val rowIdx = col2(0).length - val newkey = if (key < 0) new KeyType (rowIdx) else new KeyType (tuple(key)) - index += newkey -> tuple - keytoIndex += newkey -> rowIdx - orderedIndex = orderedIndex :+ newkey - indextoKey += rowIdx -> newkey - for (j <- tuple.indices) addElem (j, rowIdx, tuple(j)) - } catch { - case ex: NullPointerException => - println ("tuple'size is: " + tuple.size) - println ("col'size is: " + col.size) - throw ex - } // try - } // add - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add an element into 'col2', the holding area for input. If the types - * of column domains are specified, the types are checked. - * @param j the j-th column of col2 - * @param rowIdx the row index - * @param elem the element to added - */ - private def addElem (j: Int, rowIdx: Int, elem: Any): Unit = - { - val typ = if (domain == null) 'X' else domain(j) - try { - if (typ == 'X') col2(j).asInstanceOf [ReArray [Any]](rowIdx) = elem // no domains => assume Any type - else { typ match { - case 'C' => col2(j).asInstanceOf [ReArray [Complex]](rowIdx) = elem.asInstanceOf [Complex] - case 'D' => col2(j).asInstanceOf [ReArray [Double]](rowIdx) = elem.asInstanceOf [Double] - case 'I' => col2(j).asInstanceOf [ReArray [Int]](rowIdx) = elem.asInstanceOf [Int] - case 'L' => col2(j).asInstanceOf [ReArray [Long]](rowIdx) = elem.asInstanceOf [Long] - case 'Q' => col2(j).asInstanceOf [ReArray [Rational]](rowIdx) = elem.asInstanceOf [Rational] - case 'R' => col2(j).asInstanceOf [ReArray [Real]](rowIdx) = elem.asInstanceOf [Real] - case 'S' => col2(j).asInstanceOf [ReArray [StrNum]](rowIdx) = elem.asInstanceOf [StrNum] - case 'T' => col2(j).asInstanceOf [ReArray [TimeNum]](rowIdx) = elem.asInstanceOf [TimeNum] - case _ => flaw 
("constructor", s"unsupported column type ${domain(j)} for column $j") - } // match - } // if - } catch { - case ex: ClassCastException => - if (typ == 'S') { -// println (s"warning in addElem: colIdx j = $j, rowIdx = $rowIdx, elem = $elem, class = ${elem.getClass}, typ = $typ") - col2(j).asInstanceOf [ReArray [StrNum]](rowIdx) = StrNum (elem.toString) // anything can be a string - } else if (elem.isInstanceOf [String] || elem.isInstanceOf [Char]) { - println (s"warning in addElem: colIdx j = $j, rowIdx = $rowIdx, elem = $elem, class = ${elem.getClass}, typ = $typ") - typ match { - case 'C' => col2(j).asInstanceOf [ReArray [Complex]](rowIdx) = noComplex - case 'D' => col2(j).asInstanceOf [ReArray [Double]](rowIdx) = noDouble - case 'I' => col2(j).asInstanceOf [ReArray [Int]](rowIdx) = noInt - case 'L' => col2(j).asInstanceOf [ReArray [Long]](rowIdx) = noLong - case 'Q' => col2(j).asInstanceOf [ReArray [Rational]](rowIdx) = noRational - case 'R' => col2(j).asInstanceOf [ReArray [Real]](rowIdx) = noReal - case 'T' => col2(j).asInstanceOf [ReArray [TimeNum]](rowIdx) = noTimeNum - case _ => flaw ("constructor", s"unsupported column type ${domain(j)} for column $j") - } // match - } else { - println (s"exception in addElem: name = $name, colIdx j = $j, rowIdx = $rowIdx, elem = $elem, class = ${elem.getClass}, typ = $typ") - throw ex - } // if - } // try - - } // addElem - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add 'tuple' to 'this' relation as a new row. It is slower than 'add' method. - * Type is determined by sampling values for columns. 
- * @param tuple an aggregation of columns values (new row) - * - def add_2 (tuple: Row): Unit = - { - index += new KeyType (tuple(key))-> tuple // hashmap way - keytoIndex += new KeyType (tuple(key)) ->rows - col = (for (j <- tuple.indices) yield - try { -// Vec.:+ (col(j), StrNum (tuple(j).toString), domain, j) // FIX - allow this option - Vec.:+ (col(j), StrNum (tuple(j).toString)) - } catch { - case cce: ClassCastException => - println (s"add: for column $j of tuple $tuple"); throw cce - } // try - ).toVector - } // add_2 - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add a tuple into the 'col2', without maintaining the index (No Index (ni), - * orderedIndex, keytoIndex and indextoKey. - * @param tuple the tuple to add - */ - private def add_ni (tuple: Row): Unit = - { - val rowIdx = col2(0).length - for (j <- tuple.indices) addElem (j, rowIdx, tuple(j)) - } // add_ni - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Materialize the relation by copying the temporary 'col2' into 'col'. - * It needs to be called by the end of the relation construction. - */ - def materialize (): Relation = - { - if (domain == null || domain == "") materialize1 () else materialize2 () - } // materialize - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Materialize the relation by copying the temporary 'col2' into 'col'. - * It needs to be called by the end of the relation construction. - * This version uses the type/domain of the first value to transform the 'col2' to 'col'. - */ - private [columnar_db] def materialize1 (): Relation = - { - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Transform the j-th column to the appropriate vector type. 
- * @param j the j-th column index in the relation - */ - def transform1 (j: Int): Vec = - { - val first = col2(j)(0) - if (first != null) col2(j).reduceToSize (col2(j).size) - first match { - case _: Complex => val rs = VectorC (col2(j).asInstanceOf [Seq [Complex]]); col2(j).clear (); rs - case _: Double => val rs = VectorD (col2(j).asInstanceOf [Seq [Double]]); col2(j).clear (); rs - case _: Int => val rs = VectorI (col2(j).asInstanceOf [Seq [Int]]); col2(j).clear (); rs - case _: Long => val rs = VectorL (col2(j).asInstanceOf [Seq [Long]]); col2(j).clear (); rs - case _: Rational => val rs = VectorQ (col2(j).asInstanceOf [Seq [Rational]]); col2(j).clear (); rs - case _: Real => val rs = VectorR (col2(j).asInstanceOf [Seq [Real]]); col2(j).clear (); rs - case _: StrNum => val rs = VectorS (col2(j).asInstanceOf [Seq [StrNum]]); col2(j).clear (); rs - case _: String => val rs = VectorS (col2(j).asInstanceOf [Seq [String]].toArray); col2(j).clear (); rs - case _: TimeNum => val rs = VectorT (col2(j).asInstanceOf [Seq [TimeNum]]); col2(j).clear (); rs - case _ => flaw ("materialize1.transform", s"($j): vector type ($first) not supported"); null - } // match - } // transform1 - -// if (DEBUG) println (s"materialize1: col2 = $col2") - if (colEmpty) { - col = (for (j <- col2.indices) yield transform1(j)).toVector - } else { - col = (for (j <- col.indices) yield transform1(j)).toVector ++ - (for (j <- col2.indices) yield transform1(j)).toVector - } // if - this - } // materialize1 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Materialize the relation by copying the temporary 'col2' into 'col'. - * It needs to be called by the end of the relation construction. - * This version uses 'domain' to transform the 'col2' to 'col' according to the domain indicator: - *

    - * Dense: 'C', 'D', 'I', 'L'. 'Q', 'R', 'S', 'T' - * Compressed: 'c', 'd', 'i', 'l', 'q', 'r', 's', 't' - * Sparse: 'χ', 'δ', 'ι', 'λ', 'ϟ', 'ρ', 'σ', 'τ' - *

    - */ - private [columnar_db] def materialize2 (): Relation = - { - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Transform the j-th column to the appropriate vector type. - * @param j the j-th column index in the relation - */ - def transform2 (j: Int): Vec = - { - val dj = domain(j) - col2(j).reduceToSize (col2(j).size) - dj match { - - // Upper case letter type/domain indictors for Dense Vectors - case 'C' => val rs = VectorC (col2(j).asInstanceOf [Seq [Complex]]); col2(j).clear (); rs - case 'D' => val rs = VectorD (col2(j).asInstanceOf [Seq [Double]]); col2(j).clear (); rs - case 'I' => val rs = VectorI (col2(j).asInstanceOf [Seq [Int]]); col2(j).clear (); rs - case 'L' => val rs = VectorL (col2(j).asInstanceOf [Seq [Long]]); col2(j).clear (); rs - case 'Q' => val rs = VectorQ (col2(j).asInstanceOf [Seq [Rational]]); col2(j).clear (); rs - case 'R' => val rs = VectorR (col2(j).asInstanceOf [Seq [Real]]); col2(j).clear (); rs - case 'S' => val rs = VectorS (col2(j).asInstanceOf [Seq [StrNum]]); col2(j).clear (); rs - case 'T' => val rs = VectorT (col2(j).asInstanceOf [Seq [TimeNum]]); col2(j).clear (); rs - - // Lower case letter type/domain indictors for Compressed Vectors - case 'c' => val rs = RleVectorC (col2(j).asInstanceOf [Seq [Complex]]); col2(j).clear (); rs - case 'd' => val rs = RleVectorD (col2(j).asInstanceOf [Seq [Double]]); col2(j).clear (); rs - case 'i' => val rs = RleVectorI (col2(j).asInstanceOf [Seq [Int]]); col2(j).clear (); rs - case 'l' => val rs = RleVectorL (col2(j).asInstanceOf [Seq [Long]]); col2(j).clear (); rs - case 'q' => val rs = RleVectorQ (col2(j).asInstanceOf [Seq [Rational]]); col2(j).clear (); rs - case 'r' => val rs = RleVectorR (col2(j).asInstanceOf [Seq [Real]]); col2(j).clear (); rs - case 's' => val rs = RleVectorS (col2(j).asInstanceOf [Seq [StrNum]]); col2(j).clear (); rs - case 't' => val rs = RleVectorT (col2(j).asInstanceOf [Seq [TimeNum]]); col2(j).clear (); rs - - // Lower case 
Greek letter type/domain indictors for Sparse Vectors - // @see web.mit.edu/jmorzins/www/greek-alphabet.html - // @see en.wikipedia.org/wiki/List_of_Unicode_characters#Greek_and_Coptic - case 'χ' => val rs = SparseVectorC (col2(j).asInstanceOf [Seq [Complex]]); col2(j).clear (); rs - case 'δ' => val rs = SparseVectorD (col2(j).asInstanceOf [Seq [Double]]); col2(j).clear (); rs - case 'ι' => val rs = SparseVectorI (col2(j).asInstanceOf [Seq [Int]]); col2(j).clear (); rs - case 'λ' => val rs = SparseVectorL (col2(j).asInstanceOf [Seq [Long]]); col2(j).clear (); rs - case 'ϟ' => val rs = SparseVectorQ (col2(j).asInstanceOf [Seq [Rational]]); col2(j).clear (); rs - case 'ρ' => val rs = SparseVectorR (col2(j).asInstanceOf [Seq [Real]]); col2(j).clear (); rs - case 'σ' => val rs = SparseVectorS (col2(j).asInstanceOf [Seq [StrNum]]); col2(j).clear (); rs - case 'τ' => val rs = SparseVectorT (col2(j).asInstanceOf [Seq [TimeNum]]); col2(j).clear (); rs - - case _ => flaw ("materialize2.transform", s"($j) vector type not supported domain ($dj)"); null - } // match - } // transform2 - -// if (DEBUG) println (s"materialize2: col2 = $col2") - if (colEmpty) { - col = (for (j <- col2.indices) yield transform2(j)).toVector - } else { - col = (for (j <- col.indices) yield transform2(j)).toVector ++ - (for (j <- col2.indices) yield transform2(j)).toVector - } // if - this - } // materialize2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether all of the columns in the relation are empty. - */ - def colEmpty: Boolean = - { - for (column <- col if column != null) return false - true - } // Empty - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the column named 'cName' using function 'func' for elements with - * value 'matchStr'. 
- * @param cName the name of the column to be updated - * @param newVal the value used to assign updated values - * @param matchVal the value to be matched to elements - * @tparam T type of the column - */ - def update [T] (cName: String, newVal: T, matchVal: T): Unit = - { - val colPos = colMap(cName) - val c = col(colPos) - for (i <- 0 until c.size if Vec(c, i) == matchVal) Vec(c, i) = newVal - } // update - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the column named 'cName' using function 'func' for elements with - * value 'matchStr'. - * @param cName the name of the column to be updated - * @param func the function used to assign updated values - * @param matchVal the value to be matched to elements - * @tparam T type of the column - */ - def update [T] (cName: String, func: (T) => T, matchVal: T): Unit = - { - val colPos = colMap (cName) - val c = col (colPos) - for (i <- 0 until c.size if Vec(c, i) == matchVal) Vec(c, i) = func (Vec(c, i).asInstanceOf [T]) - } // update - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the column named 'cName' using function 'func' for elements where - * the predicate 'pred' evaluates to true. - * @param cName the name of the column to be updated - * @param func the function used to assign updated values - * @param pred the predicated used to select elements for update - * @tparam T type of the column - */ - def update [T] (cName: String, func: (T) => T, pred: (T) => Boolean): Unit = - { - val colPos = colMap (cName) - val c = col (colPos) - var pos = ArrayBuffer [Int] () - for (i <- 0 until c.size) { - val v_ci = Vec(c, i).asInstanceOf [T] - if (pred (v_ci)) Vec(c, i) = func (v_ci) - } // for - } // update - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Delete the rows from 'this' relation that satisfy the predicates. 
- * FIX - handle all 24 domain types - * @param p tuple(1): column name, tuple(2): predicate (T => `Boolean`) - * @tparam T the predicate type - */ - def delete [T] (p: Predicate [T]*): Relation = - { - null -/* - var pos = ArrayBuffer [Int] () - for (i <- p.indices) { - domain (colMap(p(i)._1)) match { - case 'D' => val pos1 = col (colMap(p(i)._1)).asInstanceOf [VectorD].filterPos (p(i)._2.asInstanceOf [Double => Boolean]) - if (i > 0) pos = pos intersect pos1 else pos ++= pos1 - case 'I' => val pos1 = col (colMap(p(i)._1)).asInstanceOf [VectorI].filterPos (p(i)._2.asInstanceOf [Int => Boolean]) - if (i > 0) pos = pos intersect pos1 else pos ++= pos1 - case 'L' => val pos1 = col (colMap(p(i)._1)).asInstanceOf [VectorL].filterPos (p(i)._2.asInstanceOf [Long => Boolean]) - if (i > 0) pos = pos intersect pos1 else pos ++= pos1 - case 'S' => val pos1 = col (colMap(p(i)._1)).asInstanceOf [VectorS].filterPos (p(i)._2.asInstanceOf [StrNum => Boolean]) - if (i > 0) pos = pos intersect pos1 else pos ++= pos1 - case _ => flaw ("delete", "predicate type not supported") - null - } // match - } // for - val indices = Set (0 to rows-1 :_*) diff pos.toSet - for (i <- 0 until cols) Vec.delete (col(i), pos.asInstanceOf [ArrayBuffer [Int]]) - selectAt (indices.toArrayBuffer.sorted) -*/ - } // delete - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert 'this' relation into a string column by column. - */ - override def toString: String = - { - var sb = new StringBuilder ("Relation(" + name + ", " + key + ",\n" + colName + ",\n") - for (i <- col.indices) sb.append (s"${col(i)} \n") - sb.replace (sb.length-1, sb.length, ")").mkString - } // toString - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show 'this' relation row by row. 
- * @param limit the limit on the number of rows to display - */ - def show (limit: Int = Int.MaxValue): Unit = - { - val wid = 18 // column width - val rep = wid * colName.length // repetition = width * # columns - val title = s"| Relation name = $name, key-column = $key " - - println (s"|-${"-"*rep}-|") - println (title + " "*(rep-title.length) + " |") - println (s"|-${"-"*rep}-|") - print ("| "); for (cn <- colName) print (s"%${wid}s".format (cn)); println (" |") - println (s"|-${"-"*rep}-|") - for (i <- 0 until MIN (rows, limit)) { - print ("| ") - for (cv <- row(i)) { - if (cv.isInstanceOf [Double]) print (s"%${wid}g".format (cv)) - else print (s"%${wid}s".format (cv)) - } // for - println (" |") - } // for - println (s"|-${"-"*rep}-|") - } // show - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show 'this' relation's foreign keys. - */ - def showFk (): Unit = - { - val wid = 18 // column width - val rep = wid * colName.length // repetition = width * # columns - val title = s"| Relation name = $name, foreign keys = " - val fkline = s"| $fKeys " - - println (s"|-${"-"*rep}-|") - println (title + " "*(rep-title.length) + " |") - println (s"|-${"-"*rep}-|") - println (fkline + " "*(rep-fkline.length) + " |") - println (s"|-${"-"*rep}-|") - } // showFk - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert 'this' relation into a matrix of doubles, e.g., - *

    - * in the regression equation: 'xb = y' create matrix 'xy' - *

    - * @param colPos the column positions to use for the matrix - * @param kind the kind of matrix to create - */ - def toMatriD (colPos: ArrayBuffer [Int], kind: MatrixKind = DENSE): MatriD = - { - val colVec = for (x <- project (colPos).col) yield Vec.toDouble (x) - kind match { - case DENSE => MatrixD (colVec) - case SPARSE => SparseMatrixD (colVec) - case SYM_TRIDIAGONAL => SymTriMatrixD (colVec) - case BIDIAGONAL => BidMatrixD (colVec) - case COMPRESSED => RleMatrixD (colVec) - } // match - } // toMatriD - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert 'this' relation into a matrix of doubles and a vector of doubles. - *

    - * in the regression equation: 'xb = y' create matrix 'x' and vector 'y' - *

    - * @param colPos the column positions to use for the matrix - * @param colPosV the column position to use for the vector - * @param kind the kind of matrix to create - */ - def toMatriDD (colPos: ArrayBuffer [Int], colPosV: Int, kind: MatrixKind = DENSE): (MatriD, VectorD) = - { - val colVec = for (x <- project (colPos).col) yield Vec.toDouble (x) - kind match { - case DENSE => (MatrixD (colVec), Vec.toDouble (col(colPosV)).toDense.asInstanceOf [VectorD]) - case SPARSE => (SparseMatrixD (colVec), Vec.toDouble (col(colPosV)).toDense.asInstanceOf [VectorD]) - case SYM_TRIDIAGONAL => (SymTriMatrixD (colVec), Vec.toDouble (col(colPosV)).toDense.asInstanceOf [VectorD]) - case BIDIAGONAL => (BidMatrixD (colVec), Vec.toDouble (col(colPosV)).toDense.asInstanceOf [VectorD]) - case COMPRESSED => (RleMatrixD (colVec), Vec.toDouble (col(colPosV)).toDense.asInstanceOf [VectorD]) - } // match - } // toMatriDD - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert 'this' relation into a matrix of doubles and a vector of integers. - *

    - * in the regression equation: 'xb = y' create matrix 'x' and vector 'y' - *

    - * @param colPos the column positions to use for the matrix - * @param colPosV the column position to use for the vector - * @param kind the kind of matrix to create - */ - def toMatriDI (colPos: ArrayBuffer [Int], colPosV: Int, kind: MatrixKind = DENSE): (MatriD, VectorI) = - { - val colVec = for (x <- project (colPos).col) yield Vec.toDouble (x) - kind match { - case DENSE => (MatrixD (colVec), Vec.toInt (col(colPosV)).toDense.asInstanceOf [VectorI]) - case SPARSE => (SparseMatrixD (colVec), Vec.toInt (col(colPosV)).toDense.asInstanceOf [VectorI]) - case SYM_TRIDIAGONAL => (SymTriMatrixD (colVec), Vec.toInt (col(colPosV)).toDense.asInstanceOf [VectorI]) - case BIDIAGONAL => (BidMatrixD (colVec), Vec.toInt (col(colPosV)).toDense.asInstanceOf [VectorI]) - case COMPRESSED => (RleMatrixD (colVec), Vec.toInt (col(colPosV)).toDense.asInstanceOf [VectorI]) - } // match - } // toMatriDI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert 'this' relation into a matrix of double. It will convert - * strings to double. - *

    - * in the regression equation: 'xb = y' create matrix 'xy' - *

    - * @param colPos the column positions to use for the matrix - * @param kind the kind of matrix to create - */ - def toMatriD2 (colPos: ArrayBuffer [Int] = null, kind: MatrixKind = DENSE): MatriD = - { - import Converter._ - val cp = if (colPos == null) ArrayBuffer.range(0, cols) else colPos - val colVec = for (x <- project (cp).col) yield { - try { - Vec.toDouble (x) - } catch { - case num: NumberFormatException => map2Int (x.asInstanceOf [VectorS])._1.toDouble - } // trys - } // for - kind match { - case DENSE => MatrixD (colVec) - case SPARSE => SparseMatrixD (colVec) - case SYM_TRIDIAGONAL => SymTriMatrixD (colVec) - case BIDIAGONAL => BidMatrixD (colVec) - case COMPRESSED => RleMatrixD (colVec) - } // match - } // toMatriD2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert 'this' relation into a matrix of integers. - *

    - * in the regression equation: 'xb = y' create matrix 'xy' - *

    - * @param colPos the column positions to use for the matrix - * @param kind the kind of matrix to create - */ - def toMatriI (colPos: ArrayBuffer [Int], kind: MatrixKind = DENSE): MatriI = - { - val colVec = for (x <- project (colPos).col) yield Vec.toInt (x) - kind match { - case DENSE => MatrixI (colVec) - case SPARSE => SparseMatrixI (colVec) - case SYM_TRIDIAGONAL => SymTriMatrixI (colVec) - case BIDIAGONAL => BidMatrixI (colVec) - case COMPRESSED => RleMatrixI (colVec) - } // match - } // toMatriI - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert 'this' relation into a matrix of integers. It will convert - * doubles and strings to integers. - *

    - * in the regression equation: 'xb = y' create matrix 'xy' - *

    - * @param colPos the column positions to use for the matrix - * @param kind the kind of matrix to create - */ - def toMatriI2 (colPos: ArrayBuffer [Int] = null, kind: MatrixKind = DENSE): MatriI = - { - import Converter._ - val cp = if (colPos == null) ArrayBuffer.range(0, cols) else colPos - val colVec = for (x <- project (cp).col) yield { - try { - Vec.toInt (x) - } catch { - case num: NumberFormatException => map2Int (x.asInstanceOf [VectorS])._1 - } // trys - } // for - kind match { - case DENSE => MatrixI (colVec) - case SPARSE => SparseMatrixI (colVec) - case SYM_TRIDIAGONAL => SymTriMatrixI (colVec) - case BIDIAGONAL => BidMatrixI (colVec) - case COMPRESSED => RleMatrixI (colVec) - } // match - } // toMatriI2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert 'this' relation into a matrix of integers and a vector of integers. - *

    - * in the regression equation: 'xb = y' create matrix 'x' and vector 'y' - *

    - * @param colPos the column positions to use for the matrix - * @param colPosV the column position to use for the vector - * @param kind the kind of matrix to create - */ - def toMatriII (colPos: ArrayBuffer [Int], colPosV: Int, kind: MatrixKind = DENSE): (MatriI, VectorI) = - { - val colVec = for (x <- project (colPos).col) yield Vec.toInt (x) - kind match { - case DENSE => (MatrixI (colVec), Vec.toInt (col(colPosV)).toDense.asInstanceOf [VectorI]) - case SPARSE => (SparseMatrixI (colVec), Vec.toInt (col(colPosV)).toDense.asInstanceOf [VectorI]) - case SYM_TRIDIAGONAL => (SymTriMatrixI (colVec), Vec.toInt (col(colPosV)).toDense.asInstanceOf [VectorI]) - case BIDIAGONAL => (BidMatrixI (colVec), Vec.toInt (col(colPosV)).toDense.asInstanceOf [VectorI]) - case COMPRESSED => (RleMatrixI (colVec), Vec.toInt (col(colPosV)).toDense.asInstanceOf [VectorI]) - } // match - } // toMatriII - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colPos' column of 'this' relation into a vector of complex numbers. - * @param colPos the column position to use for the vector - */ - def toVectorC (colPos: Int = 0): VectorC = col(colPos).asInstanceOf [VectorC] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colName' column of 'this' relation into a vector of complex numbers. - * @param colName the column name to use for the vector - */ - def toVectorC (colName: String): VectorC = col(colMap(colName)).asInstanceOf [VectorC] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colPos' column of 'this' relation into a vector of doubles. 
- * @param colPos the column position to use for the vector - */ - def toVectorD (colPos: Int = 0): VectorD = Vec.toDouble (col(colPos)).toDense.asInstanceOf [VectorD] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colName' column of 'this' relation into a vector of doubles. - * @param colName the column name to use for the vector - */ - def toVectorD (colName: String): VectorD = Vec.toDouble (col(colMap(colName))).toDense.asInstanceOf [VectorD] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colPos' column of 'this' relation into a vector of integers. - * @param colPos the column position to use for the vector - */ - def toVectorI (colPos: Int = 0): VectorI = Vec.toInt (col(colPos)).toDense.asInstanceOf [VectorI] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colName' column of 'this' relation into a vector of integers. - * @param colName the column name to use for the vector - */ - def toVectorI (colName: String): VectorI = Vec.toInt (col(colMap(colName))).toDense.asInstanceOf [VectorI] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colPos' column of 'this' relation into a vector of long integers. - * @param colPos the column position to use for the vector - */ - def toVectorL (colPos: Int = 0): VectorL = col(colPos).asInstanceOf [VectorL] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colName' column of 'this' relation into a vector of long integers. - * @param colName the column name to use for the vector - */ - def toVectorL (colName: String): VectorL = col(colMap(colName)).asInstanceOf [VectorL] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colPos' column of 'this' relation into a vector of rational numbers. 
- * @param colPos the column position to use for the vector - */ - def toVectorQ (colPos: Int = 0): VectorQ = col(colPos).asInstanceOf [VectorQ] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colName' column of 'this' relation into a vector of rational numbers. - * @param colName the column name to use for the vector - */ - def toVectorQ (colName: String): VectorQ = col(colMap(colName)).asInstanceOf [VectorQ] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colPos' column of 'this' relation into a vector of real number. - * @param colPos the column position to use for the vector - */ - def toVectorR (colPos: Int = 0): VectorR = col(colPos).asInstanceOf [VectorR] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colName' column of 'this' relation into a vector of real number. - * @param colName the column name to use for the vector - */ - def toVectorR (colName: String): VectorR = col(colMap(colName)).asInstanceOf [VectorR] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colPos' column of 'this' relation into a vector of string-num. - * @param colPos the column position to use for the vector - */ - def toVectorS (colPos: Int = 0): VectorS = col(colPos).asInstanceOf [VectorS] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colName' column of 'this' relation into a vector of string-num. - * @param colName the column name to use for the vector - */ - def toVectorS (colName: String): VectorS = col(colMap(colName)).asInstanceOf [VectorS] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colPos' column of 'this' relation into a vector of time-num. 
- * @param colPos the column position to use for the vector - */ - def toVectorT (colPos: Int = 0): VectorT = col(colPos).asInstanceOf [VectorT] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colName' column of 'this' relation into a vector of time-num. - * @param colName the column name to use for the vector - */ - def toVectorT (colName: String): VectorT = col(colMap(colName)).asInstanceOf [VectorT] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colPos' column of 'this' relation into a vector of doubles. - * @param colPos the column position to use for the vector - */ - def toRleVectorD (colPos: Int = 0): RleVectorD = Vec.toDouble (col(colPos)).asInstanceOf [RleVectorD] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colName' column of 'this' relation into a vector of doubles. - * @param colName the column name to use for the vector - */ - def toRleVectorD (colName: String): RleVectorD = Vec.toDouble (col(colMap(colName))).asInstanceOf [RleVectorD] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colPos' column of 'this' relation into a vector of integers. - * @param colPos the column position to use for the vector - */ - def toRleVectorI (colPos: Int = 0): RleVectorI = Vec.toInt (col(colPos)).asInstanceOf [RleVectorI] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colName' column of 'this' relation into a vector of integers. - * @param colName the column name to use for the vector - */ - def toRleVectorI (colName: String): RleVectorI = Vec.toInt (col(colMap(colName))).asInstanceOf [RleVectorI] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colPos' column of 'this' relation into a vector of integers. 
- * @param colPos the column position to use for the vector - */ - def toRleVectorS (colPos: Int = 0): RleVectorS = col(colPos).asInstanceOf [RleVectorS] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colName' column of 'this' relation into a vector of integers. - * @param colName the column name to use for the vector - */ - def toRleVectorS (colName: String): RleVectorS = col(colMap(colName)).asInstanceOf [RleVectorS] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the given columns within 'this' relation to a map: 'keyColPos' -> 'valColPos'. - * @param keyColPos the key column positions - * @param valColPos the value column positions - */ - def toMap (keyColPos: ArrayBuffer [Int], valColPos: Int): Map [ArrayBuffer [Any], Any] = - { - val map = Map [ArrayBuffer [Any], Any] () - for (i <- indices) { - val tuple = row(i) - map += keyColPos.map (tuple(_)) -> tuple(valColPos) - } // for - map - } // toMap - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the given columns within 'this' relation to a map: 'keyColName' -> 'valColName'. - * @param keyColName the key column names - * @param valColname the value column names - */ - def toMap (keyColName: ArrayBuffer [String], valColName: String): Map [ArrayBuffer [Any], Any] = - { - toMap (keyColName.map (colMap(_)), colMap(valColName)) - } // toMap - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Save 'this' relation in a file using serialization. - */ - def save (): Unit = - { - val oos = new ObjectOutputStream (new FileOutputStream (STORE_DIR + name + SER)) - oos.writeObject (this) - oos.close () - } // save - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Write 'this' relation into a CSV file with each row written to a line. 
- * @param fileName the file name of the data file - */ - def writeCSV (fileName: String): Unit = - { - val out = new PrintWriter (BASE_DIR + fileName) - out.println (colName.toString.drop (5).dropRight (1)) - for (i <- 0 until rows) out.println (row(i).toString.drop (7).dropRight (1)) - out.close - } // writeCSV - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Write 'this' relation into a JSON file. - * @param fileName the file name of the data file - */ - def writeJSON (fileName: String): Unit = - { - // FIX - to be implemented - } // writeJSON - - // ============================================ BUILT-IN AGGREGATE FUNCTIONS - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the mean of the values in column 'cName'. - * @param cName the column name - */ - def avg (cName: String) = Vec.mean (col(colMap(cName))) - def mean (cName: String) = Vec.mean (col(colMap(cName))) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the number of values in column 'cName'. - * @param cName the column name - */ - def count (cName: String): Int = rows - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the maximum value in column 'cName'. - * @param cName the column name - */ - def max (cName: String): Any = Vec.max (col(colMap(cName))) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the minimum value in column 'cName'. - * @param cName the column name - */ - def min (cName: String): Any = Vec.min (col(colMap(cName))) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the sum of the values in column 'cName'. 
- * @param cName the column name - */ - def sum (cName: String): Any = Vec.sum (col(colMap(cName))) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the variance of the values in column 'cName'. - * @param cName the column name - */ - def variance (cName: String): Any = Vec.variance (col(colMap(cName))) - -} // Relation class - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationEx` object provides and example relation for testing. - * @see www.codeproject.com/Articles/652108/Create-First-Data-WareHouse - */ -object RelationEx -{ - val productSales = Relation ("productSales", - ArrayBuffer ("SalesInvoiceNumber", "SalesDateKey", "SalesTimeKey", "SalesTimeAltKey", "StoreID", "CustomerID", - "ProductID", "SalesPersonID", "Quantity", "ProductActualCost", "SalesTotalCost", "Deviation"), - ArrayBuffer (Vector [Any] (1, 20130101, 44347, 121907, 1, 1, 1, 1, 2, 11.0, 13.0, 2.0), - Vector [Any] (1, 20130101, 44347, 121907, 1, 1, 2, 1, 1, 22.5, 24.0, 1.5), - Vector [Any] (1, 20130101, 44347, 121907, 1, 1, 3, 1, 1, 42.0, 43.5, 1.5), - Vector [Any] (2, 20130101, 44519, 122159, 1, 2, 3, 1, 1, 42.0, 43.5, 1.5), - Vector [Any] (2, 20130101, 44519, 122159, 1, 2, 4, 1, 3, 54.0, 60.0, 6.0), - Vector [Any] (3, 20130101, 52415, 143335, 1, 3, 2, 2, 2, 11.0, 13.0, 2.0), - Vector [Any] (3, 20130101, 52415, 143335, 1, 3, 3, 2, 1, 42.0, 43.5, 1.5), - Vector [Any] (3, 20130101, 52415, 143335, 1, 3, 4, 2, 3, 54.0, 60.0, 6.0), - Vector [Any] (3, 20130101, 52415, 143335, 1, 3, 5, 2, 1, 135.0, 139.0, 4.0), - Vector [Any] (4, 20130102, 44347, 121907, 1, 1, 1, 1, 2, 11.0, 13.0, 2.0), - Vector [Any] (4, 20130102, 44347, 121907, 1, 1, 2, 1, 1, 22.5, 24.0, 1.5), - Vector [Any] (5, 20130102, 44519, 122159, 1, 2, 3, 1, 1, 42.0, 43.5, 1.5), - Vector [Any] (5, 20130102, 44519, 122159, 1, 2, 4, 1, 3, 54.0, 60.0, 6.0), - Vector [Any] (6, 20130102, 52415, 143335, 1, 3, 2, 2, 2, 11.0, 13.0, 2.0), - Vector [Any] (6, 
20130102, 52415, 143335, 1, 3, 5, 2, 1, 135.0, 139.0, 4.0), - Vector [Any] (7, 20130102, 44347, 121907, 2, 1, 4, 3, 3, 54.0, 60.0, 6.0), - Vector [Any] (7, 20130102, 44347, 121907, 2, 1, 5, 3, 1, 135.0, 139.0, 4.0), - Vector [Any] (8, 20130103, 59326, 162846, 1, 1, 3, 1, 2, 84.0, 87.0, 3.0), - Vector [Any] (8, 20130103, 59326, 162846, 1, 1, 4, 1, 3, 54.0, 60.0, 3.0), - Vector [Any] (9, 20130103, 59349, 162909, 1, 2, 1, 1, 1, 5.5, 6.5, 1.0), - Vector [Any] (9, 20130103, 59349, 162909, 1, 2, 2, 1, 1, 22.5, 24.0, 1.5), - Vector [Any] (10, 20130103, 67390, 184310, 1, 3, 1, 2, 2, 11.0, 13.0, 2.0), - Vector [Any] (10, 20130103, 67390, 184310, 1, 3, 4, 2, 3, 54.0, 60.0, 6.0), - Vector [Any] (11, 20130103, 74877, 204757, 2, 1, 2, 3, 1, 5.5, 6.5, 1.0), - Vector [Any] (11, 20130103, 74877, 204757, 2, 1, 3, 3, 1, 42.0, 43.5, 1.5)), - 0, "IIIIIIIIIDDD") - -} // RelationEx object - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest` object tests the operations provided by `Relation`. 
- * > runMain scalation.columnar_db.RelationTest - */ -object RelationTest extends App -{ - val weekdays = new Relation ("weekdays", ArrayBuffer ("day", "time"), - Vector (VectorS ("Mon", "Tue", "Wed", "Thu", "Fri"), - VectorD (5.00, 8.15, 6.30, 9.45, 7.00)), - 0, "SD") - - val weekend = new Relation ("weekends", ArrayBuffer ("day", "time"), - Vector (VectorS ("Sat", "Sun"), - VectorD (3.00, 4.30)), - 0, "SD") - - weekdays.generateIndex () - weekend.generateIndex () - - banner ("weekdays") - println ("weekdays = " + weekdays) - banner ("weekdend") - println ("weekend = " + weekend) - - banner ("Test pi") - println ("weekdays.pi (\"day\") = " + weekdays.pi ("day")) - println ("-" * 60) - println ("weekdays.pisigmaS (\"day\", _ == \"Mon\") = " + weekdays.pisigmaS ("day", _ == "Mon")) - - banner ("Test sigma") - println ("weekdays.sigmaS (\"day\", _ == \"Mon\") = " + weekdays.sigmaS ("day", _ == "Mon")) - println ("-" * 60) - println ("weekdays.sigma (\"day\", _ == \"Mon\") = " + weekdays.sigma ("day", (x: StrNum) => x == "Mon")) - println ("-" * 60) - println ("weekdays.sigma (\"time\", _ == 5.00) = " + weekdays.sigma ("time", (x: Double) => x == 5.00)) - println ("weekdays.sigma (\"time\", _ == 5.00) = " + weekdays.sigma [Double] ("time", _ == 5.00)) - println ("weekdays.sigma (\"time\", _ == 5.00) = " + weekdays.sigmaD ("time", _ == 5.00)) - println ("weekdays.sigma (\"time\", _ == 5.00) = " + weekdays == ("time", 5.00)) - println ("-" * 60) - println ("weekdays.sigmaS (\"day\", _ > \"Mon\") = " + weekdays.sigmaS ("day", _ > "Mon")) - println ("-" * 60) - println ("weekdays.selectS (\"day\", _ > \"Mon\") = " + weekdays.selectS ("day", _ > "Mon")) - println ("-" * 60) - println ("weekdays.sigmaSD (\"day\", \"time\") = " + weekdays.sigmaS ("day", _ == "Mon")) - - val week = weekdays.union (weekend) - banner ("Test union") - println ("weekdays.union (weekend) = " + week) - - weekend.add (Vector ("Zday", 1.00)) - banner ("Test add") - println ("weekend add (\"Zday\", 
1.00)) = " + weekend) - - banner ("Test -") - println ("week - weekend = " + (week - weekend)) - - banner ("Test join") - println ("week.join (\"day\", \"day\" weekend) = " + week.join ("day", "day", weekend)) - println ("-" * 60) - println ("week join weekend = " + (week join weekend)) - - week.writeCSV ("columnar_db" + ⁄ + "week.csv") - -} // RelationTest object - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest2` object tests the operations provided by `Relation`. - * The relational algebra operators are given using Unicode. - * @see en.wikipedia.org/wiki/List_of_Unicode_characters - * > runMain scalation.columnar_db.RelationTest2 - */ -object RelationTest2 extends App -{ - val weekdays = new Relation ("weekdays", ArrayBuffer ("day", "time"), - Vector (VectorS ("Mon", "Tue", "Wed", "Thu", "Fri"), - VectorD (5.00, 8.15, 6.30, 9.45, 7.00)), - 0, "SD") - - val weekend = new Relation ("weekends", ArrayBuffer ("day", "time"), - Vector (VectorS ("Sat", "Sun"), - VectorD (3.00, 4.30)), - 0, "SD") - - banner ("Test π") - println ("weekdays.π (\"day\") = " + weekdays.π ("day")) - println ("-" * 60) - println ("weekdays.π (\"time\") = " + weekdays.π ("time")) - - banner ("Test σ") - println ("weekdays.σ (\"day\", _ == \"Mon\") = " + weekdays.σ ("day", (x: StrNum) => x == "Mon")) - println ("-" * 60) - println ("weekdays.σ (\"time\", _ == 5.00) = " + weekdays.σ ("time", (x: Double) => x == 5.00)) - println ("-" * 60) - println ("weekdays.σ (\"day\", _ > \"Mon\") = " + weekdays.σ ("day", (x: StrNum) => x > "Mon")) - println ("-" * 60) - println ("weekdays.σ (\"time\", _ > 5.00) = " + weekdays.σ ("time", (x: Double) => x > 5.00)) - println ("-" * 60) - println ("weekdays.σ (\"day\", \"time\") = " + weekdays.σ ("day", (x: StrNum) => x == "Mon") - .σ ("time", (x: Double) => x == 5.00)) - val week = weekdays ⋃ weekend - - banner ("Test ⋃") - println ("weekdays ⋃ weekend) = " + weekdays ⋃ weekend) - - banner ("Test ⋂") - 
println ("week ⋂ weekend = " + (week ⋂ weekend)) - - banner ("Test -") - println ("week - weekend = " + (week - weekend)) - - banner ("Test ⋈ ") - println ("week ⋈ weekend = " + (week ⋈ weekend)) - -} // RelationTest2 object - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest3` object tests the operations provided by `Relation`. - * It test various aggregate/OLAP operations on a simple data warehouse fact table. - * @see www.codeproject.com/Articles/652108/Create-First-Data-WareHouse - * FIX - allow entering doubles as "13" rather than "13.0" - * > runMain scalation.columnar_db.RelationTest3 - */ -object RelationTest3 extends App -{ - import Relation.{max, min} - import RelationEx.productSales - - val costVprice = productSales.project ("ProductActualCost", "SalesTotalCost") - - productSales.show () - - println ("productSales = " + productSales) - println ("productSales.project (\"ProductActualCost\", \"SalesTotalCost\") = " + costVprice) - - banner ("Test count") - println ("count (productSales) = " + count (productSales)) - println ("-" * 60) - println ("count (costVprice) = " + count (costVprice)) - - banner ("Test min") - println ("min (productSales) = " + min (productSales)) - println ("-" * 60) - println ("min (costVprice) = " + min (costVprice)) - - banner ("Test max") - println ("max (productSales) = " + max (productSales)) - println ("-" * 60) - println ("max (costVprice) = " + max (costVprice)) - - banner ("Test sum") - println ("sum (productSales) = " + sum (productSales)) - println ("-" * 60) - println ("sum (costVprice) = " + sum (costVprice)) - - banner ("Test expectation/mean") - println ("Ɛ (productSales) = " + Ɛ (productSales)) - println ("-" * 60) - println ("Ɛ (costVprice) = " + Ɛ (costVprice)) - - banner ("Test variance") - println ("Ʋ (productSales) = " + Ʋ (productSales)) - println ("-" * 60) - println ("Ʋ (costVprice) = " + Ʋ (costVprice)) - - banner ("Test correlation") - println ("corr 
(costVprice) = " + corr (costVprice)) - -} // RelationTest3 object - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest4` object tests conversion `Relation` to a matrix. - * > runMain scalation.columnar_db.RelationTest4 - */ -object RelationTest4 extends App -{ - import RelationEx.productSales - - val (mat, vec) = productSales.toMatriDD (ArrayBuffer.range (0, 11), 11) - - banner ("productSales") - productSales.show () - - banner ("mat and vec") - println ("mat = " + mat) - println ("vec = " + vec) - -} // RelationTest4 object - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest5` object tests the interoperability between Relations and Matrices. - * > runMain scalation.columnar_db.RelationTest5 - */ -object RelationTest5 extends App -{ - val sales_item1 = Relation ("Sales_Item1", ArrayBuffer ("Date", "FL", "GA", "NC", "SC"), - ArrayBuffer (Vector [Any] ("20130101", 10, 5, 5, 4), - Vector [Any] ("20130102", 20, 30, 40, 25), - Vector [Any] ("20130103", 8, 6, 9, 9), - Vector [Any] ("20130104", 6, 7, 9, 10), - Vector [Any] ("20130105", 4, 7, 9, 10)), - 0,"SIIII") - - val price_item1 = Relation ("Price_Item1", ArrayBuffer ("Date", "FL", "GA", "NC", "SC"), - ArrayBuffer (Vector [Any] ("20130101", 1.6, 1.6, 1.5, 1.3), - Vector [Any] ("20130102", 1.6, 1.6, 1.5, 1.2), - Vector [Any] ("20130103", 1.5, 1.6, 1.5, 1.4), - Vector [Any] ("20130104", 1.4, 1.7, 1.5, 1.4), - Vector [Any] ("20130105", 1.4, 1.7, 1.4, 1.4)), - 0,"SDDDD") - val revenue = Relation ("Revenue", -1, null, "Item", "FL", "GA", "NC", "SC") - - sales_item1.show () - price_item1.show () - - val x = sales_item1.toMatriD (ArrayBuffer.range (1, 5), COMPRESSED) - val y = price_item1.toMatriD (ArrayBuffer.range (1, 5), COMPRESSED) - val z = x dot y - revenue.add ("Item1" +: z().toVector) - - banner ("revenue") - revenue.show () - -} // RelationTest5 - - 
-//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest6` object tests 'indexjoin', 'parjoin', 'groupby' and 'aggregation'. - * > runMain scalation.columnar_db.RelationTest6 - */ -object RelationTest6 extends App -{ - val professor = Relation ("professor", 0, "ISS", "pid", "name", "prodeptid") - TableGen.popTable (professor, 10) - professor.generateIndex () - - val course = Relation ("course", 0, "ISS", "cid","crsname", "descr") - TableGen.popTable (course, 20) - course.generateIndex () - - val teaching = Relation ("teaching", 0, "IISI", "tid", "cid", "semester", "pid") - teaching.fKeys = ArrayBuffer (("cid", "course", 0), ("pid", "professor", 0)) - TableGen.popTable (teaching, 50, ArrayBuffer (course, professor)) - teaching.generateIndex () - - banner ("database") - professor.show () - course.show () - teaching.show () - teaching.showFk () - - banner ("joinindex") - teaching.joinindex (ArrayBuffer ("pid"), ArrayBuffer("pid"), professor).show () -// banner ("parjoin") -// teaching.parjoin (ArrayBuffer ("pid"), ArrayBuffer ("pid"), professor, 4).show () - banner ("groupBy.eproject") - teaching.groupBy ("cid").eproject ((count, "pid_count", "pid"))("tid", "semester").show () - -} // RelationTest6 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest7` object tests 'join' method. 
- * > runMain scalation.columnar_db.RelationTest7 - */ -object RelationTest7 extends App -{ - val professor = Relation ("professor", - ArrayBuffer("pid", "name", "department", "title"), - ArrayBuffer (Vector [Any] (1, "jackson", "pharm", 4), - Vector [Any] (2, "ken", "cs", 2), - Vector [Any] (3, "pan", "pharm", 0), - Vector [Any] (4, "yang", "gis", 3), - Vector [Any] (5, "zhang", "cs", 0), - Vector [Any] (6, "Yu", "cs", 0)), - -1, "ISSI") - - val professor2 = Relation ("professor", - ArrayBuffer ("pid", "name", "department", "title"), - ArrayBuffer (Vector [Any] (7, "LiLy", "gis", 5), - Vector [Any] (8, "Marry", "gis", 5), - Vector [Any] (0, "Kate", "cs", 5)), - 0, "ISSI") - - professor.generateIndex () - professor2.generateIndex () - - banner ("professor") - professor.show () - banner ("professor2") - professor2.show () - - banner ("join") - professor.join [Int] (professor2, ("pid", "pid", (x, y) => x < y)).show () - professor.join [Int] (professor2, ("pid", "pid", _ < _)).show () - -} // RelationTest7 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest8` object tests 'save' method. - * > runMain scalation.columnar_db.RelationTest8 - */ -object RelationTest8 extends App -{ - val professor = Relation ("professor", - ArrayBuffer("pid", "name", "department", "title"), - ArrayBuffer (Vector [Any] (1, "jackson", "pharm", 4), - Vector [Any] (2, "ken", "cs", 2), - Vector [Any] (3, "pan", "pharm", 0), - Vector [Any] (4, "yang", "gis", 3), - Vector [Any] (5, "zhang", "cs", 0), - Vector [Any] (6, "Yu", "cs", 0)), - -1, "ISSI") - - professor.save () - -} // RelationTest8 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest9` object tests 'apply' method to load a saved relation. 
- * > runMain scalation.columnar_db.RelationTest9 - */ -object RelationTest9 extends App -{ - Relation ("professor").show () - -} // RelationTest9 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest10` object tests the 'orderBy' method. - * > runMain scalation.columnar_db.RelationTest10 - */ -object RelationTest10 extends App -{ - import RelationEx.productSales - - productSales.orderBy ("SalesTotalCost", "Deviation").show () - -} // RelationTest10 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest11` object tests the `Relation` on the traffic schema. - * > runMain scalation.columnar_db.RelationTest11 - */ -object RelationTest11 extends App -{ - val sensor = Relation ("sensor", ArrayBuffer ("sensorID", "model", "latitude", "longitude", "on"), - null, 0, "ISDDI") - val road = Relation ("road", ArrayBuffer ("roadID", "rdName", "lat1", "long1", "lat2", "long2"), - null, 0, "ISDDDD") - val mroad = Relation ("road", ArrayBuffer ("roadID", "rdName", "lanes", "lat1", "long1", "lat2", "long2"), - null, 0, "ISIDDDD") - val traffic = Relation ("traffic", ArrayBuffer ("time", "sensorID", "count", "speed"), - null, 0, "TIID") - val wsensor = Relation ("sensor", ArrayBuffer ("sensorID", "model", "latitude", "longitude"), - null, 0, "ISDD") - val weather = Relation ("weather", ArrayBuffer ("time", "sensorID", "precipitation", "wind"), - null, 0, "TIID") - - sensor.show () - road.show () - mroad.show () - traffic.show () - wsensor.show () - weather.show () - -} // RelationTest11 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest12` object tests the `Relation` class on JSON data. 
- * @see www.learningcontainer.com/sample-json-file - * FIX - does not work for Scala 2.13 - * > runMain scalation.columnar_db.RelationTest12 - * -object RelationTest12 extends App -{ - val fname = BASE_DIR + "employee.json" - println (s"fname = $fname") - val employee = Relation (fname, "employee") - - employee.show () - -} // RelationTest12 - */ - diff --git a/target/scala-3.6.4/classes/scalation/database/relation/old/Relation.scala.bak2 b/target/scala-3.6.4/classes/scalation/database/relation/old/Relation.scala.bak2 deleted file mode 100644 index bd241891d..000000000 --- a/target/scala-3.6.4/classes/scalation/database/relation/old/Relation.scala.bak2 +++ /dev/null @@ -1,2303 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Yang Fan, Vinay Bingi, Santosh Uttam Bobade - * @version 1.6 - * @date Sun Aug 23 15:42:06 EDT 2015 - * @see LICENSE (MIT style license file). - * - * An implementation supporting columnar relational databases facilitating easy - * and rapid analytics. The columns in a relation are vectors from the - * `scalation.linalgebra` package. Vectors and matrices may be readily extracted - * from a relation and feed into any of the numerous analytics techniques provided - * in `scalation.analytics`. The implementation provides most of the columnar - * relational algebra operators given in the following paper: - * @see db.csail.mit.edu/projects/cstore/vldb.pdf - * - * Some of the operators have unicode versions: @see `scalation.util.UnicodeTest` - * - * Supports Time Series Databases (TSDB) via `TimeNum` domain/datatype and 'leftJoinApx' - * 'rightJoinApx' methods. 
- */ - -package scalation -package database -package relation - -import java.io._ - -import scala.collection.mutable.{ArrayBuffer, HashMap, IndexedSeq, Map} -import scala.math.min -import scala.io.Source.fromInputStream - -import scalation.mathstat._ - -import Table._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Relation` companion object provides additional functions for the `Relation` - * class. - * FIX - apply methods - make compatible with RelationSQL - */ -object Relation: - - private val flaw = flawf ("Relation") // flaw function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an unpopulated relation. - * @param name the name of the relation - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'String', 'Double') - * @param colName the names of columns - */ - def apply (name: String, key: Int, domain: String, colName: String*): Relation = - val n = colName.length - val colName_ = ArrayBuffer (colName :_* ) - new Relation (name, colName_, Vector.fill [Vec] (n)(null), key, domain) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an unpopulated relation. - * @param name the name of the relation - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'String', 'Double') - * @param colName the names of columns - */ - def apply (name: String, key: Int, domain: String, colName: ArrayBuffer [String]): Relation = - val n = colName.length - new Relation (name, colName, Vector.fill [Vec] (n)(null), key, domain) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a relation from a sequence of row/tuples. These rows must be converted - * to columns. 
- * @param name the name of the relation - * @param colName the names of columns - * @param row the sequence of rows to be converted to columns for the columnar relation - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'String', 'Double') - */ - def apply (name: String, colName: ArrayBuffer [String], row: ArrayBuffer [Row], key: Int, domain: String): Relation = - val equivCol = Vector.fill [Vec] (colName.length)(null) - val r2 = new Relation (name, colName, equivCol, key, domain) - for tuple <- row do r2.add (tuple) - r2.materialize () - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a relation from a sequence of row/tuples. These rows must be converted - * to columns. - * @param name the name of the relation - * @param colName the names of columns - * @param row the sequence of rows to be converted to columns for the columnar relation - * @param key the column number for the primary key (< 0 => no primary key) - */ - def apply (name: String, colName: ArrayBuffer [String], row: ArrayBuffer [Row], key: Int): Relation = - val equivCol = Vector.fill [Vec] (colName.length)(null) - val r2 = new Relation (name, colName, equivCol, key, null) - for tuple <- row do r2.add (tuple) - r2.materialize () - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the relation with the given 'name' into memory using serialization. 
- * @param name the name of the relation to load - */ - def apply (name: String): Relation = - val ois = new ObjectInputStream (new FileInputStream (STORE_DIR + name + SER)) - val obj = ois.readObject () - ois.close () - val res = obj.asInstanceOf [Relation] - res - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the relation with the given 'name' into memory loading its columns - * with data from the CSV file named 'fileName'. - * Note: "ln.split (eSep, -1)" will keep all values even if empty "one,,three" -> "one","",three" - * @param fileName the file name of the data file - * @param name the name of the relation - * @param colName the names of columns - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'String', 'Double') - * @param skip the number of lines in the CSV file to skip (e.g., header line(s)) - * @param eSep the element separation string/regex (e.g., "," ";" " +") - */ - def apply (fileName: String, name: String, colName: ArrayBuffer [String], key: Int, - domain: String, skip: Int, eSep: String): Relation = - var cnt = skip - val lines = getFromURL_File (fileName) - val newCol = Vector.fill [Vec] (colName.length)(null) - val r3 = new Relation (name, colName, newCol, key, domain) - for ln <- lines do - val buf = ArrayBuffer.from (ln.split (eSep, -1)) - if cnt <= 0 then r3.add (r3.row (buf, domain)) else cnt -= 1 - end for - r3.materialize () - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the relation with the given 'name' into memory loading its columns - * with data from the CSV file named 'fileName'. In this version, the column - * names are read from the first line of the file. 
- * @param fileName the file name of the data file - * @param name the name of the relation - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'String', 'Double') - * @param eSep the element separation string/regex (e.g., "," ";" " +") - * @param cPos the sequence of column positions in the input file to be used (null => select all) - */ - def apply (fileName: String, name: String, key: Int, domain: String, eSep: String, - cPos: ArrayBuffer [Int]): Relation = - val lines = getFromURL_File (fileName) - var first = true - var colBuffer: Array [ArrayBuffer [String]] = null - var colName: ArrayBuffer [String] = null - var newCol: Vector [Vec] = null - - if cPos == null then // select all columns - for ln <- lines do - if first then - colName = ArrayBuffer.from (ln.split (eSep, -1).map (_.trim)) - colBuffer = Array.fill (colName.length)(new ArrayBuffer ()) - first = false - else - val values = ln.split (eSep, -1).map (_.trim) - for i <- colName.indices do colBuffer(i) += values(i) - end if - end for - else // select cPos columns - if domain.length != cPos.length then - flaw ("apply", "cPos length should be same as domain length") - end if - for ln <- lines do - if first then - val name = ln.split (eSep, -1).map (_.trim) - colName = ArrayBuffer [String] () - colBuffer = Array.fill (cPos.length)(new ArrayBuffer ()) - for i <- colBuffer.indices do colName += name(cPos(i)) - first = false - else - val values = ln.split (eSep, -1).map (_.trim) - for i <- colName.indices do colBuffer(i) += values(cPos(i)) - end if - end for - end if - new Relation (name, colName, makeCol (colBuffer, domain), key, domain) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Make the columns for the columnar table from data stored in 'colBuffer'. 
- * @param colBuffer the column buffer holding the data - * @param domain the domains/datatypes for the columns - */ - private def makeCol (colBuffer: Array [ArrayBuffer [String]], domain: String): Vector [Vec] = - colBuffer.indices.map (i => - if domain == null || domain == "" then VectorS.fromStrings (colBuffer(i)) - else domain(i) match { - case 'D' => VectorD.fromStrings (colBuffer(i)) - case 'I' => VectorI.fromStrings (colBuffer(i)) - case 'L' => VectorL.fromStrings (colBuffer(i)) - case 'S' => VectorS.fromStrings (colBuffer(i)) - case _ => flaw ("makeCol", s"domain type ${domain(i)} not supported") - null.asInstanceOf [Vec] - }).toVector.asInstanceOf [Vector [Vec]] - end makeCol - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the relation with the given 'name' into memory loading its columns - * with data from the CSV file named 'fileName'. In this version, the column - * names are read from the first line of the file. It uses 'col2' which is a - * temporary ArrayBuffer, and maintains indices. 
- * @param fileName the file name of the data file - * @param name the name of the relation - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'String', 'Double') - * @param eSep the element separation string/regex (e.g., "," ";" " +") - */ - def apply (fileName: String, name: String, domain: String, key: Int, eSep: String = ","): Relation = - var first = true - val lines = getFromURL_File (fileName) - var r3: Relation = null - var currentlineno = 0 - - for ln <- lines do - if first then - val colName = ArrayBuffer.from (ln.split (eSep, -1)) - val newCol = Vector.fill [Vec] (colName.length)(null) - r3 = new Relation (name, colName, newCol, key, domain) - first = false - else - if currentlineno % 1000 == 0 then println (s"$currentlineno") - val buf = ArrayBuffer.from (ln.split (eSep, -1)) - r3.add (r3.row (buf, domain)) - currentlineno += 1 - end if - end for - r3.materialize () - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the relation with the given 'name' into memory loading its columns - * with data from the CSV file named 'fileName'. This version assumes - * defaults for 'eSep' and 'skip' of ("," and 0). 
- * @param fileName the file name of the data file - * @param name the name of the relation - * @param colName the names of columns - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'String', 'Double') - */ - def apply (fileName: String, name: String, colName: ArrayBuffer [String], key: Int, - domain: String): Relation = - val eSep = "," - val lines = getFromURL_File (fileName) - val newCol = Vector.fill [Vec] (colName.length)(null) - val r3 = new Relation (name, colName, newCol, key, domain) - for ln <- lines do - val buf = ArrayBuffer.from (ln.split (eSep, -1)) - r3.add (r3.row (buf, domain)) - end for - r3.materialize () - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the relation with the given 'name' into memory loading its columns - * with data from the '.arff' file named 'fileName'. - * @param fileName the file name of the data file - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'String', 'Double') - */ - def apply (fileName: String, key: Int, domain: String): Relation = - val eSep = "[, ]" - val lines = getFromURL_File (fileName) - var name: String = null - var colBuffer: Array [ArrayBuffer [String]] = null - var colName = ArrayBuffer [String]() - var newCol: Vector [Vec] = null - var foundData = false - for ln <- lines do - if ln.indexOf ("%") != 0 then // skip comment - if ln.indexOf ("@relation") == 0 then - name = ln.split (eSep, -1)(1) - else if ln.indexOf ("@attribute") == 0 then - colName += ln.split (eSep, -1)(1) - else if ln.indexOf ("@data") == 0 then - foundData = true - colBuffer = Array.ofDim (colName.length) - for i <- colBuffer.indices do colBuffer (i) = new ArrayBuffer () - else if foundData then - val values = ln.split (eSep, -1) - values.indices.foreach (i => { 
colBuffer (i) += values (i) }) - end if - end if - end for - new Relation (name, colName, colBuffer.indices.map (i => VectorS (colBuffer(i))).toVector, key, domain) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the relation with the given 'name' into memory from a JSON file. - * @see https://github.com/FasterXML/jackson-databind - * @author Shubham Vasant Shingate - * FIX - does not work Scala 2.13 - * @param fileName the file name of the JSON file - * @param name the name of the relation to load - * - def apply (fileName: String, name: String): Relation = - import scala.jdk.CollectionConverters.asScalaIteratorConverter -// import scala.collection.JavaConverters.asScalaIteratorConverter - - import com.fasterxml.jackson.databind.ObjectMapper - type JSON_ELEM = java.util.LinkedHashMap [String, String] - type JSON_TYPE = java.util.List [JSON_ELEM] - - var jsonList: java.util.List [JSON_ELEM] = null - try - val objMapper = new ObjectMapper () - val jsonStr = fromInputStream (new FileInputStream (fileName)).mkString - jsonList = objMapper.readValue (jsonStr, classOf [JSON_TYPE]) - catch - case e: FileNotFoundException => flaw ("apply", s"file $fileName not found") - case e: IOException => flaw ("apply", s"unable to read $fileName: $e") - end try - - var splitStr = jsonList.get(0).toString - var arrSize = 0 - var flag = true - val colNames = ArrayBuffer [String] () - while arrSize != 1 do - val subStr = splitStr.split ("=", 2) - arrSize = subStr.length - if subStr(0).startsWith ("{") && arrSize != 1 then subStr(0) = subStr(0).substring(1) - if ! 
flag && arrSize != 1 then subStr(0) = subStr(0).split (", ", 2)(1) - if arrSize != 1 then - colNames += subStr(0) - splitStr = subStr(1) - flag = false - end if - end while - - val rel = new Relation (name, colNames, Vector.fill [Vec] (colNames.length)(null), 0) - for jsonData <- asScalaIteratorConverter (jsonList.iterator ()).asScala do - val tuple = ArrayBuffer (asScalaIteratorConverter (jsonData.values().iterator()).asScala.toSeq :_*) - rel.add (rel.row (tuple, null)) - end for - rel.materialize () - rel - } // apply - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a relation from the 'xy' matrix of doubles. - * @param xy the matrix containing the data - * @param name the name of the relation - * @param colName the names of columns - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'String', 'Double') - */ - def fromMatrixD (xy: MatrixD, name: String, colName: ArrayBuffer [String], key: Int = -1, - domain: String = null): Relation = - val newCol = for j <- xy.indices2 yield xy(?, j) - new Relation (name, colName, newCol.toVector, key, domain) - end fromMatrixD - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a relation from the 'x' matrix of doubles and 'y' vector of doubles - * or integers. 
- * @param x the matrix containing the data - * @param y the vector containing the data - * @param name the name of the relation - * @param colName the names of columns - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'String', 'Double') - */ - def fromMatrixDD (x: MatrixD, y: VectorD, name: String, colName: ArrayBuffer [String], key: Int = -1, - domain: String = null): Relation = - val newCol = for j <- x.indices2 yield x(?, j) - new Relation (name, colName, newCol.toVector :+ y, key, domain) - end fromMatrixDD - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the count (number of elements) of each of the columns of columnar - * relation 'r'. - * @param r the given relation - */ - def count (r: Relation): IndexedSeq [Int] = ArrayBuffer (r.col.map (_.size) :_*) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the minimum of each of the columns of columnar relation 'r'. - * @param r the given relation - */ - def min (r: Relation): Vector [Double] = for c <- r.col yield c.toDouble.min - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the maximum of each of the columns of columnar relation 'r'. - * @param r the given relation - */ - def max (r: Relation): Vector [Double] = for c <- r.col yield c.toDouble.max - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the mean of each of the columns of columnar relation 'r'. - * @param r the given relation - */ - def sum (r: Relation): Vector [Double] = for c <- r.col yield c.toDouble.sum - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the mean of each of the columns of columnar relation 'r'. 
- * @param r the given relation - */ - def mean (r: Relation): Vector [Double] = for c <- r.col yield c.toDouble.mean - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the variance of each of the columns of columnar relation 'r'. - * @param r the given relation - */ - def variance (r: Relation): Vector [Double] = for c <- r.col yield c.toDouble.variance - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get a Vec of sum of the 'cName' column for the 'r' relation base on each group, - * the result will be the same size. - * @param r the relation to operate on - * @param cName sum on column "cName" - * - def sum (r: Relation, cName: String): Vec = - val cPos = r.colMap.get(cName).get - val domainc = r.domain(cPos) - var columnlist:Vec = null - var count = 0 - var pointer = 0 - var sumlist: Vec = null - for idx <- r.orderedIndex do -// columnlist = Vec.:+ (columnlist,r.index(idx)(cPos),r.domain,cPos) - columnlist = Vec.:+ (columnlist,r.index(idx)(cPos)) - if count +1 == r.grouplist(pointer) then - val thisroundsum = Vec.sum(columnlist) -// sumlist = Vec.:+ (sumlist, thisroundsum, r.domain, cPos) - sumlist = Vec.:+ (sumlist, thisroundsum) - columnlist = null - pointer += 1 - end if - count += 1 - end for - sumlist - end sum - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get a Vec of max of the 'cName' column for the 'r' relation. 
- * @param r the relation you want to operate on - * @param cName max on column "cName" - * - def max (r: Relation, cName: String): Vec = - val cPos = r.colMap.get(cName).get - val domainc = r.domain(cPos) - var columnlist:Vec = null - var count = 0 - var pointer = 0 - var maxlist: Vec = null - for idx <- r.orderedIndex do -// columnlist = Vec.:+ (columnlist,r.index(idx)(cPos),r.domain,cPos) - columnlist = Vec.:+ (columnlist,r.index(idx)(cPos)) - if count + 1 == r.grouplist(pointer) then - val thisroundsum = Vec.max(columnlist) -// maxlist = Vec.:+ (maxlist, thisroundsum, r.domain, cPos) - maxlist = Vec.:+ (maxlist, thisroundsum) - columnlist = null - pointer += 1 - end if - count += 1 - end for - maxlist - end max - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get a Vec of min of the 'cName' column for the 'r' relation - * @param r the relation you want to operate on - * @param cName min on column "cName" - * - def min (r: Relation, cName: String): Vec = - val cPos = r.colMap.get(cName).get - val domainc = r.domain(cPos) - var columnlist:Vec = null - var count = 0 - var pointer = 0 - var minlist: Vec = null - for idx <- r.orderedIndex do -// columnlist = Vec.:+ (columnlist,r.index(idx)(cPos),r.domain,cPos) - columnlist = Vec.:+ (columnlist,r.index(idx)(cPos)) - if count + 1 == r.grouplist(pointer) then - val thisroundsum = Vec.min(columnlist) -// minlist = Vec.:+ (minlist, thisroundsum, r.domain, cPos) - minlist = Vec.:+ (minlist, thisroundsum) - columnlist = null - pointer += 1 - end if - count += 1 - end for - minlist - end min - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get a Vec of average of the 'cName' column for the 'r' relation. 
- * @param r the relation you want to operate on - * @param cName average on column "cName" - * - def avg (r: Relation, cName: String): Vec = - val cPos = r.colMap.get(cName).get - val domainc = r.domain(cPos) - var columnlist: Vec = null - var count = 0 - var pointer = 0 - var avglist: Vec = null - for idx <- r.orderedIndex do -// columnlist = Vec.:+ (columnlist, r.index(idx)(cPos), r.domain, cPos) - columnlist = Vec.:+ (columnlist, r.index(idx)(cPos)) - if count + 1 == r.grouplist(pointer) then - val thisroundsum = Vec.mean(columnlist) -// avglist = Vec.:+ (avglist, thisroundsum, r.domain, cPos) - avglist = Vec.:+ (avglist, thisroundsum) - columnlist = null - pointer += 1 - end if - count += 1 - end for - avglist - end avg - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get a Vec of count of the 'cName' column for the 'r' relation. - * @param r the relation you want to operate on - * @param cName the column name for the column to be counted - * - def count (r: Relation, cName: String): Vec = - val cPos = r.colMap.get(cName).get - var countlist: Vec = null - var i = 0 - for p <- r.grouplist do - val count = p - i -// countlist = Vec.:+ (countlist, count, r.domain, cPos) - countlist = Vec.:+ (countlist, count) - i = p - end for - countlist - end count - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** From function return cartesian product of all the relations. - * @param relations the relations making up the from clause - */ - def from (relations: Relation*): Relation = - var result = relations(0) - for i <- 1 until relations.size do result = result product relations(i) - result - end from - -end Relation - -import Relation._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Relation` class stores and operates on vectors. The vectors form the - * columns of the columnar relational datastore. 
Columns may have any of the - * following types: - * D - `Double` - `VectorD` - 64 bit double precision floating point number - * I - `Int` - `VectorI` - 32 bit integer - * L - `Long` - `VectorL` - 64 bit long integer - * S - `String` - `VectorS` - variable length numeric string - * FIX - (1) don't allow (public) var - * (2) avoid unchecked or incomplete .asInstanceOf [T] - *------------------------------------------------------------------------------ - * @param name the name of the relation - * @param colName the names of columns - * @param col the Scala Vector of columns making up the columnar relation - * @param key the column number for the primary key (< 0 => no primary key) - * @param domain an optional string indicating domains for columns (e.g., 'SD' = 'String', 'Double') - * @param fKeys an optional sequence of foreign keys - ArrayBuffer (column name, ref table name, ref column position) - * @param enter whether to enter the newly created relation into the `Catalog` - */ -class Relation (val name: String, val colName: ArrayBuffer [String], var col: Vector [Vec] = null, - val key: Int = 0, val domain: String = null, var fKeys: ArrayBuffer [(String, String, Int)] = null, - enter: Boolean = true) - extends Table with Serializable: - - private val debug = debugf ("Relation", true) // debug function - private val flaw = flawf ("Relation") // flaw function - private [relation] val colMap = Map [String, Int] () // map column name -> column number - private var grouplist = Vector [Int] () // rows in group - protected val index = Map [ValueType, Row] () // index that maps a key into row - protected val indextoKey = HashMap [Int, ValueType] () // map index -> key - private var keytoIndex = HashMap [ValueType, Int] () // map key -> index - protected var orderedIndex = Vector [ValueType] () // re-ordering of the key column - - if col == null then col = Vector.fill [Vec] (colName.length)(null) - if colName.length != col.length then flaw ("constructor", "incompatible 
sizes for 'colName' and 'col'") -// if enter then Catalog.add (name, colName, key, domain) - - for j <- colName.indices do colMap += colName(j) -> j - - @transient - private val col2 = Vector.fill (colName.size)(ArrayBuffer [ValueType] ()) // efficient holding area for building columns - -/* - private val col2 = - if domain == null then (for j <- colName.indices yield ArrayBuffer [ValueType] ()).toVector - else (for j <- colName.indices yield - domain(j) match { - case 'D' => new ArrayBuffer [Double] () - case 'I' => new ArrayBuffer [Int] () - case 'L' => new ArrayBuffer [Long] () - case 'S' => new ArrayBuffer [String] () - case _ => { flaw ("constructor", s"unsupported column type ${domain(j)} for column $j"); null } - }).toVector -*/ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The 'generateIndex' method helps, e.g., the 'popTable', methods to generate - * an index for the table. - * @param reset if reset is true, use old index to build new index; otherwise, create new index - */ - def generateIndex (reset: Boolean = false): Unit = - if ! 
reset then - for i <- 0 until rows do - val mkey = if key != -1 then row(i)(key) // key column is specified - else i // key column is not specified - val tuple = row(i) - index += mkey -> tuple - indextoKey += i -> mkey - keytoIndex += mkey -> i - orderedIndex = orderedIndex :+ mkey - end for - else // use old index to build - val newoderedIndex = new ArrayBuffer [ValueType] () - val newkeytoIndex = new HashMap [ValueType, Int] () - for i <- orderedIndex.indices do - val mkey = if key != -1 then orderedIndex(i) else i - val tuple = row(keytoIndex(mkey)) - index += mkey -> tuple - newkeytoIndex += mkey -> i - newoderedIndex.update (newoderedIndex.length, mkey) - end for - orderedIndex = newoderedIndex.toVector // map old keytoIndex to rowIndex to - keytoIndex = newkeytoIndex - end if - end generateIndex - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the size in terms of number of columns in the relation. - */ - def cols: Int = col.length - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return all of the columns in the relation. - */ - def columns: Vector [Vec] = col - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the column in the relation with column name 'cName'. - * @param cName column name used to retrieve the column vector - */ - def column (cName: String): Vec = col(colMap (cName)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the names of columns in the relation. - */ - def colNames: ArrayBuffer [String] = colName - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the mapping from column names to column positions. - */ - def colsMap: Map [String, Int] = colMap - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the domains for the columns in the relation. 
- */ - def domains: String = domain - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a row by pulling values from all columns at position 'i'. - * @param i the 'i'th position - */ - def row (i: Int): Row = for c <- col yield c(i) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the size in terms of number of rows in the relation. - */ - def rows: Int = if col(0) == null then 0 else col(0).size - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether 'this' relation contains a row matching the given 'tuple'. - * @param tuple an aggregation of columns values (potential row) - */ - def contains (tuple: Row): Boolean = - for i <- 0 until rows if row(i) sameElements tuple do return true - false - end contains - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Rename 'this' table, returning a shallow copy of 'this' table. - * @param newName the new name for the table. - */ - def rename (newName: String): Relation = - new Relation (newName, colName, col, key, domain, fKeys) - end rename - - // ================================================================= PROJECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Project onto the columns with the given column names. - * @param cName the names of the columns to project onto - */ - def project (cName: String*): Relation = project (ArrayBuffer (cName.map (colMap (_)) :_*), - ArrayBuffer (cName :_*)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Project onto the columns with the given column positions using the given - * column names. 
- * @param cPos the positions of the columns to project onto - * @param cName the names of the columns to project onto - */ - def project (cPos: IndexedSeq [Int], cName: ArrayBuffer [String] = null): Relation = - val newCName = if cName == null then ArrayBuffer (for i <- cPos yield colName(i)) - else cName - val newCol = cPos.map (col(_)).toVector - val newKey = if cPos contains key then cPos.indexOf (key) else -1 - val newDomain = projectD (domain, cPos) - new Relation (name + "_p_" + ucount (), newCName.asInstanceOf [ArrayBuffer [String]], newCol, newKey, newDomain) - end project - - // ======================================================== EXTENDED PROJECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Aggregate/project on the given columns (an extended projection operator that - * applies aggregate operators to aggregation columns and regular projection - * to projection columns). - * @see en.wikipedia.org/wiki/Relational_algebra - * @param aggCol the columns to aggregate on: (aggregate function, new column name, old column name)* - * @param cName the other columns to project on - * - def eproject (aggCol: AggColumn*)(cName: String*): Relation = - val aRange = 0 until aggCol.size - val nCols = aggCol.size + cName.size - val funName = ArrayBuffer [String] () - for c <- aggCol do - if ! 
(colName contains c._3) then throw new IllegalArgumentException (s"column ${c._3} to aggregate on does not exist") - else funName += c._2 - end for - - if grouplist.isEmpty then groupBy (colName(key)) - val newCol = Vector.fill [Vec] (nCols)(null) - val newCName = ArrayBuffer ((cName ++ funName) :_*) - var newDomain = cName.map (n => colMap(n)).map (i => domain(i)) - for i <- aRange do - newDomain = if funName(i) contains "count" then newDomain :+ 'I' // aggregate's result domain is based on aggregate column - else newDomain :+ domain(colMap(aggCol(i)._3)) - end for - val r2 = new Relation (name + "_e_" + ucount (), newCName, newCol, key, newDomain.mkString ("")) - if rows == 0 then return r2 // no rows means early return - - val agglist = for i <- aRange yield aggCol(i)._1(this, aggCol(i)._3) - if cName.size != 0 then - val cPos = ArrayBuffer (cName.map (colMap(_)) :_*) // position of cName - val cPos2 = aggCol.map ((a: AggColumn) => colMap(a._3)) // position of aggregate columns - val shrinkR = pi(cPos, null) // projected relation - var row_i = 0 - var group_j = 0 - orderedIndex.foreach (idx => { - var thisrow = shrinkR.row(keytoIndex(idx)) - for aggf <- agglist.indices do thisrow = thisrow :+ Vec (agglist(aggf), group_j) - r2.add_ni (thisrow) - row_i += 1 - if row_i == grouplist(group_j) then group_j += 1 - }) - r2.materialize () - else // only project on the aggregate column - for i <- aRange do - r2.col = if i == 0 then Vector (agglist(i)) else r2.col :+ agglist(i) - end for - end if - r2 - end eproject - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Method 'epiAny' is a special case of epi. When the projected columns can not be - * decided by the group by columns, only one representative will be shown for each group. 
- * FIX - change name - * @param aggF the aggregate functions you want to use - * @param funName the newly created aggregate columns'names - * @param aggFAttr the columns you want to use of correspondent aggregate functions - * @param cName the columns you want to project on - * - def epiAny (aggF: ArrayBuffer [AggFunction], funName: ArrayBuffer [String], aggFAttr: ArrayBuffer [String], cName: String*): Relation = - aggFAttr.foreach (a => - if ! (colName contains a) then throw new IllegalArgumentException("the attribute you want to aggregate on does not exists")) - cName.foreach (a => - if ! (colName contains a) then throw new IllegalArgumentException("the attribute you want to project on does not exists")) - - if grouplist.isEmpty then groupBy (colName(key)) - val newCol = Vector.fill [Vec](aggFAttr.size + cName.size)(null) - val colNamenew = ArrayBuffer ((cName ++ funName) :_*) - var newDomain = cName.map (n => colMap(n)).map (i => domain(i)) - for i <- funName.indices do - newDomain = if funName(i) contains "count" then newDomain :+ 'I' - else newDomain :+ domain(colMap(aggFAttr(i))) - end for - val r2 = new Relation (name + "_e_" + ucount (), colNamenew, newCol, key, newDomain.mkString ("")) - if rows == 0 then return r2 - - val agglist = for i <- aggF.indices yield aggF(i)(this, aggFAttr(i)) - var group_j = 0 - if cName.size != 0 then - val cPos = ArrayBuffer (cName.map (colMap(_)) :_*) - val shrinkR = pi(cPos, null) - grouplist.foreach (idx => { - var newrow: Vector[ValueType] = null - val rownumber = keytoIndex(orderedIndex(idx-1)) - newrow = shrinkR.row(rownumber) - for i<- aggF.indices do - val aggtemp = Vec (agglist(0), group_j) - newrow = newrow:+ aggtemp - end for - r2.add_ni (newrow) - group_j += 1 - }) - end if - r2.materialize () - end epiAny - */ - - // ========================================================== PROJECT-SELECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Select elements from column 'cName' in 
'this' relation that satisfy the - * predicate 'p' and project onto that column. - * @param cName the name of the column used for selection - * @param p the predicate (`Boolean` function) to be satisfied - */ - def pisigma (cName: String, p: ValueType => Boolean): Relation = - val nu = getMeta (cName) - val newCol = Vector (col (nu._1).filter (p)).asInstanceOf [Vector [Vec]] - new Relation (name + "_s_" + ucount (), nu._2, newCol, nu._3, nu._4) - end pisigma - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get meta-data about the column with name 'cName'. - * @param cName the name of the column - */ - private def getMeta (cName: String): (Int, ArrayBuffer [String], Int, String) = - val cn = colMap (cName) // column position - (cn, ArrayBuffer (cName), if cn == key then key else -1, projectD (domain, IndexedSeq (cn))) - end getMeta - - // ================================================================== SELECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Select elements from columns in cName in this relation that satisfy - * the predicate p. - * @param cName the name of the column used for selection - * @param p the predicate (`Boolean` function) to be satisfied - */ - def select (cName: String, p: ValueType => Boolean): Relation = - selectAt (col (colMap (cName)).filterPos (p)) - end select - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Select across all columns at the specified column positions. 
- * @param pos the specified column positions - */ - def selectAt (pos: collection.immutable.IndexedSeq [Int]): Relation = - val newCol = (for j <- col.indices yield col(j)(pos(0))).toVector.asInstanceOf [Vector [Vec]] // FIX - next line & remove this line -// val newCol = (for j <- col.indices yield col(j)(pos)).toVector.asInstanceOf [Vector [Vec]] - new Relation (name + "_s_" + ucount (), colName, newCol, key, domain) - end selectAt - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the relation whose rows are equal to 'value' in the column with the given name. - * @param cv the (column-name, value) pair, e.g., ("time", 5.00) - */ - def == (cv: (String, ValueType)): Relation = select (cv._1, (x: ValueType) => x == cv._2) - def != (cv: (String, ValueType)): Relation = select (cv._1, (x: ValueType) => x != cv._2) - def < (cv: (String, ValueType)): Relation = select (cv._1, (x: ValueType) => x < cv._2) - def <= (cv: (String, ValueType)): Relation = select (cv._1, (x: ValueType) => x <= cv._2) - def > (cv: (String, ValueType)): Relation = select (cv._1, (x: ValueType) => x > cv._2) - def >= (cv: (String, ValueType)): Relation = select (cv._1, (x: ValueType) => x >= cv._2) - - // =========================================================== SET OPERATORS - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Union 'this' relation and 'r2'. Check that the two relations are compatible. - * If they are not, return the first 'this' relation. - * @param r2 the other relation - */ - def union (r2: Relation): Relation = - if incompatible (r2) then return this // take only this relation - - val newCol = (for j <- col.indices yield col(j) ++ r2.columns(j)).toVector.asInstanceOf [Vector [Vec]] - new Relation (name + "_u_" + ucount (), colName, newCol, -1, domain) - end union - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Intersect 'this' relation and 'r2'. 
Check that the two relations are compatible. - * Use index to finish intersect operation. - * @param r2 the other relation - */ - def intersect (r2: Relation): Relation = - if incompatible (r2) then return null - - val newCol = Vector.fill [Vec] (colName.length) (null) - val r3 = new Relation (name + "_u_" + ucount (), colName, newCol, -1, domain) - - for i <- orderedIndex.indices do - if r2.keytoIndex isDefinedAt orderedIndex(i) then - if row(i) sameElements r2.row(r2.keytoIndex (orderedIndex(i))) then r3.add_ni (row(i)) - end if - end for - r3.materialize () - end intersect - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Intersect 'this' relation and 'r2'. Check that the two relations are compatible. - * Slower and only to be used if there is no index. - * @param r2 the other relation - */ - def intersect2 (r2: Relation): Relation = - if incompatible (r2) then return null - - val newCol = Vector.fill [Vec] (colName.length)(null) - val r3 = new Relation (name + "_u_" + ucount (), colName, newCol.toVector, -1, domain) - for i <- 0 until rows if r2 contains row(i) do r3.add (row(i)) - r3.materialize () - end intersect2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Take the difference of 'this' relation and 'r2' ('this - r2'). Check that - * the two relations are compatible. - * @param r2 the other relation - */ - def minus (r2: Relation): Relation = - if incompatible (r2) then return null - - val newCol = Vector.fill [Vec] (colName.length)(null) - val r3 = new Relation (name + "_m_" + ucount (), colName, newCol, key, domain) - for i <- 0 until rows if ! (r2 contains row(i)) do r3.add (row(i)) - r3.materialize () - end minus - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Take the difference of 'this' relation and 'r2' ('this - r2'). Check that - * the two relations are compatible. Indexed based minus. 
- * @param r2 the other relation - */ - def minus2 (r2: Relation): Relation = - if incompatible (r2) then return null - - val newCol = Vector.fill [Vec] (colName.length)(null) - val r3 = new Relation (name + "_m_" + ucount (), colName, newCol, key, domain) - for i <- orderedIndex.indices do - if r2.keytoIndex isDefinedAt orderedIndex(i) then - if ! (row(i) sameElements r2.row(r2.keytoIndex (orderedIndex(i)))) then r3.add_ni (row(i)) - else - r3.add_ni (row(i)) - end if - end for - r3.materialize () - end minus2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether any rows/tuples exist in 'this' relation. - */ - def exists: Boolean = rows > 0 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether 'this' relation and 'r2' are incompatible by having - * differing numbers of columns or differing domain strings. - * @param r2 the other relation/table - */ - def incompatible (r2: Table): Boolean = - if cols != r2.cols then - flaw ("incompatible", s"$name and r2 have differing number of columns") - true - else if domains != r2.domains then - flaw ("incompatible", s"$name and r2 have differing domain strings") - true - else - false - end if - end incompatible - - // ================================================================= PRODUCT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the Cartesian product of this' relation and 'r2' ('this × r2'). 
- * @param r2 the second relation - */ - def product (r2: Table): Relation = - val ncols = cols + r2.cols - val newCName = disambiguate (colName, r2.colNames) - val newCol = Vector.fill [Vec] (ncols) (null) - val newKey = key // FIX - val newDomain = domain + r2.domains - val r3 = new Relation (name + "_j_" + ucount (), newCName, newCol, newKey, newDomain) - - for i <- 0 until rows do - val t = row(i) - for j <- 0 until r2.rows do r3.add (t ++ r2.row(j)) - end for - r3.materialize () - end product - - // ==================================================================== JOIN - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing an "equi-join". Rows from both - * relations are compared requiring 'cName1' values to equal 'cName2' values. - * Disambiguate column names by appending "2" to the end of any duplicate column name. - * @param cName1 the join column names of this relation (e.g., the Foreign Key) - * @param cName2 the join column names of relation r2 (e.g., the Primary Key) - * @param r2 the rhs relation in the join operation - */ - def join (cName1: ArrayBuffer [String], cName2: ArrayBuffer [String], r2: Table): Relation = - val ncols = cols + r2.cols - val cp1 = cName1.map (colMap (_)) // get column positions in 'this' - val cp2 = cName2.map (r2.colsMap (_)) // get column positions in 'r2' - if cp1.length != cp2.length then flaw ("join", "incompatible sizes on match columns") - - val newCName = disambiguate (colName, r2.colNames) - val newCol = Vector.fill [Vec] (ncols) (null) - val newKey = key // FIX - val newDomain = domain + r2.domains - val r3 = new Relation (name + "_j_" + ucount (), newCName, newCol, newKey, newDomain) - - for i <- 0 until rows do - val t = row(i) - for j <- 0 until r2.rows do - val u = r2.row(j) - if sameOn (t, u, cp1, cp2) then r3.add (t ++ u) - end for - end for - r3.materialize () - end join - - 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing an "equi-join", use index to join - * @param cName1 the join column names of this relation (e.g., the Foreign Key) - * @param cName2 the join column names of relation r2 (e.g., the Primary Key) - * @param r2 the rhs relation in the join operation - */ - def joinindex (cName1: ArrayBuffer [String], cName2: ArrayBuffer [String], r2: Relation): Relation = - val ncols = cols + r2.cols - val cp1 = cName1.map (colMap (_)) // get column positions in 'this' - val cp2 = cName2.map (r2.colMap (_)) // get column positions in 'r2' - if cp1.length != cp2.length then flaw ("join", "incompatible sizes on match columns") - - val newCName = disambiguate (colName, r2.colName) - val newCol = Vector.fill [Vec] (ncols)(null) - val newKey = if r2.key == cp2(0) then key // foreign key in this relation - else if key == cp1(0) then r2.key // foreign key in r2 table - else -1 // key not in join and composite keys not allowed - - val newDomain = domain + r2.domains - val r3 = new Relation (name + "_j_" + ucount (), newCName, newCol, newKey, newDomain) - - if cp1.size == 1 && cp2.size == 1 then - if key == cp1(0) && r2.key == cp2(0) then - for k <- orderedIndex do - val t = index(k) - val u = r2.index.getOrElse (k, null) - if u != null then r3.add_ni (t ++ u) - end for - else if key == cp1(0) then - for idx <- r2.orderedIndex do - val u = r2.index(idx) - val t = index.getOrElse ((u(cp2(0))), null) - if t != null then r3.add_ni (t ++ u) - r3.add_ni(t ++ u) - end for - else if r2.key == cp2(0) then - for idx <- orderedIndex do - val t = index(idx) - val u = r2.index.getOrElse ((t(cp1(0))), null) - if u != null then r3.add_ni (t ++ u) - end for - end if - end if - r3.materialize () - end joinindex - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing a "natural-join". 
Rows from both - * relations are compared requiring 'cName' values to be equal. - * @param cName the common join column names for both relation - * @param r2 the rhs relation in the join operation - */ - def join (cName: ArrayBuffer [String], r2: Relation): Relation = - val ncols = cols + r2.cols - cName.length - val cp1 = cName.map (colMap (_)) // get column positions in 'this' - val cp2 = cName.map (r2.colMap (_)) // get column positions in 'r2' - var newDomain2 = r2.domain - for i <- cp1.length - 1 to 0 by -1 do - val (cp1_i, cp2_i) = (cp1(i), cp2(i)) - if domain(cp1_i) != r2.domain(cp2_i) then flaw ("join", s"column types do not match: $cp1, $cp2") - newDomain2 = removeAt (newDomain2, cp2_i) - end for - val cp3= r2.colName.map (r2.colMap (_)) diff cp2 // 'r2' specific columns - - val newCName = uniq_union (colName, r2.colName) - val newCol = Vector.fill [Vec] (ncols) (null) - val newKey = key // FIX - val newDomain = domain + newDomain2 - val r3 = new Relation (name + "_j_" + ucount (), newCName, newCol, newKey, newDomain) - - for i <- 0 until rows do - val t = row(i) - for j <- 0 until r2.rows do - val u = r2.row(j) - if sameOn (t, u, cp1, cp2) then { val u3 = Table.project (u, cp3); r3.add (t ++ u3) } - end for - end for - r3.materialize () - end join - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The theta join, handle the predicates in where are connect by "and" (where a....and b....). 
- * @param r2 the second relation - * @param p0 the first theta join predicate (r1 cName, r2 cName, predicate to compare these two column) - * @param p the rest of theta join predicates (r1 cName, r2 cName, predicates to compare these two column) - */ - def join (r2: Relation, p0: Predicate2, p: Predicate2*): Relation = - val ncols = cols + r2.cols - val newCName = disambiguate (colName, r2.colName) - val newCol = Vector.fill [Vec] (ncols) (null) - val newKey = key // FIX - val newDomain = domain + r2.domain - val r3 = new Relation (name + "_j_" + ucount (), newCName, newCol, newKey, newDomain, null) - - var resultlist = IndexedSeq [(Int, Int)] () - for i <- 0 to p.size do - var result = IndexedSeq [(Int, Int)] () - val p_i = if i == 0 then p0 else p(i-1) - val cp1 = colMap (p_i._1) - val cp2 = r2.colMap (p_i._2) - if domain.charAt (cp1) != r2.domain.charAt (cp2) then flaw ("join", "differing domain strings") - - val psingle = p_i._3 // single predicate - result = null // FIX the next line & remove this line - //result = col(cp1).filterPos2 (r2.col (cp2), psingle) - - debug ("join", s"after predicate $i: result = $result") - resultlist = if i == 0 then result else resultlist intersect result - end for - - val smallmapbig = resultlist.groupBy (_._1) - for i <- smallmapbig.keySet.toVector.sorted do - val t = if key < 0 then index(i) else index(indextoKey(i)) - val bigindexs = smallmapbig (i).map (x => x._2) - for j <- bigindexs do - val u = if r2.key < 0 then r2.index(j) else r2.index(r2.indextoKey(j)) - r3.add (t ++ u) - end for - end for - r3.materialize () - end join - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing an "left-join". Rows from both - * relations are compared requiring 'cName1' values to equal 'cName2' values. - * Disambiguate column names by appending "2" to the end of any duplicate column name. 
- * All rows from the left table are maintained with missing values indicators used - * where needed. - * @param cName1 the join column names of this relation (e.g., the Foreign Key) - * @param cName2 the join column names of relation r2 (e.g., the Primary Key) - * @param r2 the rhs relation in the join operation - */ - def leftJoin (cName1: String, cName2: String, r2: Relation): Relation = - leftJoin (colMap (cName1), colMap (cName2), r2) - end leftJoin - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing a "left join". Rows from both - * relations are compared requiring 'cp1' values to equal 'cp2' values. - * This method returns all the rows from 'this' relation, and the matched rows - * from relation 'r2'. It adds a 'null' tuples for the unmatched rows of relation 'r2' - * FIX: It requires relations 'this' and 'r2' to be sorted on column 'cp1' and 'cp2' resp., as it uses Sort-Merge join - * @param cp1 the position of the join column of this relation - * @param cp2 the position of the join column of 'r2' relation - * @param r2 the rhs relation in the join operation - */ - def leftJoin (cp1: Int, cp2: Int, r2: Relation): Relation = - val r3 = Relation (name + "_leftJoin_" + r2.name, key, domain + r2.domain, colName ++ r2.colName) - val absentTuple = nullTuple (r2.domain) - var j = 0 - for i <- 0 until rows do - val t = row(i) - val t_cp1 = t(cp1) - while j < r2.rows-1 && r2.col(cp2)(j) < t_cp1 do j += 1 - val j_aux = j - if t_cp1 == r2.row(j)(cp2) then - while j < r2.rows && r2.col(cp2)(j) == t_cp1 do - val u = r2.row(j) - r3.add_ni (t ++ u) - j += 1 - end while - j = j_aux - else - r3.add_ni (t ++ absentTuple) - end if - end for - r3.materialize () - end leftJoin - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing a "right join". 
Rows from both - * relations are compared requiring 'cp1' values to equal 'cp2' values. - * This method returns all the rows from 'this' relation, and the matched rows - * from relation 'r2'. It adds a 'null' tuples for the unmatched rows of relation 'r2' - * @param cp1 the position of the join column of this relation - * @param cp2 the position of the join column of 'r2' relation - * @param r2 the rhs relation in the join operation - */ - def rightJoin (cp1: Int, cp2: Int, r2: Relation): Relation = - r2.leftJoin (cp2, cp1, this) - end rightJoin - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing an "approximate left-join". Rows from both - * relations are compared requiring 'cName1' values to apprximately equal 'cName2' values. - * Disambiguate column names by appending "2" to the end of any duplicate column name. - * All rows from the left table are maintained with missing values indicators used - * where needed. - * @param thres the approximate equality threshold - * @param cName1 the join column names of this relation (e.g., the Foreign Key) - * @param cName2 the join column names of relation r2 (e.g., the Primary Key) - * @param r2 the rhs relation in the join operation - * - def leftJoinApx (thres: Double = 0.001) (cName1: String, cName2: String, r2: Relation): Relation = - setThreshold (thres) - leftJoinApx (colMap (cName1), colMap (cName2), r2.asInstanceOf [Relation]) - end leftJoinApx - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing a "left join". Rows from both - * relations are compared requiring 'cp1' values to approximately equal 'cp2' values. - * This method returns all the rows from 'this' relation, and the matched rows - * from relation 'r2'. 
It adds a 'null' tuples for the unmatched rows of relation 'r2' - * FIX: It requires relations 'this' and 'r2' to be sorted on column 'cp1' and 'cp2' resp., - * as it uses Sort-Merge join - * @param cp1 the position of the join column of this relation - * @param cp2 the position of the join column of 'r2' relation - * @param r2 the rhs relation in the join operation - * - def leftJoinApx (cp1: Int, cp2: Int, r2: Relation): Relation = - val r3 = Relation (name + "_leftJoinApx_" + r2.name, 1, domain + r2.domain, colName ++ r2.colName) - val absentTuple = nullTuple (r2.domain) - var j = 0 - - for i <- 0 until rows do - val t = row(i) - val t_cp1 = t(cp1) - while j < r2.rows-1 && !=~ (Vec (r2.col(cp2), j), t_cp1) && r2.col(cp2)(j) < t_cp1 do j += 1 - val j_aux = j - if =~ (t_cp1, r2.row(j)(cp2)) then - while j < r2.rows && =~ (Vec (r2.col(cp2), j), t_cp1) do - val u = r2.row(j) - r3.add_ni (t ++ u) - j += 1 - end while - j = j_aux - else - r3.add_ni (t ++ absentTuple) - end if - end for - r3.materialize () - end leftJoinApx - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join 'this' relation and 'r2' by performing a "right join". Rows from both - * relations are compared requiring 'cp1' values to approximately equal 'cp2' values. - * This method returns all the rows from 'this' relation, and the matched rows - * from relation 'r2'. 
It adds a 'null' tuples for the unmatched rows of relation 'r2' - * @param cp1 the position of the join column of this relation - * @param cp2 the position of the join column of 'r2' relation - * @param r2 the rhs relation in the join operation - * - def rightJoinApx (cp1: Int, cp2: Int, r2: Relation): Relation = - r2.leftJoinApx (cp2, cp1, this) - end rightJoinApx - */ - - // ================================================================ GROUP BY - - private val groupMap = Map [ValueType, ArrayBuffer [Int]] () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Group this relation by the specified column name, returning this relation. - * Each value in column cName will be mapped to a vector of row numbers containing - * the value, e.g., * { (a, A), (b, C), (a, T) } makes map a -> (0, 2), b -> (1). - * @param cName the group column name - */ - def groupBy (cName: String): Relation = - if ! (colName contains cName) then - flaw ("groupBy", s"cName = $cName is not contained in colName") - end if - - val _col = col(colMap (cName)) // the cName column - for i <- indices do - val key = _col(i).asInstanceOf [ValueType] - val loc = groupMap.getOrElseUpdate (key, ArrayBuffer [Int] ()) - loc += i // add index/row num i - end for - this - end groupBy - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Group 'this' relation by the specified column names, returning 'this' relation. - * @param cName the group column names - * - def groupBy (cName: String*): Relation = - if ! cName.map (c => colName contains(c)).reduceLeft (_ && _) then - flaw ("groupBy", "groupbyName used to groupby doesn't exist in the cName") - end if - val equivCol = Vector.fill [Vec] (colName.length)(null) - if rows == 0 then return this - - val cPos = cName.map (colMap (_)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Sort on the given columns. 
- * @param sortColumn the set of columns to sort on - */ - def sortcol (sortColumn: Set [ValueType]): Vec = - println (s"sortCol: sortColumn = $sortColumn") - var colcol: Vec = null - val domain = null -// for x <- sortColumn do colcol = Vec.:+ (colcol, x, domain, 0) - for x <- sortColumn do colcol = Vec.:+ (colcol, x) - - val sortcol = colcol; sortcol.sort (); sortcol - end sortcol - - var groupIndexMap = Map [ValueType, Vector [ValueType]] () - val tempIndexMap = Map [ValueType, Vector [ValueType]] () - var sortlst: Vec = null - - for i <- cPos.indices do - if i == 0 then - index.foreach (indexmap => { - val key = indexmap._2(cPos(i)).toString - val value = indexmap._1 - if groupIndexMap contains key then groupIndexMap += key -> (groupIndexMap(key) :+ value) - else groupIndexMap += key -> Vector(value) - }) // foreach - else - tempIndexMap.clear () - groupIndexMap.foreach (groupindexmap => { - val tempidxlist = groupindexmap._2 - for idx <- tempidxlist do - val key = groupindexmap._1.toString + "," + index(idx)(cPos(i)) - val value = idx - if tempIndexMap contains(key) then tempIndexMap += key -> (tempIndexMap(key) :+ value) - else tempIndexMap += key -> Vector(value) - end for - }) // for each - groupIndexMap = tempIndexMap - end if - - if i == cPos.size - 1 then - orderedIndex = Vector () - grouplist = Vector [Int] () - sortlst = sortcol (groupIndexMap.keySet.toSet) - for k <- 0 until sortlst.size do - val indexes = groupIndexMap(Vec(sortlst, k)) - orderedIndex = orderedIndex ++ indexes - grouplist = grouplist :+ orderedIndex.length - end for - end if - end for - this - end groupBy - */ - - // ================================================================= ORDER BY - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Order (ascending) the rows in the relation by the selected columns '_cName'. - * A stable sorting is used to allow sorting on multiple columns. 
- * @param _cName the column names that are to be sorted - */ - def orderBy (_cName: String*): Relation = - val cName = _cName.distinct - if ! cName.map (c => colName contains (c)).reduceLeft (_ && _) then - flaw ("orderBy", "cName used to orderBy does not exist in relation") - end if - - val newCol = Vector.fill [Vec] (cols)(null) - val r2 = new Relation (name + "_j_" + ucount (), colName, newCol, key, domain) - - val perm = orderByHelper (ArrayBuffer (cName.map (colMap (_)) :_*), rows) - for i <- perm do r2.add (row(i)) - r2.materialize () - end orderBy - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Order (descending) the rows in the relation by the selected columns '_cName'. - * A stable sorting is used to allow sorting on multiple columns. - * @param _cName the column names that are to be sorted - */ - def reverseOrderBy (_cName: String*): Relation = - val cName = _cName.distinct - if ! cName.map (c => colName contains (c)).reduceLeft (_ && _) then - flaw ("reverseOrderBy", "cName used to orderBy does not exist in relation") - end if - - val newCol = Vector.fill [Vec] (cols) (null) - val r2 = new Relation (name + "_j_" + ucount (), colName, newCol, key, domain) - - val perm = orderByHelper (ArrayBuffer (cName.map (colMap (_)) :_*), rows) - for i <- perm.reverse do r2.add (row(i)) - r2.materialize () - end reverseOrderBy - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Helper method for 'orderBy' and 'reverseOrderBy'. Performs indirect merge sort. 
- * @param cPos sequence of column positions to sort - * @param n total number of rows in this Relation - */ - private def orderByHelper (cPos: ArrayBuffer [Int], n: Int = rows): Array [Int] = - var perm: Array [Int] = null - - for i <- cPos.indices do - val col_i = col (cPos(i)).toArray -/* FIX - add MergeSortIndirect to scalation pcakage - perm = if i == 0 then (new MergeSortIndirect (col_i)()).isort () - else (new MergeSortIndirect (col_i)(perm)).isort () -*/ - end for - perm - end orderByHelper - - // ================================================================= UPDATES - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add 'tuple' to 'this' relation as a new row. It uses 'col2' as a temp 'col' - * to improve performance. - * @param tuple an aggregation of columns values (new row) - */ - @throws (classOf [Exception]) - def add (tuple: Row): Unit = - try - if tuple == null then throw new Exception ("add function: tuple is null") - val rowIdx = col2(0).length - val newkey = if key < 0 then rowIdx else tuple(key) - index += newkey -> tuple - keytoIndex += newkey -> rowIdx - orderedIndex = orderedIndex :+ newkey - indextoKey += rowIdx -> newkey - for j <- tuple.indices do addElem (j, rowIdx, tuple(j)) - catch - case ex: NullPointerException => - println ("tuple'size is: " + tuple.size) - println ("col'size is: " + col.size) - throw ex - end try - end add - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add an element into 'col2', the holding area for input. If the types - * of column domains are specified, the types are checked. 
- * @param j the j-th column of col2 - * @param rowIdx the row index - * @param elem the element to added - */ - private def addElem (j: Int, rowIdx: Int, elem: ValueType): Unit = - val typ = if domain == null then 'X' else domain(j) - try - col2(j)(rowIdx) = elem - catch - case ex: ClassCastException => - if typ == 'S' then -// println (s"warning in addElem: colIdx j = $j, rowIdx = $rowIdx, elem = $elem, class = ${elem.getClass}, typ = $typ") - col2(j)(rowIdx) = elem.toString // anything can be a string - else if elem.isInstanceOf [String] || elem.isInstanceOf [Char] then - println (s"warning in addElem: colIdx j = $j, rowIdx = $rowIdx, elem = $elem, class = ${elem.getClass}, typ = $typ") - else - println (s"exception in addElem: name = $name, colIdx j = $j, rowIdx = $rowIdx, elem = $elem, class = ${elem.getClass}, typ = $typ") - throw ex - end if - end try - end addElem - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add a tuple into the 'col2', without maintaining the index (No Index (ni), - * orderedIndex, keytoIndex and indextoKey. - * @param tuple the tuple to add - */ - private def add_ni (tuple: Row): Unit = - val rowIdx = col2(0).length - for j <- tuple.indices do addElem (j, rowIdx, tuple(j)) - end add_ni - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Materialize the relation by copying the temporary 'col2' into 'col'. - * It needs to be called by the end of the relation construction. - */ - def materialize (): Relation = - if domain == null || domain == "" then materialize1 () else materialize2 () - end materialize - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Materialize the relation by copying the temporary 'col2' into 'col'. - * It needs to be called by the end of the relation construction. - * This version uses the type/domain of the first value to transform the 'col2' to 'col'. 
- */ - private [relation] def materialize1 (): Relation = - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Transform the j-th column to the appropriate vector type. - * @param j the j-th column index in the relation - */ - def transform1 (j: Int): Vec = - val first = col2(0) -/* - first match - case _: Double => val rs = VectorD (col2(j).asInstanceOf [IndexedSeq [Double]]); col2(j).clear (); rs - case _: Int => val rs = VectorI (col2(j).asInstanceOf [IndexedSeq [Int]]); col2(j).clear (); rs - case _: Long => val rs = VectorL (col2(j).asInstanceOf [IndexedSeq [Long]]); col2(j).clear (); rs - case _: String => val rs = VectorS (col2(j).asInstanceOf [IndexedSeq [String]]); col2(j).clear (); rs - case _ => flaw ("materialize1.transform", s"($j): vector type ($first) not supported"); null - end match -*/ - null.asInstanceOf [Vec] // FIX - end transform1 - -// debug ("materialize1", s"col2 = $col2") - if colEmpty then - col = (for j <- col2.indices yield transform1(j)).toVector - else - col = (for j <- col.indices yield transform1(j)).toVector ++ - (for j <- col2.indices yield transform1(j)).toVector - end if - this - end materialize1 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Materialize the relation by copying the temporary 'col2' into 'col'. - * It needs to be called by the end of the relation construction. - * This version uses 'domain' to transform the 'col2' to 'col' according to the domain indicator: - */ - private [relation] def materialize2 (): Relation = - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Transform the j-th column to the appropriate vector type. 
- * @param j the j-th column index in the relation - */ - def transform2 (j: Int): Vec = - val dj = domain(j) - dj match - case 'D' => val rs = VectorD (col2(j).asInstanceOf [IndexedSeq [Double]]); col2(j).clear (); rs - case 'I' => val rs = VectorI (col2(j).asInstanceOf [IndexedSeq [Int]]); col2(j).clear (); rs - case 'L' => val rs = VectorL (col2(j).asInstanceOf [IndexedSeq [Long]]); col2(j).clear (); rs - case 'S' => val rs = VectorS (col2(j).asInstanceOf [IndexedSeq [String]]); col2(j).clear (); rs - case _ => flaw ("materialize2.transform", s"($j) vector type not supported domain ($dj)"); null - end match - end transform2 - -// debug ("materialize2", s"col2 = $col2") - if colEmpty then - col = (for j <- col2.indices yield transform2(j)).toVector - else - col = (for j <- col.indices yield transform2(j)).toVector ++ - (for j <- col2.indices yield transform2(j)).toVector - end if - this - end materialize2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether all of the columns in the relation are empty. - */ - def colEmpty: Boolean = - for column <- col if column != null do return false - true - end colEmpty - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the column named 'cName' using function 'func' for elements with - * value 'matchStr'. - * @param cName the name of the column to be updated - * @param newVal the value used to assign updated values - * @param matchVal the value to be matched to elements - */ - def update (cName: String, newVal: ValueType, matchVal: ValueType): Unit = - val col_j = col(colMap(cName)) - for i <- col_j.indices if col_j(i) == matchVal do assign (col_j, i, newVal) - end update - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Assign the value newVal to column/vector c at index position i. 
- * @param c the name of the column/vector to be assigned - * @param i the index position to be assigned - * @param newVal the value used for assignment - */ - def assign (col_j: Vec, i: Int, newVal: ValueType): Unit = - val first = col_j(0) - first match - case _: Double => (col_j.asInstanceOf [VectorD])(i) = newVal.asInstanceOf [Double] - case _: Int => (col_j.asInstanceOf [VectorI])(i) = newVal.asInstanceOf [Int] - case _: Long => (col_j.asInstanceOf [VectorL])(i) = newVal.asInstanceOf [Long] - case _: String => (col_j.asInstanceOf [VectorS])(i) = newVal.asInstanceOf [String] - case _ => flaw ("assign", s"vector type ($first) not supported") - end match - end assign - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the column named 'cName' using function 'func' for elements with - * value 'matchStr'. - * @param cName the name of the column to be updated - * @param func the function used to assign updated values - * @param matchVal the value to be matched to elements - */ - def update (cName: String, func: (ValueType) => ValueType, matchVal: ValueType): Unit = - val col_j = col (colMap(cName)) - for i <- col_j.indices if col_j(i) == matchVal do assign (col_j, i, func (col_j(i))) - end update - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the column named 'cName' using function 'func' for elements where - * the predicate 'pred' evaluates to true. 
- * @param cName the name of the column to be updated - * @param func the function used to assign updated values - * @param pred the predicated used to select elements for update - */ - def update (cName: String, func: (ValueType) => ValueType, pred: (ValueType) => Boolean): Unit = - val col_j = col (colMap(cName)) - for i <- col_j.indices if pred (col_j(i)) do assign (col_j, i, func (col_j(i))) - end update - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Delete the rows from 'this' relation that satisfy the predicates. - * @param p tuple(1): column name, tuple(2): predicate (T => `Boolean`) - * @tparam T the predicate type - */ - def delete (p: Predicate*): Relation = - null -/* - var pos = ArrayBuffer [Int] () - for i <- p.indices do - domain (colMap(p(i)._1)) match - case 'D' => val pos1 = col (colMap(p(i)._1)).asInstanceOf [VectorD].filterPos (p(i)._2.asInstanceOf [Double => Boolean]) - if i > 0 then pos = pos intersect pos1 else pos ++= pos1 - case 'I' => val pos1 = col (colMap(p(i)._1)).asInstanceOf [VectorI].filterPos (p(i)._2.asInstanceOf [Int => Boolean]) - if i > 0 then pos = pos intersect pos1 else pos ++= pos1 - case 'L' => val pos1 = col (colMap(p(i)._1)).asInstanceOf [VectorL].filterPos (p(i)._2.asInstanceOf [Long => Boolean]) - if i > 0 then pos = pos intersect pos1 else pos ++= pos1 - case 'S' => val pos1 = col (colMap(p(i)._1)).asInstanceOf [VectorS].filterPos (p(i)._2.asInstanceOf [String => Boolean]) - if i > 0 then pos = pos intersect pos1 else pos ++= pos1 - case _ => flaw ("delete", "predicate type not supported") - null - end match - end for - val indices = Set (0 to rows-1 :_*) diff pos.toSet - for i <- 0 until cols do Vec.delete (col(i), pos.asInstanceOf [ArrayBuffer [Int]]) - selectAt (indices.toArrayBuffer.sorted) -*/ - end delete - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert 'this' relation into a string column by column. 
- */ - override def toString: String = - var sb = new StringBuilder ("Relation(" + name + ", " + key + ",\n" + colName + ",\n") - for i <- col.indices do sb.append (s"${col(i)} \n") - sb.replace (sb.length-1, sb.length, ")").mkString - end toString - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show 'this' relation row by row. - * @param limit the limit on the number of rows to display - */ - def show (limit: Int = Int.MaxValue): Unit = - val wid = 18 // column width - val rep = wid * colName.length // repetition = width * # columns - val title = s"| Relation name = $name, key-column = $key " - - println (s"|-${"-"*rep}-|") - println (title + " "*(rep-title.length) + " |") - println (s"|-${"-"*rep}-|") - print ("| "); for cn <- colName do print (s"%${wid}s".format (cn)); println (" |") - println (s"|-${"-"*rep}-|") - for i <- 0 until math.min (rows, limit) do - print ("| ") - for cv <- row(i) do - if cv.isInstanceOf [Double] then print (s"%${wid}g".format (cv)) - else print (s"%${wid}s".format (cv)) - end for - println (" |") - end for - println (s"|-${"-"*rep}-|") - end show - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show 'this' relation's foreign keys. 
- */ - def showFkey (): Unit = - val wid = 18 // column width - val rep = wid * colName.length // repetition = width * # columns - val title = s"| Relation name = $name, foreign keys = " - val fkline = s"| $fKeys " - - println (s"|-${"-"*rep}-|") - println (title + " "*(rep-title.length) + " |") - println (s"|-${"-"*rep}-|") - println (fkline + " "*(rep-fkline.length) + " |") - println (s"|-${"-"*rep}-|") - end showFkey - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert 'this' relation into a matrix of doubles, e.g., - * in the regression equation: 'xb = y' create matrix 'xy' - * @param colPos the column positions to use for the matrix - * @param kind the kind of matrix to create - */ - def toMatrixD (colPos: ArrayBuffer [Int]): MatrixD = - val colVec = for x <- project (colPos).col yield x.toDouble - MatrixD (colVec) - end toMatrixD - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert 'this' relation into a matrix of doubles and a vector of doubles. - * in the regression equation: 'xb = y' create matrix 'x' and vector 'y' - * @param colPos the column positions to use for the matrix - * @param colPosV the column position to use for the vector - * @param kind the kind of matrix to create - */ - def toMatrixDD (colPos: ArrayBuffer [Int], colPosV: Int): (MatrixD, VectorD) = - val colVec = for x <- project (colPos).col yield x.toDouble - (MatrixD (colVec), col(colPosV).toDouble) - end toMatrixDD - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert 'this' relation into a matrix of double. It will convert - * strings to double. 
- * in the regression equation: 'xb = y' create matrix 'xy' - * @param colPos the column positions to use for the matrix - * @param kind the kind of matrix to create - */ - def toMatrixD2 (colPos: ArrayBuffer [Int] = null): MatrixD = - val cp = if colPos == null then ArrayBuffer.range (0, cols) else colPos - val colVec = - for x <- project (cp).col yield { - try x.toDouble - catch case num: NumberFormatException => map2Int (x.asInstanceOf [VectorS])._1.toDouble - } // for - MatrixD (colVec) - end toMatrixD2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colPos' column of 'this' relation into a vector of doubles. - * @param colPos the column position to use for the vector - */ - def toVectorD (colPos: Int = 0): VectorD = col(colPos).toDouble - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the 'colName' column of 'this' relation into a vector of doubles. - * @param colName the column name to use for the vector - */ - def toVectorD (colName: String): VectorD = col(colMap(colName)).toDouble - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the given columns within 'this' relation to a map: 'keyColPos' -> 'valColPos'. - * @param keyColPos the key column positions - * @param valColPos the value column positions - */ - def toMap (keyColPos: ArrayBuffer [Int], valColPos: Int): Map [ArrayBuffer [ValueType], ValueType] = - val map = Map [ArrayBuffer [ValueType], ValueType] () - for i <- indices do - val tuple = row(i) - map += keyColPos.map (tuple(_)) -> tuple(valColPos) - end for - map - end toMap - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the given columns within 'this' relation to a map: 'keyColName' -> 'valColName'. 
- * @param keyColName the key column names - * @param valColname the value column names - */ - def toMap (keyColName: ArrayBuffer [String], valColName: String): Map [ArrayBuffer [ValueType], ValueType] = - toMap (keyColName.map (colMap(_)), colMap(valColName)) - end toMap - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Save 'this' relation in a file using serialization. - */ - def save (): Unit = - val oos = new ObjectOutputStream (new FileOutputStream (STORE_DIR + name + SER)) - oos.writeObject (this) - oos.close () - end save - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Write 'this' relation into a CSV file with each row written to a line. - * @param fileName the file name of the data file - */ - def writeCSV (fileName: String): Unit = - val out = new PrintWriter (BASE_DIR + fileName) - out.println (colName.toString.drop (5).dropRight (1)) - for i <- 0 until rows do out.println (row(i).toString.drop (7).dropRight (1)) - out.close - end writeCSV - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Write 'this' relation into a JSON file. - * @param fileName the file name of the data file - */ - def writeJSON (fileName: String): Unit = ??? // FIX - to be implemented - - // ============================================ BUILT-IN AGGREGATE FUNCTIONS - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the mean of the values in column 'cName'. - * @param cName the column name - */ - def avg (cName: String): Double = col(colMap(cName)).toDouble.mean - def mean (cName: String): Double = col(colMap(cName)).toDouble.mean - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the number of values in column 'cName'. 
- * @param cName the column name - */ - def count (cName: String): Int = rows - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the maximum value in column 'cName'. - * @param cName the column name - */ - //def max (cName: String): ValueType = col(colMap(cName)).max // FIX - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the minimum value in column 'cName'. - * @param cName the column name - */ - //def min (cName: String): ValueType = col(colMap(cName)).min // FIX - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the sum of the values in column 'cName'. - * @param cName the column name - */ - //def sum (cName: String): ValueType = col(colMap(cName)).sum // FIX - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the variance of the values in column 'cName'. - * @param cName the column name - */ - def variance (cName: String): Double = col(colMap(cName)).toDouble.variance - -/** As seen from class Relation, the missing signatures are as follows. - * For convenience, these are usable as stub implementations. - */ - def eproject (aggCol: Seq[AggColumn]) (cName: Seq[String]): Table = ??? - def groupBy(cName: Seq[String]): Table = ??? - def intersect (r2: Table): Table = ??? - def join (cNames: ArrayBuffer[String], r2: Table): Table = ??? - def join (r2: Table, p0: Predicate2 , p: Seq[Predicate2]): Table = ??? - def leftJoin (cName1: String, cName2: String, r2: Table): Table = ??? - def leftJoin (thres: Double) (cName1: String, cName2: String, r2: Table): Table = ??? - def minus (r2: Table): Table = ??? - def selectAt (pos: IndexedSeq[Int]): Table = ??? - def toMatrixD_VectorD (colPosM: ArrayBuffer[Int], colPosV: Int): (MatrixD, VectorD) = ??? - def toVectorI(colPos: Int): VectorI = ??? - def toVectorI(colName: String): mathstat.VectorI = ??? - def toVectorL(colPos: Int): VectorL = ??? 
- def toVectorL(colName: String): VectorL = ??? - def toVectorS(colPos: Int): Vec = ??? - def toVectorS(colName: String): Vec = ??? - def union (r2: Table): relation.Table = ??? - -end Relation - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationEx` object provides and example relation for testing. - * @see www.codeproject.com/Articles/652108/Create-First-Data-WareHouse - */ -object RelationEx: - - val productSales = Relation ("productSales", - ArrayBuffer ("SalesInvoiceNumber", "SalesDateKey", "SalesTimeKey", "SalesTimeAltKey", "StoreID", "CustomerID", - "ProductID", "SalesPersonID", "Quantity", "ProductActualCost", "SalesTotalCost", "Deviation"), - ArrayBuffer (Vector [ValueType] (1, 20130101, 44347, 121907, 1, 1, 1, 1, 2, 11.0, 13.0, 2.0), - Vector [ValueType] (1, 20130101, 44347, 121907, 1, 1, 2, 1, 1, 22.5, 24.0, 1.5), - Vector [ValueType] (1, 20130101, 44347, 121907, 1, 1, 3, 1, 1, 42.0, 43.5, 1.5), - Vector [ValueType] (2, 20130101, 44519, 122159, 1, 2, 3, 1, 1, 42.0, 43.5, 1.5), - Vector [ValueType] (2, 20130101, 44519, 122159, 1, 2, 4, 1, 3, 54.0, 60.0, 6.0), - Vector [ValueType] (3, 20130101, 52415, 143335, 1, 3, 2, 2, 2, 11.0, 13.0, 2.0), - Vector [ValueType] (3, 20130101, 52415, 143335, 1, 3, 3, 2, 1, 42.0, 43.5, 1.5), - Vector [ValueType] (3, 20130101, 52415, 143335, 1, 3, 4, 2, 3, 54.0, 60.0, 6.0), - Vector [ValueType] (3, 20130101, 52415, 143335, 1, 3, 5, 2, 1, 135.0, 139.0, 4.0), - Vector [ValueType] (4, 20130102, 44347, 121907, 1, 1, 1, 1, 2, 11.0, 13.0, 2.0), - Vector [ValueType] (4, 20130102, 44347, 121907, 1, 1, 2, 1, 1, 22.5, 24.0, 1.5), - Vector [ValueType] (5, 20130102, 44519, 122159, 1, 2, 3, 1, 1, 42.0, 43.5, 1.5), - Vector [ValueType] (5, 20130102, 44519, 122159, 1, 2, 4, 1, 3, 54.0, 60.0, 6.0), - Vector [ValueType] (6, 20130102, 52415, 143335, 1, 3, 2, 2, 2, 11.0, 13.0, 2.0), - Vector [ValueType] (6, 20130102, 52415, 143335, 1, 3, 5, 2, 1, 135.0, 139.0, 4.0), - Vector [ValueType] (7, 
20130102, 44347, 121907, 2, 1, 4, 3, 3, 54.0, 60.0, 6.0), - Vector [ValueType] (7, 20130102, 44347, 121907, 2, 1, 5, 3, 1, 135.0, 139.0, 4.0), - Vector [ValueType] (8, 20130103, 59326, 162846, 1, 1, 3, 1, 2, 84.0, 87.0, 3.0), - Vector [ValueType] (8, 20130103, 59326, 162846, 1, 1, 4, 1, 3, 54.0, 60.0, 3.0), - Vector [ValueType] (9, 20130103, 59349, 162909, 1, 2, 1, 1, 1, 5.5, 6.5, 1.0), - Vector [ValueType] (9, 20130103, 59349, 162909, 1, 2, 2, 1, 1, 22.5, 24.0, 1.5), - Vector [ValueType] (10, 20130103, 67390, 184310, 1, 3, 1, 2, 2, 11.0, 13.0, 2.0), - Vector [ValueType] (10, 20130103, 67390, 184310, 1, 3, 4, 2, 3, 54.0, 60.0, 6.0), - Vector [ValueType] (11, 20130103, 74877, 204757, 2, 1, 2, 3, 1, 5.5, 6.5, 1.0), - Vector [ValueType] (11, 20130103, 74877, 204757, 2, 1, 3, 3, 1, 42.0, 43.5, 1.5)), - 0, "IIIIIIIIIDDD") - -end RelationEx - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest` object tests the operations provided by `Relation`. 
- * > runMain scalation.database.relation.RelationTest - */ -object RelationTest extends App: - - val weekdays = new Relation ("weekdays", ArrayBuffer ("day", "time"), - Vector (VectorS ("Mon", "Tue", "Wed", "Thu", "Fri"), - VectorD (5.00, 8.15, 6.30, 9.45, 7.00)), - 0, "SD") - - val weekend = new Relation ("weekends", ArrayBuffer ("day", "time"), - Vector (VectorS ("Sat", "Sun"), - VectorD (3.00, 4.30)), - 0, "SD") - - weekdays.generateIndex () - weekend.generateIndex () - - banner ("weekdays") - println ("weekdays = " + weekdays) - banner ("weekdend") - println ("weekend = " + weekend) - - banner ("Test pi") - println ("weekdays.pi (\"day\") = " + weekdays.pi ("day")) - println ("-" * 60) - println ("weekdays.pisigma (\"day\", _ == \"Mon\") = " + weekdays.pisigma ("day", _ == "Mon")) - - banner ("Test sigma") - println ("weekdays.select (\"day\", _ == \"Mon\") = " + weekdays.select ("day", _ == "Mon")) - println ("-" * 60) - println ("weekdays.select (\"time\", _ == 5.00) = " + weekdays.select ("time", _ == 5.00)) - println ("-" * 60) - println ("weekdays.select (\"day\", _ > \"Mon\") = " + weekdays.select ("day", _ > "Mon")) - println ("-" * 60) - println ("weekdays.select (\"day\", _ > \"Mon\") = " + weekdays.select ("day", _ > "Mon")) - println ("-" * 60) - println ("weekdays.select (\"day\", \"time\") = " + weekdays.select ("day", _ == "Mon")) - - val week = weekdays.union (weekend) - banner ("Test union") - println ("weekdays.union (weekend) = " + week) - - weekend.add (Vector ("Zday", 1.00)) - banner ("Test add") - println ("weekend add (\"Zday\", 1.00)) = " + weekend) - - banner ("Test -") - println ("week - weekend = " + (week - weekend)) - - banner ("Test join") - println ("week.join (\"day\", \"day\" weekend) = " + week.join ("day", "day", weekend)) - println ("-" * 60) - println ("week join weekend = " + (week join weekend)) - - week.writeCSV ("relation" + ⁄ + "week.csv") - -end RelationTest - - 
-//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest2` object tests the operations provided by `Relation`. - * The relational algebra operators are given using Unicode. - * @see en.wikipedia.org/wiki/List_of_Unicode_characters - * > runMain scalation.database.relation.RelationTest2 - */ -object RelationTest2 extends App: - - val weekdays = new Relation ("weekdays", ArrayBuffer ("day", "time"), - Vector (VectorS ("Mon", "Tue", "Wed", "Thu", "Fri"), - VectorD (5.00, 8.15, 6.30, 9.45, 7.00)), - 0, "SD") - - val weekend = new Relation ("weekends", ArrayBuffer ("day", "time"), - Vector (VectorS ("Sat", "Sun"), - VectorD (3.00, 4.30)), - 0, "SD") - - banner ("Test π") - println ("weekdays.π (\"day\") = " + weekdays.π ("day")) - println ("-" * 60) - println ("weekdays.π (\"time\") = " + weekdays.π ("time")) - - banner ("Test σ") - println ("weekdays.σ (\"day\", _ == \"Mon\") = " + weekdays.σ ("day", _ == "Mon")) - println ("-" * 60) - println ("weekdays.σ (\"time\", _ == 5.00) = " + weekdays.σ ("time", _ == 5.00)) - println ("-" * 60) - println ("weekdays.σ (\"day\", _ > \"Mon\") = " + weekdays.σ ("day", _ > "Mon")) - println ("-" * 60) - println ("weekdays.σ (\"time\", _ > 5.00) = " + weekdays.σ ("time", _ > 5.00)) - println ("-" * 60) - println ("weekdays.σ (\"day\", \"time\") = " + weekdays.σ ("day", _ == "Mon") - .σ ("time", _ == 5.00)) - val week = weekdays ⋃ weekend - - banner ("Test ⋃") - println ("weekdays ⋃ weekend) = " + weekdays ⋃ weekend) - - banner ("Test ⋂") - println ("week ⋂ weekend = " + (week ⋂ weekend)) - - banner ("Test -") - println ("week - weekend = " + (week - weekend)) - - banner ("Test ⋈ ") - println ("week ⋈ weekend = " + (week ⋈ weekend)) - -end RelationTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest3` object tests the operations provided by `Relation`. 
- * It test various aggregate/OLAP operations on a simple data warehouse fact table. - * @see www.codeproject.com/Articles/652108/Create-First-Data-WareHouse - * FIX - allow entering doubles as "13" rather than "13.0" - * > runMain scalation.database.relation.RelationTest3 - */ -object RelationTest3 extends App: - - import Relation.{max, min} - import RelationEx.productSales - - val costVprice = productSales.project ("ProductActualCost", "SalesTotalCost") - - productSales.show () - - println ("productSales = " + productSales) - println ("productSales.project (\"ProductActualCost\", \"SalesTotalCost\") = " + costVprice) - - banner ("Test count") - println ("count (productSales) = " + count (productSales)) - println ("-" * 60) - println ("count (costVprice) = " + count (costVprice)) - - banner ("Test min") - println ("min (productSales) = " + min (productSales)) - println ("-" * 60) - println ("min (costVprice) = " + min (costVprice)) - - banner ("Test max") - println ("max (productSales) = " + max (productSales)) - println ("-" * 60) - println ("max (costVprice) = " + max (costVprice)) - - banner ("Test sum") - println ("sum (productSales) = " + sum (productSales)) - println ("-" * 60) - println ("sum (costVprice) = " + sum (costVprice)) - - banner ("Test expectation/mean") - println ("mean (productSales) = " + mean (productSales)) - println ("-" * 60) - println ("mean (costVprice) = " + mean (costVprice)) - - banner ("Test variance") - println ("variance (productSales) = " + variance (productSales)) - println ("-" * 60) - println ("variance (costVprice) = " + variance (costVprice)) - -end RelationTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest4` object tests conversion `Relation` to a matrix. 
- * > runMain scalation.database.relation.RelationTest4 - */ -object RelationTest4 extends App: - - import RelationEx.productSales - - val (mat, vec) = productSales.toMatrixDD (ArrayBuffer.range (0, 11), 11) - - banner ("productSales") - productSales.show () - - banner ("mat and vec") - println ("mat = " + mat) - println ("vec = " + vec) - -end RelationTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest5` object tests the interoperability between Relations and Matrices. - * > runMain scalation.database.relation.RelationTest5 - */ -object RelationTest5 extends App: - - val sales_item1 = Relation ("Sales_Item1", ArrayBuffer ("Date", "FL", "GA", "NC", "SC"), - ArrayBuffer (Vector [ValueType] ("20130101", 10, 5, 5, 4), - Vector [ValueType] ("20130102", 20, 30, 40, 25), - Vector [ValueType] ("20130103", 8, 6, 9, 9), - Vector [ValueType] ("20130104", 6, 7, 9, 10), - Vector [ValueType] ("20130105", 4, 7, 9, 10)), - 0,"SIIII") - - val price_item1 = Relation ("Price_Item1", ArrayBuffer ("Date", "FL", "GA", "NC", "SC"), - ArrayBuffer (Vector [ValueType] ("20130101", 1.6, 1.6, 1.5, 1.3), - Vector [ValueType] ("20130102", 1.6, 1.6, 1.5, 1.2), - Vector [ValueType] ("20130103", 1.5, 1.6, 1.5, 1.4), - Vector [ValueType] ("20130104", 1.4, 1.7, 1.5, 1.4), - Vector [ValueType] ("20130105", 1.4, 1.7, 1.4, 1.4)), - 0,"SDDDD") - val revenue = Relation ("Revenue", -1, null, "Item", "FL", "GA", "NC", "SC") - - sales_item1.show () - price_item1.show () - - val x = sales_item1.toMatrixD (ArrayBuffer.range (1, 5)) - val y = price_item1.toMatrixD (ArrayBuffer.range (1, 5)) - val z = x dot y - //revenue.add ("Item1" +: z.toVector) // FIX - - banner ("revenue") - revenue.show () - -end RelationTest5 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest6` object tests 'indexjoin', 'groupby' and 'aggregation'. 
- * > runMain scalation.database.relation.RelationTest6 - */ -object RelationTest6 extends App: - - val professor = Relation ("professor", 0, "ISS", "pid", "name", "prodeptid") - TableGen.popTable (professor, 10) - professor.generateIndex () - - val course = Relation ("course", 0, "ISS", "cid","crsname", "descr") - TableGen.popTable (course, 20) - course.generateIndex () - - val teaching = Relation ("teaching", 0, "IISI", "tid", "cid", "semester", "pid") - teaching.fKeys = ArrayBuffer (("cid", "course", 0), ("pid", "professor", 0)) - TableGen.popTable (teaching, 50, ArrayBuffer (course, professor)) - teaching.generateIndex () - - banner ("database") - professor.show () - course.show () - teaching.show () - teaching.showFkey () - - banner ("joinindex") - teaching.joinindex (ArrayBuffer ("pid"), ArrayBuffer("pid"), professor).show () - banner ("groupBy.eproject") -// teaching.groupBy ("cid").eproject ((count, "pid_count", "pid"))("tid", "semester").show () // FIX - -end RelationTest6 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest7` object tests 'join' method. 
- * > runMain scalation.database.relation.RelationTest7 - */ -object RelationTest7 extends App: - - val professor = Relation ("professor", - ArrayBuffer("pid", "name", "department", "title"), - ArrayBuffer (Vector [ValueType] (1, "jackson", "pharm", 4), - Vector [ValueType] (2, "ken", "cs", 2), - Vector [ValueType] (3, "pan", "pharm", 0), - Vector [ValueType] (4, "yang", "gis", 3), - Vector [ValueType] (5, "zhang", "cs", 0), - Vector [ValueType] (6, "Yu", "cs", 0)), - -1, "ISSI") - - val professor2 = Relation ("professor", - ArrayBuffer ("pid", "name", "department", "title"), - ArrayBuffer (Vector [ValueType] (7, "LiLy", "gis", 5), - Vector [ValueType] (8, "Marry", "gis", 5), - Vector [ValueType] (0, "Kate", "cs", 5)), - 0, "ISSI") - - professor.generateIndex () - professor2.generateIndex () - - banner ("professor") - professor.show () - banner ("professor2") - professor2.show () - - banner ("join") - //professor.join (professor2, ("pid", "pid", (x: Int, y: Int) => x < y)).show () // FIX - //professor.join (professor2, ("pid", "pid", _ < _)).show () // FIX - -end RelationTest7 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest8` object tests 'save' method. - * > runMain scalation.database.relation.RelationTest8 - */ -object RelationTest8 extends App: - - val professor = Relation ("professor", - ArrayBuffer("pid", "name", "department", "title"), - ArrayBuffer (Vector [ValueType] (1, "jackson", "pharm", 4), - Vector [ValueType] (2, "ken", "cs", 2), - Vector [ValueType] (3, "pan", "pharm", 0), - Vector [ValueType] (4, "yang", "gis", 3), - Vector [ValueType] (5, "zhang", "cs", 0), - Vector [ValueType] (6, "Yu", "cs", 0)), - -1, "ISSI") - - professor.save () - -end RelationTest8 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest9` object tests 'apply' method to load a saved relation. 
- * > runMain scalation.database.relation.RelationTest9 - */ -object RelationTest9 extends App: - - Relation ("professor").show () - -end RelationTest9 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest10` object tests the 'orderBy' method. - * > runMain scalation.database.relation.RelationTest10 - */ -object RelationTest10 extends App: - - import RelationEx.productSales - - productSales.orderBy ("SalesTotalCost", "Deviation").show () - -end RelationTest10 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest11` object tests the `Relation` on the traffic schema. - * > runMain scalation.database.relation.RelationTest11 - */ -object RelationTest11 extends App: - - val sensor = Relation ("sensor", ArrayBuffer ("sensorID", "model", "latitude", "longitude", "on"), - null, 0, "ISDDI") - val road = Relation ("road", ArrayBuffer ("roadID", "rdName", "lat1", "long1", "lat2", "long2"), - null, 0, "ISDDDD") - val mroad = Relation ("road", ArrayBuffer ("roadID", "rdName", "lanes", "lat1", "long1", "lat2", "long2"), - null, 0, "ISIDDDD") - val traffic = Relation ("traffic", ArrayBuffer ("time", "sensorID", "count", "speed"), - null, 0, "TIID") - val wsensor = Relation ("sensor", ArrayBuffer ("sensorID", "model", "latitude", "longitude"), - null, 0, "ISDD") - val weather = Relation ("weather", ArrayBuffer ("time", "sensorID", "precipitation", "wind"), - null, 0, "TIID") - - sensor.show () - road.show () - mroad.show () - traffic.show () - wsensor.show () - weather.show () - -end RelationTest11 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RelationTest12` object tests the `Relation` class on JSON data. 
- * @see www.learningcontainer.com/sample-json-file - * FIX - does not work for Scala 2.13 - * > runMain scalation.database.relation.RelationTest12 - * -object RelationTest12 extends App: - - val fname = BASE_DIR + "employee.json" - println (s"fname = $fname") - val employee = Relation (fname, "employee") - - employee.show () - -end RelationTest12 - */ - diff --git a/target/scala-3.6.4/classes/scalation/database/relation/old/Table.scala.bak b/target/scala-3.6.4/classes/scalation/database/relation/old/Table.scala.bak deleted file mode 100644 index 793437c60..000000000 --- a/target/scala-3.6.4/classes/scalation/database/relation/old/Table.scala.bak +++ /dev/null @@ -1,610 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Santosh Uttam Bobade - * @version 2.0 - * @date Sun Aug 23 15:42:06 EDT 2015 - * @see LICENSE (MIT style license file). - * - * @title Base Trait for Relational Database Engines - * - * An implementation supporting columnar relational databases facilitating easy - * and rapid analytics. The columns in a table/relation are vectors from the - * `scalation.mathstat` package. Vectors and matrices may be readily extracted - * from a relation and feed into any of the numerous analytics techniques provided - * in `scalation.modeling`. 
The implementation provides most of the columnar - * relational algebra operators given in the following paper: - * @see db.csail.mit.edu/projects/cstore/vldb.pdf - * - * Some of the operators have Unicode versions: @see `scalation.UnicodeTest` - */ - -package scalation -package database -package relation - -import scala.collection.mutable.{ArrayBuffer, IndexedSeq, Map} -import scala.collection.immutable.StringOps -import scala.reflect.ClassTag - -import scalation.mathstat._ - -/** Indicates which relation and which column an aggregate is to be applied to - */ -type AggFunction = (Table, String) => Vectr - -/** Aggregate column type has Aggregate Funtion, new column name and old column name - */ -type AggColumn = (AggFunction, String, String) - -/** Type definition for a row/tuple - */ -type Row = Vector [ValueType] - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Table` object provides functions for the `Table` trait. - */ -object Table: - - private val flaw = flawf ("Table") // flaw function - private var _ucount = 0 // counter for making unique table names - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the next unique count. - */ - def ucount (): Int = { _ucount += 1; _ucount } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given row 'tuple', project onto the given column positions specified in 'cPos'. - * @param tuple the row on which to apply the projection - * @param cPos the column positions - */ - def project (tuple: Row, cPos: IndexedSeq [Int]): Row = - cPos.map (tuple(_)).toVector - end project - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given a domain string 'dom', project onto the given column positions specified - * in 'cPos'. 
- * @param dom the domain string on which to apply the projection - * @param cPos the column positions - */ - def projectD (dom: Domain, cPos: IndexedSeq [Int]): Domain = - if dom != null then - val sb = new StringBuilder - for i <- cPos do sb.append (dom(i)) - sb.toCharArray - else null - end projectD - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether 't' and 'u' are the same on column positions 'tp' and 'up'. - * @param t the first tuple - * @param u the second tuple - * @param tp the column positions for tuple t - * @param up the column positions for tuple u - */ - def sameOn (t: Row, u: Row, tp: ArrayBuffer [Int], up: ArrayBuffer [Int]): Boolean = - project (t, tp) sameElements project (u, up) - end sameOn - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a tuple with missing values for each column according to the given - * domains. This function is used by 'leftJoin' and 'rightJoin'. - * @param domain the domains of the table for which a null tuple is required - */ - def nullTuple (dom: Domain): Row = - var v = Array.ofDim [ValueType] (dom.length) - v.indices.map (i => - dom(i) match { - case 'D' => v(i) = NO_DOUBLE - case 'I' => v(i) = NO_INT - case 'L' => v(i) = NO_LONG - case 'S' => v(i) = NO_STRING - case 'T' => v(i) = NO_TIMENUM - case _ => flaw ("nullTuple", s"not supported domain type ${dom(i)}") - }) - v.toVector - end nullTuple - -end Table - -import Table._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Table` trait defines methods for operating on vectors. - * The vectors form the columns of the columnar relational datastore. 
- * Columns may have any of the following domains/data-types: - * 'D' - `Double` - `VectorD` - 64 bit double precision floating point number - * 'I' - `Int` - `VectorI` - 32 bit integer - * 'L' - `Long` - `VectorL` - 64 bit long integer - * 'S' - `String` - `VectorS` - variable length numeric string - * 'T' - `TimeNum` - `VectorT` - time numbers for date-time - * @param name the name of the table - * @param schema the attributes for the table - * @param domain the domains/data-types for attributes ('D', 'I', 'L', 'S', 'T') - */ -trait Table (val name: String, val schema: Schema, val domain: Domain) - extends Serializable: - - private val flaw = flawf ("Table") // flaw function - - val on = Map [String, Int] () // map from attribute name to column number - for j <- schema.indices do on += schema(j) -> j - - if schema.size != domain.size then flaw ("init", "size mismatch between attributes and domains") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the size in terms of number of rows in the table. - */ - def rows: Int - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the size in terms of number of columns in the table. - */ - def cols: Int = schema.size - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the cardinality (number of tuples) and arity (number of attributes). - */ - inline def dims: (Int, Int) = (rows, cols) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return all of the columns in the table. - */ - def columns: Vector [Vectr] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the column in the table with column name cName. 
- * @param cName column name used to retrieve the column vector - */ - def column (cName: String): Vectr - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the range of index values for the table. - */ - def indices: Range = 0 until rows - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a row by pulling values from all columns at position i. - * @param i the i-th position - */ - def row (i: Int): Row - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a row by pulling values from an array of strings and converting - * elements to their appropriate types. - * @param sos the sequence of strings holding the values - * @param _typ the string of corresponding types, e.g., Array ('S', 'D', 'I') - */ - @throws (classOf [Exception]) - def row (sos: ArrayBuffer [String], dom: Domain): Row = - var result: Vector [ValueType] = null -// val typ = if _typ == null then "S" * sos.length else _typ // missing => assume String - try - result = (for j <- sos.indices yield - dom(j) match - case 'D' => if sos(j).isEmpty then 0.0 else new StringOps (sos(j)).toDouble - case 'I' => if sos(j).isEmpty then 0 else new StringOps (sos(j)).toInt - case 'L' => if sos(j).isEmpty then 0L else new StringOps (sos(j)).toLong - case 'T' => if sos(j).isEmpty then TimeNum._0 else TimeNum (sos(j)) - case _ => sos(j) - end match - ).toVector.asInstanceOf [Vector [ValueType]] - catch - case ex: Exception => - println (s"row function throw exception, row is:\n $sos \ntuple length is: ${sos.size}") - throw ex - end try - result - end row - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether this table contains a row matching the given 'tuple'. 
- * @param tuple an aggregation of columns values (potential row) - */ - def contains (tuple: Row): Boolean - - // ================================================================== RENAME - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** RENAME this table, returning a shallow copy of the table. - * @param newName the new name for the table. - */ - def rename (newName: String): Table - - inline def ρ (newName: String): Table = rename (newName) - - // ================================================================= PROJECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** PROJECT onto the columns with the given column names. - * @param x the names of the columns to project onto - */ - def project (x: Schema): Table - - inline def project (x: String): Table = project (splitTrim (x)) - - inline def π (x: String): Table = project (splitTrim (x)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** PROJECT onto the columns with the given column positions. - * @param cPos the column positions to project onto - * @param cName the optional new names for the columns to project onto - */ - def project (cPos: IndexedSeq [Int], cName: Schema = null): Table - - inline def π (cPos: IndexedSeq [Int], cName: Schema = null): Table = project (cPos, cName) - - // ========================================================== SELECT-PROJECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SELECT elements from column cName in this table that satisfy the - * predicate p and PROJECT onto that column. 
- * @param cName the name of the column used for selection - * @param p the atomic predicate (`Boolean` function) to be satisfied - */ - def selproject (cName: String, p: APredicate): Table - - inline def σπ (cName: String, p: APredicate): Table = selproject (cName, p) - - // ======================================================== EXTENDED PROJECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Aggregate/project on the given columns (an extended projection operator that - * applies aggregate operators to aggregation columns and regular projection - * to projection columns). - * @see en.wikipedia.org/wiki/Relational_algebra - * @param aggCol the columns to aggregate on: (aggregate function, new column name, old column name)* - * @param cName the other columns to project on - */ -// def eproject (aggCol: AggColumn*)(cName: String*): Table - -// inline def Π (aggCol: AggColumn*)(cName: String*): Table = eproject (aggCol :_*)(cName :_*) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Aggregate/project on the given column (an extended projection operator that - * applies an aggregate operator to an aggregation column and regular projection - * to projection columns). - * @see en.wikipedia.org/wiki/Relational_algebra - * @param aggr_func the aggregate function - * @param newcol the new column name - * @param oldcol the old column name - */ - def eproject (aggr_func: AggFunction, newcol: String, oldcol: String): Table - - inline def Π (aggr_func: AggFunction, newcol: String, oldcol: String): Table = - eproject (aggr_func, newcol, oldcol) - end Π - - // ================================================================== SELECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SELECT elements from columns in cName in this table that satisfy - * the predicate 'p'. 
- * @param cName the name of the column used for selection - * @param p the predicate (`Boolean` function) to be satisfied - */ - def select (cName: String, p: APredicate): Table - - inline def σ (cName: String, p: APredicate): Table = select (cName, p) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Select across all columns at the specified row positions. - * @param pos the specified row positions - */ - def selectAt (pos: collection.immutable.IndexedSeq [Int]): Table - - // =========================================================== SET OPERATORS - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** UNION this table and r2. Check that the two tables are compatible. - * @param r2 the second table - */ - def union (r2: Table): Table - - inline def ⋃ (r2: Table): Table = union (r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute this table MINUS (set difference) table r2 (this - r2). Check that - * the two tables are compatible. - * @param r2 the second table - */ - def minus (r2: Table): Table - - inline def - (r2: Table): Table = minus (r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** INTERSECT this table and r2. Check that the two tables are compatible. - * @param r2 the second table - */ - def intersect (r2: Table): Table - - inline def ⋂ (r2: Table): Table = intersect (r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether this table and r2 are incompatible by having - * differing numbers of columns or differing domain strings. - * @param r2 the second table - */ - def incompatible (r2: Table): Boolean - - // ================================================================= PRODUCT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the CARTESIAN PRODUCT of this table and r2 (this × r2). 
- * @param r2 the second table - */ - def product (r2: Table): Table - - inline def × (r2: Table): Table = product (r2) - - // ==================================================================== JOIN - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** JOIN this table and r2 by performing a "natural-join". Rows from both - * tables are compared requiring agreement on common attributes (column names). - * @param r2 the rhs table in the join operation - */ - def join (r2: Table): Table = join (schema intersect r2.schema, r2) - - inline def ⋈ (r2: Table): Table = join (r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** JOIN this table and r2 by performing a "natural-join". Rows from both - * tables are compared requiring cName values to be equal. - * @param cName the common join column name for both table - * @param r2 the rhs table in the join operation - */ - def jOIN (cName: String, r2: Table): Table = join (Array (cName), r2) - - inline def ⋈ (cName: String, r2: Table): Table = join (Array (cName), r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** JOIN this table and r2 by performing a "natural-join". Rows from both - * tables are compared requiring cName values to be equal. - * @param cName the common join column names for both table - * @param r2 the rhs table in the join operation - */ - def join (cName: Schema, r2: Table): Table - - inline def ⋈ (cName: Schema, r2: Table): Table = join (cName, r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** JOIN this table and r2 by performing an "equi-join". Rows from both - * tables are compared requiring cName1 values to equal cName2 values. - * Disambiguate column names by appending "2" to the end of any duplicate column name. 
- * @param cName1 the join column name of this table (e.g., the Foreign Key) - * @param cName2 the join column name of table r2 (e.g., the Primary Key) - * @param r2 the rhs table in the join operation - */ - def join (cName1: String, cName2: String, r2: Table): Table = - join (Array (cName1), Array (cName2), r2) - end join - - inline def ⋈ (cName1: String, cName2: String, r2: Table): Table = - join (Array (cName1), Array (cName2), r2) - end ⋈ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** JOIN this table and r2 by performing an "equi-join". Rows from both - * tables are compared requiring cName1 values to equal cName2 values. - * Disambiguate column names by appending "2" to the end of any duplicate column name. - * @param cName1 the join column names of this table (e.g., the Foreign Key) - * @param cName2 the join column names of table r2 (e.g., the Primary Key) - * @param r2 the rhs table in the join operation - */ - def join (cName1: Schema, cName2: Schema, r2: Table): Table - - inline def ⋈ (cName1: Schema, cName2: Schema, r2: Table): Table = - join (cName1, cName2, r2) - end ⋈ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The THETA JOIN, handle the predicates in where are connect by "and" (where a....and b....). - * @param r2 the second table - * @param p0 the first theta join predicate (r1 cName, r2 cName, predicate to compare these two column) - * @param p the rest of theta join predicates (r1 cName, r2 cName, predicates to compare these two column) - */ - def join (r2: Table, p0: APredicate2, p: APredicate2*): Table - - inline def ⋈ (r2: Table, p0: APredicate2, p: APredicate2*): Table = - join (r2, p0, p :_*) - end ⋈ - - // ============================================================== OUTER JOIN - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join this table and r2 by performing an LEFT-JOIN. 
Rows from both - * tables are compared requiring cName1 values to equal cName2 values. - * Disambiguate column names by appending "2" to the end of any duplicate column name. - * All rows from the left table are maintained with missing values indicators used - * where needed. - * @param cName1 the join column names of this table (e.g., the Foreign Key) - * @param cName2 the join column names of table r2 (e.g., the Primary Key) - * @param r2 the rhs table in the join operation - */ - def leftJoin (cName1: String, cName2: String, r2: Table): Table - - // Note: although this is the semi-join symbol, due to Unicode limitations, it is used for left-join. - - inline def ⋉ (cName1: String, cName2: String, r2: Table): Table = leftJoin (cName1, cName2, r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join this table and r2 by performing an APPRIOXMATE LEFT-JOIN. Rows from both - * tables are compared requiring cName1 values to apprximately equal cName2 values. - * Disambiguate column names by appending "2" to the end of any duplicate column name. - * All rows from the left table are maintained with missing values indicators used - * where needed. - * @param thres the approximate equality threshold - * @param cName1 the join column names of this table (e.g., the Foreign Key) - * @param cName2 the join column names of table r2 (e.g., the Primary Key) - * @param r2 the rhs table in the join operation - */ - def leftJoinApx (thres: Double = 0.001) (cName1: String, cName2: String, r2: Table): Table - - inline def ⋉ (thres: Double = 0.001) (cName1: String, cName2: String, r2: Table): Table = - leftJoinApx (thres)(cName1, cName2, r2) - end ⋉ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join this table and r2 by performing an RIGHT-JOIN. Rows from both - * tables are compared requiring cName1 values to equal cName2 values. 
- * Disambiguate column names by appending "2" to the end of any duplicate column name. - * All rows from the right table are maintained with missing values indicators used - * where needed. - * @param cName1 the join column names of this table (e.g., the Foreign Key) - * @param cName2 the join column names of table r2 (e.g., the Primary Key) - * @param r2 the rhs table in the join operation - */ - inline def rightJoin (cName1: String, cName2: String, r2: Table): Table = r2.leftJoin (cName2, cName1, this) - - inline def ⋊ (cName1: String, cName2: String, r2: Table): Table = r2.leftJoin (cName2, cName1, this) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Combine two sequences of column names, keeping all names from cn1 and - * only those in cn2 that are not repeats (i.e., not already in cn1). - * @param cn1 the first sequence of column names - * @param cn2 the second sequence of column names - */ - protected def uniq_union (cn1: Schema, cn2: Schema): Schema = - var cn3 = cn1 - for j <- cn2.indices if ! (cn3 contains cn2(j)) do cn3 = cn3 :+ cn2(j) - cn3 - end uniq_union - - // ================================================================== DIVIDE - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** DIVIDE this table by table r2. Requires a tuple in the quotient part of - * this table to be paired with all tuples in table r2. - * @param r2 the second table - */ - def divide (r2: Table): Table - - inline def / (r2: Table): Table = divide (r2) - - // ================================================================ GROUP BY - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** GROUP this table BY specified column name, returning this table. 
- * @param cName the group column - */ - def groupBy (cName: String): Table -// def groupBy (cName: String*): Table - - inline def γ (cName: String): Table = groupBy (cName) -// inline def γ (cName: String*): Table = groupBy (cName :_*) - - // ================================================================ ORDER BY - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** ORDER (ascending) the rows in the table BY the selected columns cName. - * A stable sorting is used to allow sorting on multiple columns. - * @param cName the column names that are to be sorted - */ - def orderBy (cName: String*): Table - - inline def ϙ (cName: String*): Table = orderBy (cName :_*) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** ORDER (descending) the rows in the table BY the selected columns cName. - * A stable sorting is used to allow sorting on multiple columns. - * @param cName the column names that are to be sorted - */ - def reverseOrderBy (cName: String*): Table - - inline def ω (cName: String*): Table = reverseOrderBy (cName :_*) - - // ================================================================= UPDATES - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** ADD (insert) tuple to this table as a new row. - * @param tuple an aggregation of columns values (new row) - */ - def add (tuple: Row): Table - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** ADD a tuple to this table as a new row, materialize and return updated table. - * May call for last tuple in a batch of tuples. - * @param tuple an aggregation of columns values (new row) - */ - def addm (tuple: Row): Table - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** UPDATE the column named cName using newVal for elements with value matchVal. 
- * @param cName the name of the column to be updated - * @param newVal the value used to assign updated values - * @param matchVal the value to be matched to elements - */ - def update (cName: String, newVal: ValueType, matchVal: ValueType): Boolean - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** UPDATE the column named cName using function func for elements with - * value matchVal. - * @param cName the name of the column to be updated - * @param func the function used to assign updated values - * @param matchVal the value to be matched to elements - */ - def update (cName: String, func: ValueType => ValueType, matchVal: ValueType): Boolean - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** DELETE the rows from this table that satisfy the predicates. - * @param p the atomic predicate - */ - def delete (p: Predicate): Boolean - - // =============================================================== TO MATRIX - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this table into a matrix of doubles, e.g., - * in the regression equation: xb = y create matrix xy - * @param cols the column positions to use for the matrix - */ - def toMatrix (cols: ArrayBuffer [Int]): MatrixD - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this table into a matrix of doubles and a vector of doubles. - * in the regression equation: xb = y create matrix x and vector y - * @param cols the column positions to use for the matrix - * @param colj the column position to use for the vector - */ - def toMatrixV (cols: ArrayBuffer [Int], colj: Int): (MatrixD, VectorD) - - // =============================================================== TO VECTOR - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the colj column of this relation into a vector of doubles, etc. 
- * @param colj the column position to use for the vector - */ - def toVectorD (colj: Int = 0): VectorD - def toVectorI (colj: Int = 0): VectorI - def toVectorL (colj: Int = 0): VectorL - def toVectorS (colj: Int = 0): VectorS - def toVectorT (colj: Int = 0): VectorT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Save this table in a file using serialization. - */ - def save (): Unit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show this relation row by row. - * @param rng the range of tuples to show - */ - def show (rng: Range = 0 until 1): Unit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Write this table into a CSV file with each row written to a line. - * @param fileName the file name of the data file - */ - def writeCSV (fileName: String): Unit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Write this table into a JSON file. - * @param fileName the file name of the data file - */ - def writeJSON (fileName: String): Unit - -end Table - diff --git a/target/scala-3.6.4/classes/scalation/database/relation/old/Table.scala.bak2 b/target/scala-3.6.4/classes/scalation/database/relation/old/Table.scala.bak2 deleted file mode 100644 index d8060bbc5..000000000 --- a/target/scala-3.6.4/classes/scalation/database/relation/old/Table.scala.bak2 +++ /dev/null @@ -1,599 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Santosh Uttam Bobade - * @version 2.0 - * @date Sun Aug 23 15:42:06 EDT 2015 - * @see LICENSE (MIT style license file). - * - * @title Base Trait for Relational Database Engines - * - * An implementation supporting columnar relational databases facilitating easy - * and rapid analytics. The columns in a table/relation are vectors from the - * `scalation.mathstat` package. 
Vectors and matrices may be readily extracted - * from a relation and feed into any of the numerous analytics techniques provided - * in `scalation.modeling`. The implementation provides most of the columnar - * relational algebra operators given in the following paper: - * @see db.csail.mit.edu/projects/cstore/vldb.pdf - * - * Some of the operators have Unicode versions: @see `scalation.UnicodeTest` - */ - -package scalation -package database -package relation - -import scala.collection.mutable.{ArrayBuffer, IndexedSeq, Map} -import scala.collection.immutable.StringOps -import scala.reflect.ClassTag - -import scalation.mathstat._ - -/** Indicates which relation and which column an aggregate is to be applied to - */ -//type AggFunction = (Table, String) => Vectr - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Table` object provides functions for the `Table` trait. - */ -object Table: - - private val flaw = flawf ("Table") // flaw function - private var _ucount = 0 // counter for making unique table names - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the next unique count. - */ - def ucount (): Int = { _ucount += 1; _ucount } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given row 'tuple', project onto the given column positions specified in 'cPos'. - * @param tuple the row on which to apply the projection - * @param cPos the column positions - */ - def project (tuple: Row, cPos: IndexedSeq [Int]): Row = - cPos.map (tuple(_)).toVector - end project - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given a domain string 'dom', project onto the given column positions specified - * in 'cPos'. 
- * @param dom the domain string on which to apply the projection - * @param cPos the column positions - */ - def projectD (dom: Domain, cPos: IndexedSeq [Int]): Domain = - if dom != null then - val sb = new StringBuilder - for i <- cPos do sb.append (dom(i)) - sb.toCharArray - else null - end projectD - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether 't' and 'u' are the same on column positions 'tp' and 'up'. - * @param t the first tuple - * @param u the second tuple - * @param tp the column positions for tuple t - * @param up the column positions for tuple u - */ - def sameOn (t: Row, u: Row, tp: ArrayBuffer [Int], up: ArrayBuffer [Int]): Boolean = - project (t, tp) sameElements project (u, up) - end sameOn - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a tuple with missing values for each column according to the given - * domains. This function is used by 'leftJoin' and 'rightJoin'. - * @param domain the domains of the table for which a null tuple is required - */ - def nullTuple (dom: Domain): Row = - var v = Array.ofDim [ValueType] (dom.length) - v.indices.map (i => - dom(i) match { - case 'D' => v(i) = NO_DOUBLE - case 'I' => v(i) = NO_INT - case 'L' => v(i) = NO_LONG - case 'S' => v(i) = NO_STRING - case 'T' => v(i) = NO_TIMENUM - case _ => flaw ("nullTuple", s"not supported domain type ${dom(i)}") - }) - v.toVector - end nullTuple - -end Table - -import Table._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Table` trait defines methods for operating on vectors. - * The vectors form the columns of the columnar relational datastore. 
- * Columns may have any of the following domains/data-types: - * 'D' - `Double` - `VectorD` - 64 bit double precision floating point number - * 'I' - `Int` - `VectorI` - 32 bit integer - * 'L' - `Long` - `VectorL` - 64 bit long integer - * 'S' - `String` - `VectorS` - variable length numeric string - * 'T' - `TimeNum` - `VectorT` - time numbers for date-time - * @param name the name of the table - * @param schema the attributes for the table - * @param domain the domains/data-types for attributes ('D', 'I', 'L', 'S', 'T') - * @param key the attributes forming the primary key - * -trait Table (val name: String, val schema: Schema, val domain: Domain, val key: Schema) - extends Serializable: - - private val flaw = flawf ("Table") // flaw function - - val on = Map [String, Int] () // map from attribute name to column number - for j <- schema.indices do on += schema(j) -> j - - if schema.size != domain.size then flaw ("init", "size mismatch between attributes and domains") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the size in terms of number of rows in the table. - */ - def rows: Int - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the size in terms of number of columns in the table. - */ - def cols: Int = schema.size - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the cardinality (number of tuples) and arity (number of attributes). - */ - inline def dims: (Int, Int) = (rows, cols) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return all of the columns in the table. - */ - def columns: Vector [Vectr] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the column in the table with column name cName. 
- * @param cName column name used to retrieve the column vector - */ - def column (cName: String): Vectr - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the range of index values for the table. - */ - def indices: Range = 0 until rows - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a row by pulling values from all columns at position i. - * @param i the i-th position - */ - def row (i: Int): Row - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a row by pulling values from an array of strings and converting - * elements to their appropriate types. - * @param sos the sequence of strings holding the values - * @param _typ the string of corresponding types, e.g., Array ('S', 'D', 'I') - * - @throws (classOf [Exception]) - def row (sos: ArrayBuffer [String], dom: Domain): Row = - var result: Vector [ValueType] = null -// val typ = if _typ == null then "S" * sos.length else _typ // missing => assume String - try - result = (for j <- sos.indices yield - dom(j) match - case 'D' => if sos(j).isEmpty then 0.0 else new StringOps (sos(j)).toDouble - case 'I' => if sos(j).isEmpty then 0 else new StringOps (sos(j)).toInt - case 'L' => if sos(j).isEmpty then 0L else new StringOps (sos(j)).toLong - case 'T' => if sos(j).isEmpty then TimeNum._0 else TimeNum (sos(j)) - case _ => sos(j) - end match - ).toVector.asInstanceOf [Vector [ValueType]] - catch - case ex: Exception => - println (s"row function throw exception, row is:\n $sos \ntuple length is: ${sos.size}") - throw ex - end try - result - end row - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether this table contains a row matching the given 'tuple'. 
- * @param tuple an aggregation of columns values (potential row) - */ - def contains (tuple: Row): Boolean - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the table restricted to the given range of rows. - * @param rng the given range of rows - */ - def apply (rng: Range): Table - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the table restricted to the given collection of rows. - * @param rng the given collection of rows - */ - def apply (pos: collection.immutable.IndexedSeq [Int]): Table - - // ================================================================== RENAME - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** RENAME this table, returning a shallow copy of the table. - * @param newName the new name for the table. - */ - def rename (newName: String): Table - - inline def ρ (newName: String): Table = rename (newName) - - // ================================================================= PROJECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** PROJECT onto the columns with the given column names. - * @param x the names of the columns to project onto - */ - def project (x: Schema): Table - - inline def project (x: String): Table = project (splitTrim (x)) - - inline def π (x: String): Table = project (splitTrim (x)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** PROJECT onto the columns with the given column positions. 
- * @param cPos the column positions to project onto - * @param cName the optional new names for the columns to project onto - */ - def project (cPos: IndexedSeq [Int], cName: Schema = null): Table - - inline def π (cPos: IndexedSeq [Int], cName: Schema = null): Table = project (cPos, cName) - - // ========================================================== SELECT-PROJECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SELECT elements from column cName in this table that satisfy the - * predicate p and PROJECT onto that column. - * @param cName the name of the column used for selection - * @param apred the atomic predicate (`Boolean` function) to be satisfied - */ - def selproject (cName: String, apred: APredicate): Table - - inline def σπ (cName: String, apred: APredicate): Table = selproject (cName, apred) - - // ======================================================== EXTENDED PROJECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Aggregate/project on the given column (an extended projection operator that - * applies an aggregate operator to an aggregation column and regular projection - * to projection columns). - * @see en.wikipedia.org/wiki/Relational_algebra - * @param aggr_func the aggregate function - * @param newcol the new column name - * @param oldcol the old column name - */ - def eproject (aggr_func: AggFunction, newcol: String, oldcol: String): Table - - inline def Π (aggr_func: AggFunction, newcol: String, oldcol: String): Table = - eproject (aggr_func, newcol, oldcol) - end Π - - // ================================================================== SELECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SELECT elements from column cName in this table that satisfy the atomic - * predicate apred. 
- * @param cName the name of the column used for selection - * @param apred the atomic predicate (`Boolean` function) to be satisfied - */ - def select (cName: String, apred: APredicate): Table - - inline def σ (cName: String, apred: APredicate): Table = select (cName, apred) - - // =========================================================== SET OPERATORS - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** UNION this table and r2. Check that the two tables are compatible. - * @param r2 the second table - */ - def union (r2: Table): Table - - inline def ⋃ (r2: Table): Table = union (r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute this table MINUS (set difference) table r2 (this - r2). Check that - * the two tables are compatible. - * @param r2 the second table - */ - def minus (r2: Table): Table - - inline def - (r2: Table): Table = minus (r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** INTERSECT this table and r2. Check that the two tables are compatible. - * @param r2 the second table - */ - def intersect (r2: Table): Table - - inline def ⋂ (r2: Table): Table = intersect (r2) - - // ================================================================= PRODUCT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the CARTESIAN PRODUCT of this table and r2 (this × r2). - * @param r2 the second table - */ - def product (r2: Table): Table - - inline def × (r2: Table): Table = product (r2) - - // ==================================================================== JOIN - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** JOIN this table and r2 by performing a "natural-join". Rows from both - * tables are compared requiring agreement on common attributes (column names). 
- * @param r2 the rhs table in the join operation - */ - def join (r2: Table): Table = join (schema intersect r2.schema, r2) - - inline def ⋈ (r2: Table): Table = join (r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** JOIN this table and r2 by performing a "natural-join". Rows from both - * tables are compared requiring cName values to be equal. - * @param cName the common join column name for both table - * @param r2 the rhs table in the join operation - */ - def jOIN (cName: String, r2: Table): Table = join (Array (cName), r2) - - inline def ⋈ (cName: String, r2: Table): Table = join (Array (cName), r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** JOIN this table and r2 by performing a "natural-join". Rows from both - * tables are compared requiring cName values to be equal. - * @param cName the common join column names for both table - * @param r2 the rhs table in the join operation - */ - def join (cName: Schema, r2: Table): Table - - inline def ⋈ (cName: Schema, r2: Table): Table = join (cName, r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** JOIN this table and r2 by performing an "equi-join". Rows from both - * tables are compared requiring cName1 values to equal cName2 values. - * Disambiguate column names by appending "2" to the end of any duplicate column name. 
- * @param cName1 the join column name of this table (e.g., the Foreign Key) - * @param cName2 the join column name of table r2 (e.g., the Primary Key) - * @param r2 the rhs table in the join operation - */ - def join (cName1: String, cName2: String, r2: Table): Table = - join (Array (cName1), Array (cName2), r2) - end join - - inline def ⋈ (cName1: String, cName2: String, r2: Table): Table = - join (Array (cName1), Array (cName2), r2) - end ⋈ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** JOIN this table and r2 by performing an "equi-join". Rows from both - * tables are compared requiring cName1 values to equal cName2 values. - * Disambiguate column names by appending "2" to the end of any duplicate column name. - * @param cName1 the join column names of this table (e.g., the Foreign Key) - * @param cName2 the join column names of table r2 (e.g., the Primary Key) - * @param r2 the rhs table in the join operation - */ - def join (cName1: Schema, cName2: Schema, r2: Table): Table - - inline def ⋈ (cName1: Schema, cName2: Schema, r2: Table): Table = - join (cName1, cName2, r2) - end ⋈ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The THETA JOIN, handle the predicates in where are connect by "and" (where a....and b....). - * @param r2 the second table - * @param p0 the first theta join predicate (r1 cName, r2 cName, predicate to compare these two column) - * @param p the rest of theta join predicates (r1 cName, r2 cName, predicates to compare these two column) - */ - def join (r2: Table, p0: APredicate2, p: APredicate2*): Table - - inline def ⋈ (r2: Table, p0: APredicate2, p: APredicate2*): Table = - join (r2, p0, p :_*) - end ⋈ - - // ============================================================== OUTER JOIN - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join this table and r2 by performing an LEFT-JOIN. 
Rows from both - * tables are compared requiring cName1 values to equal cName2 values. - * Disambiguate column names by appending "2" to the end of any duplicate column name. - * All rows from the left table are maintained with missing values indicators used - * where needed. - * @param cName1 the join column names of this table (e.g., the Foreign Key) - * @param cName2 the join column names of table r2 (e.g., the Primary Key) - * @param r2 the rhs table in the join operation - */ - def leftJoin (cName1: String, cName2: String, r2: Table): Table - - // Note: although this is the semi-join symbol, due to Unicode limitations, it is used for left-join. - - inline def ⋉ (cName1: String, cName2: String, r2: Table): Table = leftJoin (cName1, cName2, r2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join this table and r2 by performing an APPRIOXMATE LEFT-JOIN. Rows from both - * tables are compared requiring cName1 values to apprximately equal cName2 values. - * Disambiguate column names by appending "2" to the end of any duplicate column name. - * All rows from the left table are maintained with missing values indicators used - * where needed. - * @param thres the approximate equality threshold - * @param cName1 the join column names of this table (e.g., the Foreign Key) - * @param cName2 the join column names of table r2 (e.g., the Primary Key) - * @param r2 the rhs table in the join operation - */ - def leftJoinApx (thres: Double = 0.001) (cName1: String, cName2: String, r2: Table): Table - - inline def ⋉ (thres: Double = 0.001) (cName1: String, cName2: String, r2: Table): Table = - leftJoinApx (thres)(cName1, cName2, r2) - end ⋉ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Join this table and r2 by performing an RIGHT-JOIN. Rows from both - * tables are compared requiring cName1 values to equal cName2 values. 
- * Disambiguate column names by appending "2" to the end of any duplicate column name. - * All rows from the right table are maintained with missing values indicators used - * where needed. - * @param cName1 the join column names of this table (e.g., the Foreign Key) - * @param cName2 the join column names of table r2 (e.g., the Primary Key) - * @param r2 the rhs table in the join operation - */ - inline def rightJoin (cName1: String, cName2: String, r2: Table): Table = r2.leftJoin (cName2, cName1, this) - - inline def ⋊ (cName1: String, cName2: String, r2: Table): Table = r2.leftJoin (cName2, cName1, this) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Combine two sequences of column names, keeping all names from cn1 and - * only those in cn2 that are not repeats (i.e., not already in cn1). - * @param cn1 the first sequence of column names - * @param cn2 the second sequence of column names - */ - protected def uniq_union (cn1: Schema, cn2: Schema): Schema = - var cn3 = cn1 - for j <- cn2.indices if ! (cn3 contains cn2(j)) do cn3 = cn3 :+ cn2(j) - cn3 - end uniq_union - - // ================================================================== DIVIDE - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** DIVIDE this table by table r2. Requires a tuple in the quotient part of - * this table to be paired with all tuples in table r2. - * @param r2 the second table - */ - def divide (r2: Table): Table - - inline def / (r2: Table): Table = divide (r2) - - // ================================================================ GROUP BY - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** GROUP this table BY specified column name, returning this table. 
- * @param cName the group column - */ - def groupBy (cName: String): Table -// def groupBy (cName: String*): Table - - inline def γ (cName: String): Table = groupBy (cName) -// inline def γ (cName: String*): Table = groupBy (cName :_*) - - // ================================================================ ORDER BY - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** ORDER (ascending) the rows in the table BY the selected columns cName. - * A stable sorting is used to allow sorting on multiple columns. - * @param cName the column names that are to be sorted - */ - def orderBy (cName: String*): Table - - inline def ϙ (cName: String*): Table = orderBy (cName :_*) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** ORDER (descending) the rows in the table BY the selected columns cName. - * A stable sorting is used to allow sorting on multiple columns. - * @param cName the column names that are to be sorted - */ - def reverseOrderBy (cName: String*): Table - - inline def ω (cName: String*): Table = reverseOrderBy (cName :_*) - - // ================================================================= UPDATES - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** ADD (insert) tuple to this table as a new row. - * @param tuple an aggregation of columns values (new row) - */ - def add (tuple: Row): Table - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** ADD a tuple to this table as a new row, materialize and return updated table. - * May call for last tuple in a batch of tuples. - * @param tuple an aggregation of columns values (new row) - */ - def addm (tuple: Row): Table - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** UPDATE the column named cName using newVal for elements with value matchVal. 
- * @param cName the name of the column to be updated - * @param newVal the value used to assign updated values - * @param matchVal the value to be matched to elements - */ - def update (cName: String, newVal: ValueType, matchVal: ValueType): Boolean - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** UPDATE the column named cName using function func for elements with - * value matchVal. - * @param cName the name of the column to be updated - * @param func the function used to assign updated values - * @param matchVal the value to be matched to elements - */ - def update (cName: String, func: ValueType => ValueType, matchVal: ValueType): Boolean - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** DELETE the rows from this table that satisfy the predicates. - * @param p the atomic predicate - */ - def delete (p: Predicate): Boolean - - // =============================================================== TO MATRIX - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this table into a matrix of doubles, e.g., - * in the regression equation: xb = y create matrix xy - * @param cols the column positions to use for the matrix - */ - def toMatrix (cols: ArrayBuffer [Int]): MatrixD - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this table into a matrix of doubles and a vector of doubles. - * in the regression equation: xb = y create matrix x and vector y - * @param cols the column positions to use for the matrix - * @param colj the column position to use for the vector - */ - def toMatrixV (cols: ArrayBuffer [Int], colj: Int): (MatrixD, VectorD) - - // =============================================================== TO VECTOR - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the colj column of this relation into a vector of doubles, etc. 
- * @param colj the column position to use for the vector - */ - def toVectorD (colj: Int = 0): VectorD - def toVectorI (colj: Int = 0): VectorI - def toVectorL (colj: Int = 0): VectorL - def toVectorS (colj: Int = 0): VectorS - def toVectorT (colj: Int = 0): VectorT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether this table and r2 are incompatible by having - * differing numbers of columns or differing domain strings. - * @param r2 the second table - */ - def incompatible (r2: Table): Boolean - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Save this table in a file using serialization. - */ - def save (): Unit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show this relation row by row. - * @param rng the range of tuples to show - */ - def show (rng: Range = 0 until 1): Unit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Write this table into a CSV file with each row written to a line. - * @param fileName the file name of the data file - */ - def writeCSV (fileName: String): Unit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Write this table into a JSON file. 
- * @param fileName the file name of the data file - */ - def writeJSON (fileName: String): Unit - -end Table - */ - diff --git a/target/scala-3.6.4/classes/scalation/database/relation/relationTest.class b/target/scala-3.6.4/classes/scalation/database/relation/relationTest.class deleted file mode 100644 index 6f4bfed21..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/relationTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/relationTest.tasty b/target/scala-3.6.4/classes/scalation/database/relation/relationTest.tasty deleted file mode 100644 index 942e20f4c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/relationTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/relationTest11.class b/target/scala-3.6.4/classes/scalation/database/relation/relationTest11.class deleted file mode 100644 index 017a6fe13..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/relationTest11.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/relationTest11.tasty b/target/scala-3.6.4/classes/scalation/database/relation/relationTest11.tasty deleted file mode 100644 index 378856765..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/relationTest11.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/relationTest2.class b/target/scala-3.6.4/classes/scalation/database/relation/relationTest2.class deleted file mode 100644 index deeceb5b6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/relationTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/relationTest2.tasty b/target/scala-3.6.4/classes/scalation/database/relation/relationTest2.tasty deleted file mode 100644 index 29e9189a0..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/relation/relationTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/relationTest5.class b/target/scala-3.6.4/classes/scalation/database/relation/relationTest5.class deleted file mode 100644 index 8aea9a1aa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/relationTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/relationTest5.tasty b/target/scala-3.6.4/classes/scalation/database/relation/relationTest5.tasty deleted file mode 100644 index 520142978..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/relationTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/relationTest7.class b/target/scala-3.6.4/classes/scalation/database/relation/relationTest7.class deleted file mode 100644 index cccef491e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/relationTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/relationTest7.tasty b/target/scala-3.6.4/classes/scalation/database/relation/relationTest7.tasty deleted file mode 100644 index 236dbec79..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/relationTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/relationTest8.class b/target/scala-3.6.4/classes/scalation/database/relation/relationTest8.class deleted file mode 100644 index 70ed07d13..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/relationTest8.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/relationTest8.tasty b/target/scala-3.6.4/classes/scalation/database/relation/relationTest8.tasty deleted file mode 100644 index da1047d57..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/relation/relationTest8.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/relationTest9.class b/target/scala-3.6.4/classes/scalation/database/relation/relationTest9.class deleted file mode 100644 index c3657f86e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/relationTest9.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/relationTest9.tasty b/target/scala-3.6.4/classes/scalation/database/relation/relationTest9.tasty deleted file mode 100644 index ad8fbfbf5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/relationTest9.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/showTables.class b/target/scala-3.6.4/classes/scalation/database/relation/showTables.class deleted file mode 100644 index b6ea49b10..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/showTables.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/showTables.tasty b/target/scala-3.6.4/classes/scalation/database/relation/showTables.tasty deleted file mode 100644 index 4fc150ed5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/showTables.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/tableGenTest.class b/target/scala-3.6.4/classes/scalation/database/relation/tableGenTest.class deleted file mode 100644 index 62c4ff4ca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/relation/tableGenTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/relation/tableGenTest.tasty b/target/scala-3.6.4/classes/scalation/database/relation/tableGenTest.tasty deleted file mode 100644 index e933db58f..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/relation/tableGenTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/spanningTreeTest.class b/target/scala-3.6.4/classes/scalation/database/spanningTreeTest.class deleted file mode 100644 index 06cb490b7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/spanningTreeTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/spanningTreeTest.tasty b/target/scala-3.6.4/classes/scalation/database/spanningTreeTest.tasty deleted file mode 100644 index e8a0a8012..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/spanningTreeTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/tNodeTest.class b/target/scala-3.6.4/classes/scalation/database/tNodeTest.class deleted file mode 100644 index b485d0827..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/tNodeTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/tNodeTest.tasty b/target/scala-3.6.4/classes/scalation/database/tNodeTest.tasty deleted file mode 100644 index 82120d979..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/tNodeTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/BankDB$package$.class b/target/scala-3.6.4/classes/scalation/database/table/BankDB$package$.class deleted file mode 100644 index 66dcb6462..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/BankDB$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/BankDB$package.class b/target/scala-3.6.4/classes/scalation/database/table/BankDB$package.class deleted file mode 100644 index b64c4d73e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/BankDB$package.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/table/BankDB$package.tasty b/target/scala-3.6.4/classes/scalation/database/table/BankDB$package.tasty deleted file mode 100644 index 1a397d220..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/BankDB$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/Edge$.class b/target/scala-3.6.4/classes/scalation/database/table/Edge$.class deleted file mode 100644 index b9864c285..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/Edge$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/Edge.class b/target/scala-3.6.4/classes/scalation/database/table/Edge.class deleted file mode 100644 index 8039b346e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/Edge.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/Edge.tasty b/target/scala-3.6.4/classes/scalation/database/table/Edge.tasty deleted file mode 100644 index 090659edd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/Edge.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/GTable$.class b/target/scala-3.6.4/classes/scalation/database/table/GTable$.class deleted file mode 100644 index abcfee01f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/GTable$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/GTable$package$.class b/target/scala-3.6.4/classes/scalation/database/table/GTable$package$.class deleted file mode 100644 index 8cea61d42..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/GTable$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/GTable$package.class b/target/scala-3.6.4/classes/scalation/database/table/GTable$package.class deleted file mode 
100644 index d95a615f5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/GTable$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/GTable$package.tasty b/target/scala-3.6.4/classes/scalation/database/table/GTable$package.tasty deleted file mode 100644 index 0281591a5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/GTable$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/GTable.class b/target/scala-3.6.4/classes/scalation/database/table/GTable.class deleted file mode 100644 index 6b5bd647e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/GTable.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/GTable.tasty b/target/scala-3.6.4/classes/scalation/database/table/GTable.tasty deleted file mode 100644 index 104673e97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/GTable.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/KGTable$.class b/target/scala-3.6.4/classes/scalation/database/table/KGTable$.class deleted file mode 100644 index 7ed12105e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/KGTable$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/KGTable$package$.class b/target/scala-3.6.4/classes/scalation/database/table/KGTable$package$.class deleted file mode 100644 index 994219637..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/KGTable$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/KGTable$package.class b/target/scala-3.6.4/classes/scalation/database/table/KGTable$package.class deleted file mode 100644 index 4251b03aa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/KGTable$package.class 
and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/KGTable$package.tasty b/target/scala-3.6.4/classes/scalation/database/table/KGTable$package.tasty deleted file mode 100644 index 5da180ae8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/KGTable$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/KGTable.class b/target/scala-3.6.4/classes/scalation/database/table/KGTable.class deleted file mode 100644 index 3e8642824..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/KGTable.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/KGTable.tasty b/target/scala-3.6.4/classes/scalation/database/table/KGTable.tasty deleted file mode 100644 index 138ce6361..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/KGTable.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/LTable$.class b/target/scala-3.6.4/classes/scalation/database/table/LTable$.class deleted file mode 100644 index 26ab0dd67..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/LTable$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/LTable$package$.class b/target/scala-3.6.4/classes/scalation/database/table/LTable$package$.class deleted file mode 100644 index 116677d9a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/LTable$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/LTable$package.class b/target/scala-3.6.4/classes/scalation/database/table/LTable$package.class deleted file mode 100644 index 729529e21..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/LTable$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/LTable$package.tasty 
b/target/scala-3.6.4/classes/scalation/database/table/LTable$package.tasty deleted file mode 100644 index f990dfee0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/LTable$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/LTable.class b/target/scala-3.6.4/classes/scalation/database/table/LTable.class deleted file mode 100644 index 9f571c4ed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/LTable.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/LTable.tasty b/target/scala-3.6.4/classes/scalation/database/table/LTable.tasty deleted file mode 100644 index 64d211458..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/LTable.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/MovieDB$package$.class b/target/scala-3.6.4/classes/scalation/database/table/MovieDB$package$.class deleted file mode 100644 index ffcb81704..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/MovieDB$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/MovieDB$package.class b/target/scala-3.6.4/classes/scalation/database/table/MovieDB$package.class deleted file mode 100644 index e1b2114e0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/MovieDB$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/MovieDB$package.tasty b/target/scala-3.6.4/classes/scalation/database/table/MovieDB$package.tasty deleted file mode 100644 index 9a83bc30e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/MovieDB$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/PurchaseOrderDB$package$.class b/target/scala-3.6.4/classes/scalation/database/table/PurchaseOrderDB$package$.class deleted 
file mode 100644 index fc388627d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/PurchaseOrderDB$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/PurchaseOrderDB$package.class b/target/scala-3.6.4/classes/scalation/database/table/PurchaseOrderDB$package.class deleted file mode 100644 index 45170a5d3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/PurchaseOrderDB$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/PurchaseOrderDB$package.tasty b/target/scala-3.6.4/classes/scalation/database/table/PurchaseOrderDB$package.tasty deleted file mode 100644 index 010f15c14..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/PurchaseOrderDB$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/PurchaseOrderDB.class b/target/scala-3.6.4/classes/scalation/database/table/PurchaseOrderDB.class deleted file mode 100644 index 6a224c3c3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/PurchaseOrderDB.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/PurchaseOrderDB.tasty b/target/scala-3.6.4/classes/scalation/database/table/PurchaseOrderDB.tasty deleted file mode 100644 index 20959dde2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/PurchaseOrderDB.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB$.class b/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB$.class deleted file mode 100644 index 9b9d1e404..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB$package$.class 
b/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB$package$.class deleted file mode 100644 index 0e68e86ac..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB$package.class b/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB$package.class deleted file mode 100644 index f288d3f46..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB$package.tasty b/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB$package.tasty deleted file mode 100644 index 234c867f9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB.class b/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB.class deleted file mode 100644 index 6c5e00eb6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB.tasty b/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB.tasty deleted file mode 100644 index ef54775d7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/TA_AssignmentDB.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/Table$.class b/target/scala-3.6.4/classes/scalation/database/table/Table$.class deleted file mode 100644 index 6f27b5bd2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/Table$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/Table$package$.class 
b/target/scala-3.6.4/classes/scalation/database/table/Table$package$.class deleted file mode 100644 index a0494703d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/Table$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/Table$package.class b/target/scala-3.6.4/classes/scalation/database/table/Table$package.class deleted file mode 100644 index 9b85e8d35..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/Table$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/Table$package.tasty b/target/scala-3.6.4/classes/scalation/database/table/Table$package.tasty deleted file mode 100644 index bb9669562..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/Table$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/Table.class b/target/scala-3.6.4/classes/scalation/database/table/Table.class deleted file mode 100644 index c6e477c39..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/Table.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/Table.scala.bak b/target/scala-3.6.4/classes/scalation/database/table/Table.scala.bak deleted file mode 100644 index 713259150..000000000 --- a/target/scala-3.6.4/classes/scalation/database/table/Table.scala.bak +++ /dev/null @@ -1,2019 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Fri Jun 17 11:19:14 EDT 2022 - * @see LICENSE (MIT style license file). 
- * - * @note Relational Algebra (RA) for Row-Oriented Relational DBMS - * - * RA Operators: rename, project, select, union, minus, intersect, product, join, - * leftJoin, divide, groupBy, aggregate, orderBy - * - * Most of the RA Operators have Unicode versions: @see `scalation.UnicodeTest` - * - * Types of Indices (for Unique, Non-Unique Indices): - * LinHashMap, LinHashMultiMap // ScalaTion's Linear Hash Maps - * HashMap, HashMultiMap // Scala's Hash Maps - * JHashMap, JHashMultiMap // Java's Hash Maps - * BpTreeMap, BpTreeMultiMap // ScalaTion's B+Tree Maps - * TreeMap, TreeMultiMap // Scala's Tree Maps - * JTreeMap, JTreeMultiMap // Java's Tree Maps - */ - -package scalation -package database -package table - -//import com.google.gson.Gson -//import com.google.gson.reflect.TypeToken - -import java.io.{FileInputStream, FileOutputStream, File} -import java.io.{ObjectInputStream, ObjectOutputStream, PrintWriter} - -// pick a type of Map for Unique `IndexMap` and for Non-Unique `MIndexMap` - -//import scalation.database.{LinHashMap => IndexMap} -//import scalation.database.{LinHashMultiMap => MIndexMap} - -//import scala.collection.mutable.{HashMap => IndexMap} -//import scalation.database.{HashMultiMap => MIndexMap} - -//import scalaTion.database.{JHashMap => IndexMap} -//import scalaTion.database.{JHashMultiMap => MIndexMap} - -import scalation.database.{BpTreeMap => IndexMap} -import scalation.database.{BpTreeMultiMap => MIndexMap} - -//import scala.collection.mutable.{TreeMap => IndexMap} -//import scalation.database.{TreeMultiMap => MIndexMap} - -//import scalation.database.{JTreeMap => IndexMap} -//import scalation.database.{JTreeMultiMap => MIndexMap} - -import scala.collection.mutable.{ArrayBuffer => Bag, IndexedSeq, Map} -import scala.math.max -import scala.runtime.ScalaRunTime.stringOf -import scala.util.control.Breaks.{breakable, break} - -import scalation.mathstat.{MatrixD, VectorD, VectorI, VectorL, VectorS, VectorT} - 
-//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Determine whether the bag of tuples ts1 is the same ts2. - * @param ts1 the first bag of tuples - * @param ts2 the second bag of tuples - */ -def sameTuples (ts1: Bag [Tuple], ts2: Bag [Tuple]): Boolean = - if ts1.size != ts2.size then return false - var i = 0 - while i < ts1.size do - if ! (ts1(i) sameElements ts2(i)) then return false - i += 1 - true -end sameTuples - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Table` companion object provides factory methods for creating tables. - * Supported domains/data-types are 'D'ouble, 'I'nt, 'L'ong, 'S'tring, and 'T'imeNum. - * Note 'X' is for Long String (a formatting issue). - */ -object Table: - - private val debug = debugf ("Table", false) // debug function - private val flaw = flawf ("Table") // flaw function - private val cntr = Counter () // counter for generating unique names - - private var useFullPath = false // defaults to using relative file paths - private var limit = -1 // limit on number of lines to read - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the full-path flag to the value of parameter fullPath. - * @param fullPath flag indicating whether full or relative paths should be used - */ - def setFullPath (fullPath: Boolean = true): Unit = { useFullPath = fullPath } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the limit on the number of lines to read to lim. - * @param lim the limit on the number of lines to read (<= 0 => unlimited) - */ - def setLimit (lim: Int): Unit = { limit = lim } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a table given convenient string specifications. 
- * @param name the name of the table - * @param schema the attributes for the table - * @param domain_ the domains/data-types for attributes ('D', 'I', 'L', 'S', 'X', 'T') - * @param key the attributes forming the primary key - */ - def apply (name: String, schema: String, domain_ : String, key: String): Table = - new Table (name, strim (schema), strim (domain_).map (_.head), strim (key)) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a new empty table with the same schema as an existing table. - * @param name the name of the new table - * @param tab the existing table - */ - def apply (name: String, tab: Table): Table = - new Table (name, tab.schema, tab.domain, tab.key) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the table with the given name into memory loading its columns with data from - * the CSV file named fileName. The attribute names are read from the FIRST LINE. - * @param fileName the file name (or file-path) of the data file - * @param name the name of the table - * @param domain_ the domains/data-types (as one string) for attributes ('D', 'I', 'L', 'S', 'X', 'T') - * @param key the attributes forming the primary key - * @param pos_ the sequence of column positions in the input file to be used (null => select all) - * @param sep the element separation string/regex (e.g., "," ";" " +") - */ - def load (fileName: String, name: String, domain_ : String, key: String, - pos: Array [Int], sep: String): Table = - load (fileName, name, strim (domain_).map (_.head), key, pos, sep) - end load - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the table with the given name into memory loading its columns with data from - * the CSV file named fileName. The attribute names are read from the FIRST LINE. 
- * @see scalation.readFileIntoArray - * @param fileName the file name (or file-path) of the data file - * @param name the name of the table - * @param domain the domains/data-types for attributes ('D', 'I', 'L', 'S', 'X', 'T') - * @param key the attributes forming the primary key - * @param pos_ the sequence of column positions in the input file to be used (null => select all) - * @param sep the element separation string/regex (e.g., "," ";" " +") - */ - def load (fileName: String, name: String, domain: Domain, key: String, - pos_ : Array [Int] = null, sep: String = ","): Table = - - debug ("load", s"""fileName = $fileName, name = $name, domain = ${stringOf (domain)}, key = $key, - pos_ = $pos_, sep = '$sep'; useFullPath = $useFullPath, limit = $limit""") - - val pos = if pos_ == null then Array.range (0, domain.size) else pos_ - val schema = Array.ofDim [String] (domain.size) - - if pos.size != domain.size then flaw ("apply", "pos size should be same as domain size") - - var s: Table = null // new Table (name, schema, domain, strim (key)) - -// val lines = getFromURL_File (fileName) // read the CSV file - val lines = readFileIntoArray (fileName, useFullPath, limit) // read the CSV file - var l_no = 0 // the line number - - println (s"lines(0) = ${lines(0)}") - - for ln <- lines do // iterate by lines in file - - if l_no == 0 then // FIRST LINE - for schema - val header = ln.split (sep, -1).map (_.trim) // array of column names - debug ("load", s"header = ${stringOf (header)}") - for j <- pos.indices do schema(j) = header(pos(j)) // use those at positions in pos - s = new Table (name, schema, domain, strim (key)) // make table after schema is formed - - else // REMAINING LINES - val token = ln.split (sep, -1).map (_.trim) // array of token strings - s.tuples += makeTuple (token, domain, pos) - end if - - l_no += 1 - end for - s - end load - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Read the table with the given name into 
memory loading its columns with data from - * the CSV file named fileName. The attribute names are read from the FIRST LINE. - * Use a short-cut (not reliable) to determines the column domains, by applying - * the 'tuple2type' method to the SECOND LINE. - * Note: safer to pull a row without missing or zero values from the middle of the dataset - * @see `tableTest3` - * @see scalation.readFileIntoArray - * @param fileName the file name (or file-path) of the data file - * @param name the name of the table - * @param mumCol the number of columns - * @param key the attributes forming the primary key - */ - def load (fileName: String, name: String, numCol: Int, key: String): Table = - - val pos = Array.range (0, numCol) - val sep = "," - debug ("load", s"""fileName = $fileName, name = $name, numCol = $numCol, key = $key, - pos = $pos, sep = '$sep'; useFullPath = $useFullPath, limit = $limit""") - - val schema = Array.ofDim [String] (numCol) - val domain = Array.ofDim [Char] (numCol) - - var s: Table = null // new Table (name, schema, domain, strim (key)) - -// val lines = getFromURL_File (fileName) // read the CSV file - val lines = readFileIntoArray (fileName, useFullPath, limit) // read the CSV file - var l_no = 0 // the line number - - for ln <- lines do // iterate by lines in file - - if l_no == 0 then // FIRST LINE - for schema - val header = ln.split (sep, -1).map (_.trim) // array of column names - debug ("load", s"header = ${stringOf (header)}") - for j <- 0 until numCol do schema(j) = header(j) // collect from header - s = new Table (name, schema, domain, strim (key)) // make table after schema is formed - - else if l_no == 1 then // SECOND LINE - for domains - val token = ln.split (sep, -1).map (_.trim) // array of token strings - val dom = tuple2type (token) // guess domains from first data row - debug ("load", s"dom = ${stringOf (dom)}") - for j <- 0 until numCol do domain(j) = dom(j) // collect from dom - s.tuples += makeTuple (token, domain, pos) - - else 
// REMAINING LINES - val token = ln.split (sep, -1).map (_.trim) // array of token strings - s.tuples += makeTuple (token, domain, pos) - end if - - l_no += 1 - end for - s - end load - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Make a tuple from an array of token strings, converting each each token - * according the corresponding domain specification. Use only the tokens - * in the array at the pos positions. - * @param token the array of token strings, e.g., Array ("5.0", "12", "Smith") - * @param domain the domains/data-types for attributes ('D', 'I', 'L', 'S', 'X', 'T') - * @param pos the positions in the token array to be used, e.g., Array (0, 2) - */ - def makeTuple (token: Array [String], domain: Domain, pos: Array [Int]): Tuple = - if token.size < pos.max then - flaw ("makeTuple", "not enough tokens for positions given in pos") - return null - end if - - val tup = Array.ofDim [ValueType] (domain.size) // more robust than using token.size - for j <- pos.indices do - val nextToken = token(pos(j)) // get j-th token according to pos - tup(j) = domain(j) match - case 'D' => nextToken.mkDouble // Double - case 'I' => nextToken.toInt // Int - case 'L' => nextToken.toLong // Long - case 'S' | 'X' => nextToken // String or Long-String - case 'T' => TimeNum (nextToken) // TimeNum - case _ => { flaw ("makeTuple", s"domain($j) = ${domain(j)} not supported"); "?" } - end for - tup - end makeTuple - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given an array of strings (e.g., read from a file) with unknown domains, - * return the data-types (domains) by the lexical form of the strings. - * @see `scalation.typeOfStr` (in ValueType.scala) - * Caveat: may not be reliable since a column of doubles may start: 5, 7, 9.2, ... 
- * @param tup the type un-differentiated tuple as an array of strings - */ - def tuple2type (tup: Array [String]): Domain = - val dom = Array.ofDim [Char] (tup.size) - for j <- dom.indices do dom(j) = typeOfStr (tup(j)) - dom - end tuple2type - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether the given table file exists in STORE_DIR. - * @param name the name of table. - */ - def exist (name: String): Boolean = File (STORE_DIR + name + SER).exists () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** LOAD/Read the table with the given name into memory using serialization. - * @see save in `Table` class. - * @param name the name of the table to load - */ - def load (name: String): Table = - val ois = new ObjectInputStream (new FileInputStream (STORE_DIR + name + SER)) - val tab = ois.readObject.asInstanceOf [Table] - ois.close () - tab - end load - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** LOAD/Read the table with the given name into memory from a JSON file. - * @param fileName the file name of the JSON file - * @param name the name of the table to load - */ - def load (fileName: String, name: String): Table = - val jsonArr = readFileIntoArray (fileName) -// val nlines = jsonArr.size - val jsonStr: String = jsonArr(0) - debug ("load", s"jsonStr = ${jsonStr.slice (0, 5000)}") - val tab: Table = null // FIX - change to var -// val gson = new Gson () -// val tableType = new TypeToken [Table] ().getType // FIX - fails -// tab = gson.fromJson (jsonStr, tableType) - tab - end load - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a table from a matrix of doubles. 
- * @see the `toMatrix` mathod - * @param x the matrix containing the data - * @param name the name of the table - * @param schema the attribute/column names - * @param key the attributes forming the primary key - */ - def fromMatrix (x: MatrixD, name: String, schema: Schema, key: String): Table = - val domain = Array.fill (x.dim2)('D') // domain is all 'D' - val s = new Table (name, schema, domain, strim (key)) - - for i <- x.indices do s.tuples += x(i).toArray // i-th vector to tuple - s - end fromMatrix - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return basic statistics on the given column corresponding to SQL's - * aggregate functions: count, countd, min, max, sum, avg. - * @param cname the given column name - * @param colj the given column - */ - def stats (cname: String, colj: Array [ValueType]): Array [ValueType] = - Array (cname, count (colj), countd (colj), min (colj).toString, max (colj).toString, sum (colj), avg (colj)) - end stats - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the total number of elements in a column. - * @param colj the given column - */ - def count (colj: Array [ValueType]): ValueType = colj.size - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the number of distinct elements in a column. - * @param colj the given column - */ - def countd (colj: Array [ValueType]): ValueType = colj.distinct.size - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the minimum value of all the elements in a column. - * @param colj the given column - */ - def min (colj: Array [ValueType]): ValueType = colj.min (ValueTypeOrd) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the maximum value of all the elements in a column. 
- * @param colj the given column - */ - def max (colj: Array [ValueType]): ValueType = colj.max (ValueTypeOrd) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the average of all the elements for a numeric column or 0 otherwise. - * @param colj the given column - */ - def avg (colj: Array [ValueType]): ValueType = sum (colj).toDouble / colj.size - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the sum of all the elements for a numeric column or 0 otherwise. - * @param colj the given column - */ - def sum (colj: Array [ValueType]): ValueType = - colj(0) match - case _: Double => var s = 0.0; for x <- colj do s += x.toDouble; s - case _: Int => var s = 0.0; for x <- colj do s += x.toDouble; s - case _: Long => var s = 0.0; for x <- colj do s += x.toDouble; s - case _: String => -0.0 - case _: TimeNum => -0.0 - case null => -0.0 - end match - end sum - - def π (x: String)(r: Table): Table = r.project (strim (x)) - def σ (condition: String)(r: Table): Table = r.select (condition) - def σ (predicate: Predicate)(r: Table): Table = r.select (predicate) - -end Table - -import Table.{cntr, debug, flaw} - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Table` class stores relational data and implements relational algebra operators. - * Supported domains/data-types are 'D'ouble, 'I'nt, 'L'ong, 'S'tring, and 'T'imeNum. 
- * @param name the name of the table - * @param schema the attributes for the table - * @param domain the domains/data-types for the attributes ('D', 'I', 'L', 'S', 'X', 'T') - * @param key the attributes forming the primary key - */ -class Table (name: String, schema: Schema, domain: Domain, key: Schema) - extends Tabular [Table] (name, schema, domain, key) - with Serializable: - - private [table] val tuples = Bag [Tuple] () // storage of tuples - private [table] val linkTypes = Map [String, Table] () // link types for foreign keys -// private [table] val index = IndexMap [KeyType, Tuple] () // index on primary key - private [table] val index = IndexMap [Tuple] () // index on primary key - private [table] var hasIndex = false // whether the primary index has been built -// private [table] val sindex = Map [String, IndexMap [ValueType, Tuple]] () // map of secondary unique indices - private [table] val sindex = Map [String, IndexMap [Tuple]] () // map of secondary unique indices -// private [table] val mindex = Map [String, MIndexMap [ValueType, Tuple]] () // map of secondary non-unique indices - private [table] val mindex = Map [String, MIndexMap [Tuple]] () // map of secondary non-unique indices - private val groupMap = Map [ValueType, Bag [Tuple]] () // map from group key to collection of tuples - - protected val countX = domain.count ((c: Char) => c == 'X') // count the number of eXtended Strings - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the cardinality (number of tuples) in this table. - */ - inline def rows: Int = tuples.size - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the j-th column in this table (or the passed in tuples) as an array of value-type. 
- * @param j the column to return - * @param tups the collection of tuples to use (defaults to all tuples in this table) - */ - def col (j: Int, tups: Bag [Tuple] = tuples): Array [ValueType] = - if j >= schema.size then - flaw ("col", s"column index j = $j exceeds the number of columns") - end if - val c = Array.ofDim [ValueType] (tups.size) - for i <- c.indices do c(i) = tups(i)(j) - c - end col - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether this table contains tuple u. - * @param u the tuple to look for - */ - infix def contains (u: Tuple): Boolean = tuples.exists (_ sameElements u) - - // I N T E G R I T Y C H E C K S - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add LINKAGE from this table to the refTab, by adding a FOREIGN KEY CONSTRAINT - * to this table specifying the foreign key attribute fkey and the table it - * references refTab. If refTab does not have a primary index already, make one. - * Caveat: a foreign key may not be composite. - * @param fkey the foreign key attribute - * @param refTab the table being referenced (to its primary key) - */ - def addLinkage (fkey: String, refTab: Table): Unit = - if ! refTab.hasIndex then refTab.create_index () - linkTypes += fkey -> refTab - end addLinkage - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Check that all the foreign keys values in tuple t satisfy their - * REFERENTIAL INTEGRITY CONSTRAINTS. 
- * @param t the tuple being checked for referential integrity - */ - def referenceCheck (t: Tuple): Boolean = - var satisfy = true - for (fkey, refTab) <- linkTypes do - debug ("referenceCheck", s"does fkey = $fkey reference a pkey in ${refTab.name}") -// val fkeyVal = new KeyType (pull (t, fkey)) - val fkeyVal = pull (t, fkey) - if refTab.hasIndex && refTab.index.getOrElse (fkeyVal, null) == null then - flaw ("referenceCheck", s"foreign key $fkey = $fkeyVal is not in table ${refTab.name}") - flaw ("referenceCheck", s"where the tuple is ${stringOf (t)}") - satisfy = false - end if - end for - satisfy - end referenceCheck - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the i-th primary key. - * @param i the index in the tuples/row index - */ - inline def getPkey (i: Int): KeyType = new KeyType (pull (tuples(i), key)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** CREATE/recreate the primary INDEX that maps the primary key to the tuple - * containing it. Warning, creating an index will remove DUPLICATES based - * on maintaining UNIQUENESS CONSTRAINT of primary key values. 
- * @param rebuild if rebuild is true, use old index to build new index; otherwise, create new index - */ - def create_index (rebuild: Boolean = false): Unit = - debug ("create_index", s"create an index of type ${index.getClass.getName}") - if rebuild then flaw ("create_index", "rebuilding off old primary key index has not yet been implemented") - index.clear () - val toRemove = Bag [Tuple] () - for t <- tuples do -// val pkey = new KeyType (pull (t, key)) // primary key - val pkey = pull (t, key)(0) // primary key - if index.getOrElse (pkey, null) == null then index += pkey -> t - else toRemove += t - end for - debug ("create_index", s"remove duplicate tuples = ${showT (toRemove)}") - tuples --= toRemove - hasIndex = true - end create_index - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** CREATE a secondary unique INDEX that maps a secondary key to the tuple - * containing it. Has no effect on duplicates; should first create a primary - * index to remove duplicates, otherwise, this index may skip tuples. - * @param atr the attribute/column to create the index on - */ - def create_sindex (atr: String): Unit = - debug ("create_sindex", s"create a secondary unique index of type ${index.getClass.getName}") - if ! hasIndex then flaw ("create_sindex", "should first create a primary index to eliminate duplicates") -// val newIndex = IndexMap [ValueType, Tuple] () - val newIndex = IndexMap [Tuple] () - for t <- tuples do - val skey = (pull (t, atr)) // secondary (non-composite) key - newIndex += skey -> t // add key-value pair into new index - end for - sindex += atr -> newIndex // add new index into the sindex map - end create_sindex - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** CREATE a non-unique INDEX (multi-valued) that maps a non-unique attribute - * to the tuple containing it. 
- * @see `scalation.database.MultiMap` - * @param atr the attribute/column to create the non-unique index on - */ - def create_mindex (atr: String): Unit = - debug ("create_mindex", s"create a non-unique index of type ${index.getClass.getName}") -// val newIndex = MIndexMap [ValueType, Tuple] () - val newIndex = MIndexMap [Tuple] () - for t <- tuples do - val t_atr = (pull (t, atr)) // non-unique attribute - newIndex.addOne1 (t_atr, t) // add key-value pair into new index - end for - mindex += atr -> newIndex // add new index into the mindex map - end create_mindex - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** DROP the primary INDEX that maps the primary key to the tuple containing it. - */ - def drop_index (): Unit = - index.clear () - hasIndex = false - end drop_index - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** DROP a secondary INDEX that maps a secondary key to the tuple containing it. - */ - def drop_sindex (atr: String): Unit = - val oldIndex = sindex.getOrElse (atr, null) - if oldIndex != null then - oldIndex.clear () - sindex -= atr - else - flaw ("drop_sindex", s"no index found for attribute = $atr") - end if - end drop_sindex - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** DROP a non-unique INDEX that maps a non-unique attribute to the tuple containing it. - */ - def drop_mindex (atr: String): Unit = - val oldIndex = mindex.getOrElse (atr, null) - if oldIndex != null then - oldIndex.clear () - mindex -= atr - else - flaw ("drop_mindex", s"no index found for attribute = $atr") - end if - end drop_mindex - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the table restricted to the given range of rows. 
- * @param r the given range of rows - */ - def apply (r: Range): Table = - val s = new Table (s"${name}_a_${cntr.inc ()}", schema, domain, key) - - s.tuples ++= (for i <- r yield tuples(i)) - s - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the table restricted to the given collection of rows. - * @param pos the given collection of rows - */ - def apply (pos: collection.immutable.IndexedSeq [Int]): Table = - val s = new Table (s"${name}_a_${cntr.inc ()}", schema, domain, key) - - s.tuples ++= (for i <- pos yield tuples(i)) - s - end apply - - // R E L A T I O N A L G E B R A O P E R A T O R S - - // ================================================================== RENAME - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** RENAME this table, returning a shallow copy of this table. - * Usage: customer rename "client" - *-------------------------------------------------------------------------- - * @param newName the new name for the table - */ - def rename (newName: String): Table = - val s = new Table (newName, schema, domain, key) - s.tuples ++= tuples // shallow copy - s - end rename - - // ================================================================= PROJECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** PROJECT the tuples in this table onto the given attribute names. 
- * Uaage: customer project (Array ("street", "ccity")) - *-------------------------------------------------------------------------- - * @param x the schema/attribute names to project onto - */ - def project (x: Schema): Table = - val newKey = if subset (key, x) then key else x - val s = new Table (s"${name}_p_${cntr.inc ()}", x, pull (x), newKey) - - s.tuples ++= (for t <- tuples yield pull (t, x)) - s - end project - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** PROJECT onto the columns with the given column positions (first is column 0). - * Uaage: customer.project (Array (1, 2)) - *-------------------------------------------------------------------------- - * @param cPos the column positions to project onto - */ - def project (cPos: IndexedSeq [Int]): Table = - val mxPos = cPos.max - if mxPos >= cols then flaw ("project", s"mxPos = $mxPos is too large for the number of columns") - - val newAtrs = (for c <- cPos yield schema(c)).toArray - val newKey = if subset (key, newAtrs) then key else newAtrs - val s = new Table (s"${name}_p_${cntr.inc ()}", newAtrs, pull (cPos), newKey) - - s.tuples ++= (for t <- tuples yield pull (t, cPos)) - s - end project - - // ========================================================== PROJECT-SELECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SELECT elements from column a in this table that satisfy the atomic - * predicate apred and PROJECT onto that column. 
- * Usage: customer selproject ("ccity", _ > "Athens") - *-------------------------------------------------------------------------- - * @param a the attribute name of the column used for selection - * @param apred the atomic predicate (`Boolean` function) to be satisfied - */ - def selproject (a: String, apred: APredicate): Table = - val newAtr = Array (a) - val newDom = Array (domain(on(a))) - val s = new Table (s"${name}_s_${cntr.inc ()}", newAtr, newDom, newAtr) - - for t <- tuples do - val ta = pull (t, a) - if apred (ta) then s.tuples += Array (ta) - end for - s - end selproject - - // ================================================================== SELECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SELECT the tuples in this table that satisfy the atomic predicate on column a. - * Usage: customer select ("ccity", _ == "Athens") - *-------------------------------------------------------------------------- - * @param a the attribute name of the column used for selection - * @param apred the atomic predicate (`Boolean` function) to be satisfied - */ - def select (a: String, apred: APredicate): Table = - val s = new Table (s"${name}_s_${cntr.inc ()}", schema, domain, key) - - for t <- tuples if apred (pull (t, a)) do s.tuples += t - s - end select - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SELECT the tuples in this table that satisfy the predicate. 
- * Usage: customer select (t => t(customer.on("ccity")) == "Athens") - *-------------------------------------------------------------------------- - * @param predicate the predicate (`Boolean` function) to be satisfied - */ - def select (predicate: Predicate): Table = - val s = new Table (s"${name}_s_${cntr.inc ()}", schema, domain, key) - - s.tuples ++= tuples.filter (predicate) - s - end select - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SELECT the tuples in this table that satisfy the given simple (3 token) condition. - * Usage: customer select ("ccity == 'Athens'") - *-------------------------------------------------------------------------- - * @param condition the simple condition string "a1 op a2" to be satisfied, where - * a1 is attribute, op is comparison operator, a2 is attribute or value - */ - def select (condition: String): Table = - val s = new Table (s"${name}_s_${cntr.inc ()}", schema, domain, key) - - val (tok, twoAtrs) = parseCond (condition) - val (a1, op, a2) = (tok(0), tok(1), tok(2)) - debug ("select", s"condition: (a1, op, a2) = ($a1, $op, $a2), twoAtrs = $twoAtrs") - - s.tuples ++= selectTups (a1, op, a2, twoAtrs) - s - end select - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SELECT via the INDEX the tuple with the given primary key value pkey. - * Returns an empty table if the primary index has not been created. 
- * Usage: customer select (new KeyType ("Mary")) - *-------------------------------------------------------------------------- - * @param pkey_ the primary key value - */ - def select (pkey_ : KeyType): Table = - val pkey = pkey_.key(0) // FIX - val s = new Table (s"${name}_s_${cntr.inc ()}", schema, domain, key) - - if hasIndex then - debug ("select", s"primary key pkey = $pkey") - val t = index.getOrElse (pkey, null) - if t != null then s.tuples += t - else - flaw ("select", s"must call 'create_index' before using indexed-select on table $name") - end if - s - end select - - // =================================================================== UNION - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** UNION this table and r2. Check that the two tables are compatible. - * If they are not, return the first table. - * Caveat: Assumes the key from the first table still works (@see create_index) - * Acts like union-all, so to remove duplicates call create_index after union. - * Usage: deposit union loan - *-------------------------------------------------------------------------- - * @param r2 the second table - */ - infix def union (r2: Table): Table = - if incompatible (r2) then return this - val s = new Table (s"${name}_u_${cntr.inc ()}", schema, domain, key) - - s.tuples ++= tuples ++ r2.tuples - s - end union - - // =================================================================== MINUS - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute this table MINUS (set difference) table r2 (this - r2). Check that - * the two tables are compatible. If they are not, return the first table. 
- * Usage: account minus loan - *-------------------------------------------------------------------------- - * @param r2 the second table - */ - infix def minus (r2: Table): Table = - if incompatible (r2) then return this - val s = new Table (s"${name}_m_${cntr.inc ()}", schema, domain, key) - - for t <- tuples do if ! (r2 contains t) then s.tuples += t - s - end minus - - // =============================================================== INTERSECT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** INTERSECT this table and r2. Check that the two tables are compatible. - * If they are not, return the first table. - * Usage: account intersect loan - *-------------------------------------------------------------------------- - * @param r2 the second table - */ - infix def intersect (r2: Table): Table = - if incompatible (r2) then return this - val s = new Table (s"${name}_i_${cntr.inc ()}", schema, domain, key) - - for t <- tuples do if r2 contains t then s.tuples += t - s - end intersect - - // ================================================================= PRODUCT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the CARTESIAN PRODUCT of this table and r2 (this × r2). - * Usage: customer product deposit - *-------------------------------------------------------------------------- - * @param r2 the second table - */ - infix def product (r2: Table): Table = - val newKey = key ++ r2.key // requires keys from both tables - val s = new Table (s"${name}_x_${cntr.inc ()}", disambiguate (schema, r2.schema), - domain ++ r2.domain, newKey) - - for t <- tuples; u <- r2.tuples do - s.tuples += t ++ u - end for - s - end product - - // ==================================================================== JOIN - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** JOIN this table and r2 keeping concatenated tuples that satisfy the predicate. 
- * Caveat: Assumes both keys are needed for the new key (depending on the - * predicate both may not be required). - * Usage: customer join ((t, u) => t(customer.on("cname")) == u(deposit.on("cname")), deposit) - *-------------------------------------------------------------------------- - * @param predicate the join predicate to be satisfied - * @param r2 the second table - */ - def join (predicate: Predicate2, r2: Table): Table = - val newKey = key ++ r2.key // requires keys from both tables - val s = new Table (s"${name}_j_${cntr.inc ()}", disambiguate (schema, r2.schema), - domain ++ r2.domain, newKey) - - for t <- tuples; u <- r2.tuples do - if predicate (t, u) then s.tuples += t ++ u - end for - s - end join - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the THETA-JOIN of this table and r2 keeping concatenated tuples that - * satisfy the given simple (3 token) condition. - * Usage: customer join ("cname == cname"), deposit) - *-------------------------------------------------------------------------- - * @param condition the simple condition "a1 op a2" - * @param r2 the second table - */ - def join (condition: String, r2: Table): Table = - val tok = parseCond (condition)._1 - val (a1, op, a2) = (tok(0), tok(1), tok(2)) -// debug ("join", s"(a1, op, a2) = ($a1, $op, $a2)") - - val newKey = key ++ r2.key // requires keys from both tables - - val s = new Table (s"${name}_j_${cntr.inc ()}", disambiguate (schema, r2.schema), - domain ++ r2.domain, newKey) - - s.tuples ++= - (op match - case "==" => tJoinTups (a1, equ, a2, r2) - case "!=" => tJoinTups (a1, neq, a2, r2) - case "<" => tJoinTups (a1, <, a2, r2) - case "<=" => tJoinTups (a1, <=, a2, r2) - case ">" => tJoinTups (a1, >, a2, r2) - case ">=" => tJoinTups (a1, >=, a2, r2) - case _ => flaw ("join", s"$op is an unrecognized operator"); null) - s - end join - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the 
EQUI-JOIN via the NESTED-LOOP JOIN (NLJ) algorithm of this table and - * r2 keeping concatenated tuples that are equal on specified attributes. - * Usage: customer join (Array ("cname"), Array ("cname"), deposit) - *-------------------------------------------------------------------------- - * @param x the subschema/attributes for the first/this table - * @param y the subschema/attributes for the second table - * @param r2 the second table - */ - def join (x: Schema, y: Schema, r2: Table): Table = - val newKey = if subset (x, key) then r2.key // three possibilities for new key - else if subset (y, r2.key) then key - else key ++ r2.key - - val s = new Table (s"${name}_j_${cntr.inc ()}", disambiguate (schema, r2.schema), - domain ++ r2.domain, newKey) - - for t <- tuples; u <- r2.tuples do - if pull (t, x) sameElements r2.pull (u, y) then s.tuples += t ++ u - end for - s - end join - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the EQUI-JOIN via the INDEX JOIN (IJ) algorithm of this table and the - * referenced table keeping concatenated tuples that are equal on the primary key - * and foreign key attributes. Uses a UNIQUE INDEX (UI) on the primary key. - * Caveat: Requires the foreign key table to be first [ fkey_table join ((fkey, pkey_table) ]. 
- * Usage: deposit join (("cname", customer)) - * as if join_, where the index is on the right, i.e., customer - *-------------------------------------------------------------------------- - * @param ref the foreign key reference (foreign key attribute, referenced table) - */ - def join (ref: (String, Table)): Table = -// show_foreign_keys () - val (fkey, refTab) = ref // foreign key, referenced table - - val s = new Table (s"${name}_j_${cntr.inc ()}", disambiguate (schema, refTab.schema), - domain ++ refTab.domain, key) - - if refTab.hasIndex then - for t <- tuples do // iterate over fkey table -// val t_fkey = new KeyType (pull (t, fkey)) - val t_fkey = pull (t, fkey) - debug ("join", s"foreign key t_fkey = $t_fkey") - val u = refTab.index.getOrElse (t_fkey, null) // get u via pkey from refTab - if u != null then s.tuples += t ++ u // add concatenated tuples - end for - else - flaw ("join", s"must call 'create_index' before using indexed-join on ${refTab.name}") - end if - s - end join - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the EQUI-JOIN via the INDEX JOIN (IJ) algorithm of this table and the - * referenced table keeping concatenated tuples that are equal on the primary key - * and foreign key attributes. Uses a NON-UNIQUE INDEX (NUI) on the foreign key. - * Caveat: Requires the foreign key table to be first [ fkey_table _join ((fkey, pkey_table) ]. 
- * Usage: deposit _join (("cname", customer)) - * where the index is on the left, i.e., deposit - *-------------------------------------------------------------------------- - * @param ref the foreign key reference (foreign key attribute, referenced table) - */ - def _join (ref: (String, Table)): Table = -// show_foreign_keys () - val (fkey, refTab) = ref // foreign key, referenced table - - val s = new Table (s"${name}_j_${cntr.inc ()}", disambiguate (schema, refTab.schema), - domain ++ refTab.domain, key) - - if hasIndex then - for u <- refTab.tuples do // iterate over pkey/refTab table - val u_pkey = pull (u, key) - debug ("join", s"primary key u_pkey = $u_pkey") - val idx = mindex(key(0)) // select multi-index by attribute - val ts = idx.getOrElse (u_pkey(0), null) // get {t} via fkey from this table - if ts != null then - for t <- ts do s.tuples += t ++ u // add concatenated tuples - end for - else - flaw (")join", s"must call 'create_index' before using indexed-join on $name") - end if - s - end _join - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the EQUI-JOIN via the SORT-MERGE JOIN (SMJ) algorithm of this table and the - * referenced table keeping concatenated tuples that are equal on the primary key - * and foreign key attributes. - * Caveat: Requires the foreign key table to be first [ fkey_table _join_ ((fkey, pkey_table) ]. 
- * Usage: deposit _join_ (("cname", customer)) - * where both sides (left and right) must be in order - *-------------------------------------------------------------------------- - * @param ref the foreign key reference (foreign key attribute, referenced table) - */ - def _join_ (ref: (String, Table)): Table = -// show_foreign_keys () - val (fkey, refTab) = ref // foreign key, referenced table - - val s = new Table (s"${name}_j_${cntr.inc ()}", disambiguate (schema, refTab.schema), - domain ++ refTab.domain, key) - - val t_sz = tuples.size // number of typles in foreign key table - val u_sz = refTab.tuples.size // number of typles in foreign key table - val pkey = refTab.key (0) // again requires non-composite primary keys - val r1 = orderBy (fkey) // order the foreign key table - val r2 = refTab.orderBy (pkey) // order the primary key table - - var i, j = 0 // cursors i and j for foreign, primary key tables - var (t, u) = (r1.tuples(i), r2.tuples(j)) - var (t_k, u_k) = (pull (t, fkey), pull (u, fkey)) - - while i < t_sz do - while j < u_sz && u_k < t_k do - j += 1 - if j < u_sz then { u = r2.tuples(j); u_k = pull (u, fkey) } - while i < t_sz && u_k >= t_k do - if u_k == t_k then s.tuples += t ++ u - i += 1 - if i < t_sz then { t = r1.tuples(i); t_k = pull (t, fkey) } - end while - debug ("join", s"cursors: i = $i, j = $j") - s - end _join_ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the NATURAL JOIN via the NESTED LOOP JOIN (NLJ) algorithm of this table and - * r2 keeping concatenated tuples that agree on the common attributes. 
- * Usage: customer join deposit - *-------------------------------------------------------------------------- - * @param r2 the second table - */ - infix def join (r2: Table): Table = -// val common = schema intersect r2.schema // common attributes - val common = meet (schema, r2.schema) // common attributes - debug ("join", s"common = ${stringOf (common)}") - val rest = r2.schema diff common - val newKey = if subset (common, key) then r2.key // three possibilities for new key - else if subset (common, r2.key) then key - else key ++ r2.key - - val s = new Table (s"${name}_j_${cntr.inc ()}", schema ++ rest, - domain ++ r2.pull (rest), newKey) - - for t <- tuples; u <- r2.tuples do - if pull (t, common) sameElements r2.pull (u, common) then - s.tuples += t ++ r2.pull (u, rest) - end if - end for - s - end join - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the NATURAL JOIN via the INDEX JOIN (IJ) algorithm of this table and the - * referenced table keeping concatenated tuples that are equal on the primary key - * and foreign key attributes. Uses a UNIQUE INDEX (UI) on the primary key. - * Caveat: Requires the foreign key table to be first [ fkey_table join_ pkey_table ]. 
- * Usage: deposit join_ customer - * where the index is on the right, i.e., customer - *-------------------------------------------------------------------------- - * @param r2 the second table - */ - infix def join_ (r2: Table): Table = -// val common = schema intersect r2.schema // common attributes - val common = meet (schema, r2.schema) // common attributes - debug ("join", s"common = ${stringOf (common)}") - val rest = r2.schema diff common - val newKey = if subset (common, key) then r2.key // three possibilities for new key - else if subset (common, r2.key) then key - else key ++ r2.key - - val s = new Table (s"${name}_j_${cntr.inc ()}", schema ++ rest, - domain ++ r2.pull (rest), newKey) - - // implement IJ-UI - - s - end join_ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the NATURAL JOIN via the INDEX JOIN (IJ) algorithm of this table and the - * referenced table keeping concatenated tuples that are equal on the primary key - * and foreign key attributes. Uses a NON-UNIQUE INDEX () on the foreign key. - * Caveat: Requires the foreign key table to be first [ fkey_table _join pkey_table ]. 
- * Usage: deposit _join customer - * where the index is on the left, i.e., deposit - *-------------------------------------------------------------------------- - * @param r2 the second table - */ - infix def _join (r2: Table): Table = -// val common = schema intersect r2.schema // common attributes - val common = meet (schema, r2.schema) // common attributes - debug ("join", s"common = ${stringOf (common)}") - val rest = r2.schema diff common - val newKey = if subset (common, key) then r2.key // three possibilities for new key - else if subset (common, r2.key) then key - else key ++ r2.key - - val s = new Table (s"${name}_j_${cntr.inc ()}", schema ++ rest, - domain ++ r2.pull (rest), newKey) - - // implement IJ-NUI - - s - end _join - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the EQUI-JOIN via the SORT-MERGE JOIN (SMJ) algorithm of this table and the - * referenced table keeping concatenated tuples that are equal on the primary key - * and foreign key attributes. - * Caveat: Requires the foreign key table to be first [ fkey_table _join_ pkey_table ]. 
- * Usage: deposit _join_ customer - * where both sides (left and right) must be in order - *-------------------------------------------------------------------------- - * @param r2 the second table - */ - infix def _join_ (r2: Table): Table = -// val common = schema intersect r2.schema // common attributes - val common = meet (schema, r2.schema) // common attributes - debug ("join", s"common = ${stringOf (common)}") - val rest = r2.schema diff common - val newKey = if subset (common, key) then r2.key // three possibilities for new key - else if subset (common, r2.key) then key - else key ++ r2.key - - val s = new Table (s"${name}_j_${cntr.inc ()}", schema ++ rest, - domain ++ r2.pull (rest), newKey) - - // implement SMJ - - s - end _join_ - - // =============================================================== LEFT-JOIN - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the LEFT-EQUI-JOIN of this table and r2 keeping concatenated tuples - * that are equal on specified attributes. Also, keep all tuples in the left - * table padding the missing attributes with null. - * For right-join swap table1 and table2, e.g., table1.leftJoin (... table2) - * Usage: customer leftJoin (Array ("cname"), Array ("cname"), deposit) - *-------------------------------------------------------------------------- - * @param x the subschema/attributes for the left/first/this table - * @param y the subschema/attributes for the right/second table - * @param r2 the second table - */ - def leftJoin (x: Schema, y: Schema, r2: Table): Table = - val s = join (x, y, r2) - - val absentTuple = nullTuple (r2.domain) - val ss = s.project (schema) // join projected onto original schema - for t <- tuples if ! 
(ss contains t) do - s.tuples += t ++ absentTuple - end for - s - end leftJoin - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the LEFT-EQUI-JOIN via the INDEX of this table and the referenced table keeping - * concatenated tuples that are equal on the primary key and foreign key attributes. - * Caveat: Requires the foreign key table to be first [ fkey_table join ((fkey, pkey_table) ]. - * Usage: deposit leftJoin (("cname", customer)) - *-------------------------------------------------------------------------- - * @param ref the foreign key reference (foreign key attribute, referenced table) - */ - def leftJoin (ref: (String, Table)): Table = - val (fkey, refTab) = ref - val s = join (ref) - - val absentTuple = nullTuple (refTab.domain) - val ss = s.project (schema) // join projected onto original schema - for t <- tuples if ! (ss contains t) do - s.tuples += t ++ absentTuple - end for - s - end leftJoin - - // ================================================================== DIVIDE - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** DIVIDE this table by table r2. Requires a tuple in the quotient part of - * this table to be paired with all tuples in table r2. - * Usage: deposit.project ("cname, bname") divide branch.project ("bname") - *-------------------------------------------------------------------------- - * @param r2 the second table - */ - infix def divide (r2: Table): Table = - val divisor = r2.schema - if ! subset (divisor, schema) then flaw ("divide", "divisor schema must be a subset of schema") - val quotient = schema diff divisor - val newKey = if subset (key, quotient) then key else quotient - val s = new Table (s"${name}_d_${cntr.inc ()}", quotient, pull (quotient), newKey) - - val q = project (quotient) - var keep = false - for t <- q.tuples do - keep = true - breakable { - for u <- r2.tuples do - if ! 
(this contains t ++ u) then { keep = false; break () } - end for - } // breakable - if keep then s.tuples += t - end for - s - end divide - - // ================================================================ GROUP-BY - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** GROUP this table BY the specified attribute, returning this table. - * Each value for attribute ag will be mapped to a collection of tuples. - * Usage: deposit groupBy "bname" - *-------------------------------------------------------------------------- - * @param ag the attribute to group by - */ - def groupBy (ag: String): Table = - if ! (schema contains ag) then - flaw ("groupBy", s"ag = $ag is not contained in schema") - end if - - val col = on(ag) // the column number for ag - for t <- tuples do - val gkey = t(col) - val group = groupMap.getOrElseUpdate (gkey, Bag [Tuple] ()) - group += t // add tuple t to gkey's group - end for - this - end groupBy - - // =============================================================== AGGREGATE - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Assuming this table has been grouped by attribute ag, create a table - * where the first column is ag and the rest are AGGREGATE FUNCTIONs applied - * to their corresponding attributes. 
- * Usage: deposit aggregate ("bname", (avg, "balance")) - *-------------------------------------------------------------------------- - * @param ag the attribute the table has been grouped on - * @param f_as the aggregate function and the attribute to apply it to (as varargs) - */ - def aggregate (ag: String, f_as: (AggFunction, String)*): Table = - val n = f_as.size + 1 - val cols = Array.ofDim [Int] (n - 1) - val schm = Array.ofDim [String] (n) - schm(0) = ag - for j <- f_as.indices do - cols(j) = on(f_as(j)._2) // the column number for atr j - schm(j+1) = f_as(j)._2 - end for - val s = new Table (s"${name}_a_${cntr.inc ()}", schm, pull (schm), Array (ag)) - - for (gkey, tups) <- groupMap do - val t = Array.ofDim [ValueType] (n) - t(0) = gkey - for j <- f_as.indices do t(j+1) = f_as(j)._1 (col (cols(j), tups)) - s.tuples += t - end for - s - end aggregate - - // ================================================================ ORDER-BY - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** ORDER-BY the given attributes, i.e., reorder the tuples in this table into - * 'ascending' order. A stable sorting is used to allow sorting on multiple attributes. - * Usage: deposit orderBy "bname" - *-------------------------------------------------------------------------- - * @param x the subschema/attributes to order by - */ - def orderBy (x: String*): Table = - val s = new Table (s"${name}_o_${cntr.inc ()}", schema, domain, key) - - val perm = rankOrder (x*) - for i <- perm do s.tuples += tuples(i) - s - end orderBy - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** ORDER-BY-DESC the given attributes, i.e., reorder the tuples in this table into - * 'descending' order. A stable sorting is used to allow sorting on multiple attributes. 
- * Usage: deposit orderByDesc "bname" - *-------------------------------------------------------------------------- - * @param x the subschema/attributes to order by - */ - def orderByDesc (x: String*): Table = - val s = new Table (s"${name}_o_${cntr.inc ()}", schema, domain, key) - - val perm = rankOrder (x*) - for i <- perm.reverse do s.tuples += tuples(i) - s - end orderByDesc - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the basic statistics for each column of this table. - */ - def stats: Table = - val s = new Table (s"${name}_stats", - Array ("column", "count", "countd", "min", "max", "sum", "avg"), - Array ('S', 'I', 'I', 'S', 'S', 'D', 'D'), Array ("column")) - - for j <- colIndices do s.add (Table.stats (schema(j), col(j))) - s - end stats - - // D A T A M A N I P U L A T I O N - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** ADD (insert) tuple t into this table checking to make sure the domains are correct. - * Also, checks referential integrity for any foreign keys in the tuple. - * Return true iff the tuple passes the type check and reference check. - * @param t the tuple to be inserted - */ - def add (t: Tuple): Table = - if typeCheck (t) && referenceCheck (t) then - if hasIndex then -// val pkey = new KeyType (pull (t, key)) // values for primary key part of t - val pkey = pull (t, key)(0) // values for primary key part of t - if index.getOrElse (pkey, null) == null then // check if it's a duplicate - index += pkey -> t // add to index map - tuples += t // add to tuples - else - flaw ("add", s"$name: tuple ${stringOf (t)} has a duplicate value for its primary key") - end if - else - tuples += t // no index - allow duplicates - end if - end if - this - end add - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** UPDATE the column with attribute name a using newVal for elements with value - * matchVal. 
Return true iff at least one tuple is updated. - * @param a the attribute name for the column to be updated - * @param newVal the value used to assign updated values - * @param matchVal the value to be matched to elements - */ - def update (a: String, newVal: ValueType, matchVal: ValueType): Boolean = - var updated = false - if hasIndex && (key contains a) then - flaw ("update", "attempt to update an indexed primary key: use delete and add") - end if - - val j = on(a) - for i <- tuples.indices do - if tuples(i)(j) == matchVal then - tuples(i)(j) = newVal - updated = true - end if - end for - updated - end update - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** UPDATE the column with attribute name a using function func for elements with - * value matchVal. Return true iff at least one tuple is updated. - * @param a the attribute name for the column to be updated - * @param func the function used to assign updated values - * @param matchVal the value to be matched to elements - */ - def update (a: String, func: ValueType => ValueType, matchVal: ValueType): Boolean = - var updated = false - if hasIndex && (key contains a) then - flaw ("update", "attempt to update an indexed primary key: use delete and add") - end if - - val funcVal = func (matchVal) - val j = on(a) - for i <- tuples.indices do - if tuples(i)(j) == matchVal then - tuples(i)(j) = funcVal - updated = true - end if - end for - updated - end update - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** DELETE all tuples in this table satisfying the deletion predicate. - * If there is an index, remove those tuples from the index as well. - * Return true iff at least one tuple is deleted. 
- * @param predicate the predicate that specifies which tuples to delete - */ - def delete (predicate: Predicate): Boolean = - val rem = tuples.filter (predicate) - for t <- rem do - tuples -= t // remove from tuples -// if hasIndex then index -= new KeyType (pull (t, key)) // remove from index map - if hasIndex then index -= pull (t, key)(0) // remove from index map - end for - rem.size > 0 - end delete - - // C O N V E R T - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** CONVERT this table to a matrix of doubles by making the necessary - * type transformations. - * @see the `fromMatrix` method - * @param cols the column positions to use for forming the matrix - */ - def toMatrix (cols: Array [Int] = Array.range (0, schema.size)): MatrixD = - val (m, n) = (tuples.size, cols.size) - - val a = Array.ofDim [Double] (m, n) - for j <- 0 until n do - val jj = cols(j) - domain(jj) match - case 'S' | 'X' => val x = VectorS.map2Int (col(jj).map (_.toString))._1 - for i <- 0 until m do a(i)(j) = x(i).toDouble - case 'T' => val x = VectorT.map2Long (col(jj).map (TimeNum.fromValueType (_)))._1 - for i <- 0 until m do a(i)(j) = x(i).toDouble - case _ => for i <- 0 until m do a(i)(j) = tuples(i)(jj).toDouble - end for - - new MatrixD (m, n, a) - end toMatrix - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** CONVERT this table to a matrix and a vector of doubles by making the necessary - * type transformations. - * Usage: table -> (X, y) for linear algebra/regression problem Xb = y. 
- * @param cols the column positions to use for forming the matrix - * @param colj the column position to use for forming the vector - */ - def toMatrixV (cols: Array [Int] = Array.range (0, schema.size-1), - colj: Int = schema.size-1): (MatrixD, VectorD) = - val (m, n) = (tuples.size, cols.size) - - val a = Array.ofDim [Double] (m, n) - for j <- 0 until n do - val jj = cols(j) - domain(j) match - case 'S' | 'X' => val x = VectorS.map2Int (col(jj).map (_.toString))._1 - for i <- 0 until m do a(i)(j) = x(i).toDouble - case 'T' => val x = VectorT.map2Long (col(jj).map (TimeNum.fromValueType (_)))._1 - for i <- 0 until m do a(i)(j) = x(i).toDouble - case _ => for i <- 0 until m do a(i)(j) = tuples(i)(jj).toDouble - end for - - val b = Array.ofDim [Double] (m) - domain(colj) match - case 'S' | 'X' => val x = VectorS.map2Int (col(colj).map (_.toString))._1 - for i <- 0 until m do b(i) = x(i).toDouble - case 'T' => val x = VectorT.map2Long (col(colj).map (TimeNum.fromValueType (_)))._1 - for i <- 0 until m do b(i) = x(i).toDouble - case _ => for i <- 0 until m do b(i) = tuples(i)(colj).toDouble - - (new MatrixD (m, n, a), new VectorD (m, b)) - end toMatrixV - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** CONVERT the colj column of this table into a vector of doubles, etc. 
- * @param colj the column position to use for the vector - */ - def toVectorD (colj: Int = 0): VectorD = - val b = Array.ofDim [Double] (rows) - for i <- indices do b(i) = tuples(i)(colj).toDouble - new VectorD (rows, b) - end toVectorD - - def toVectorI (colj: Int = 0): VectorI = - val b = Array.ofDim [Int] (rows) - for i <- indices do b(i) = tuples(i)(colj).toInt - new VectorI (rows, b) - end toVectorI - - def toVectorL (colj: Int = 0): VectorL = - val b = Array.ofDim [Long] (rows) - for i <- indices do b(i) = tuples(i)(colj).toLong - new VectorL (rows, b) - end toVectorL - - def toVectorS (colj: Int = 0): VectorS = - val b = Array.ofDim [String] (rows) - for i <- indices do b(i) = tuples(i)(colj).toString - new VectorS (rows, b) - end toVectorS - - def toVectorT (colj: Int = 0): VectorT = - val b = Array.ofDim [TimeNum] (rows) - for i <- indices do b(i) = tuples(i)(colj).asInstanceOf [TimeNum] - new VectorT (rows, b) - end toVectorT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return a copy of this table limited to the first n tuples/rows. - * @param n the number of tuples/rows to keep - */ - def limit (n: Int): Table = - val s = new Table (name + "_$n", schema, domain, key) - s.tuples ++= tuples.slice (0, n) - s - end limit - - // O U T P U T - - private val width_ = 18 // default column width - private val width = Array.fill (domain.size) (width_) // width for each column - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reset the width of column col to w. - * @param col the column whose width is to be adjusted - * @param w the new width (# chars) for column col - */ - def resetWidth (col: Int, w: Int): Unit = width(col) = w - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SHOW/print this table, one tuple per row. 
- * Formatting: regular column is 'width_' chars wide, 'X' is double that - * FIX - currently only works for width_, not width - * @param rng the range of tuples to show (e.g, 0 until 10), defaults to all - */ - def show (rng: Range = tuples.indices): Unit = - val len = width_ * (schema.size + countX) - println (s"\n>> Table $name with ${rng.size} rows, primary key = ${stringOf (key)}") - println ("|-" + "-" * len + "-|") - print ("| ") - for j <- schema.indices do - val wj = if domain(j) == 'X' then 2 * width_ else width_ - prt (schema(j), wj) - end for - println (" |") - println ("|-" + "-" * len + "-|") - if rows > 0 then - for i <- rng do - print ("| ") - val tuple_i = tuples(i) - if tuple_i.size > domain.size then flaw ("show", s"tuple($i) has size ${tuple_i.size}") - for j <- tuple_i.indices do - val wj = if domain(j) == 'X' then 2 * width_ else width_ - prt (tuple_i(j), wj) - end for - println (" |") - end for - println ("|-" + "-" * len + "-|") - end show - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** PRINT value-type v with a width of w. - * @param v the value to be printed - * @param w the width (# chars) for the column - */ - def prt (v: ValueType, w: Int): Unit = - val str = if v == null then "null" else v.toString - val w0 = str.size - val rem = w - w0 - val lft = max (rem / 2, 0) - val rht = max (rem - lft, 0) - print (" " * lft + str + " " * rht) - end prt - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SHOW/print this table's primary index. - */ - def show_index (): Unit = - println (s"\n>> Table $name has indexed primary key = ${stringOf (key)}") - for (k, v) <- index do println (s"index: ${stringOf (k)} -> ${stringOf (v)}") - end show_index - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SHOW/print this table's foreign keys. 
- */ - def show_foreign_keys (): Unit = - println (s"\n>> Table $name has foreign keys:") - for (fk, fkt) <- linkTypes do println (s"\t foreign key link = $fk references ${fkt.name}") - end show_foreign_keys - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** SAVE this table in a file using serialization. - * @see load in `Table` object - */ - def save (): Unit = - val oos = new ObjectOutputStream (new FileOutputStream (STORE_DIR + name + SER)) - oos.writeObject (this) - oos.close () - end save - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** WRITE this table into a Comma-Separated-Value (CSV) file with each tuple - * written to a line. - * @param fileName the file name of the data file (defaults to "name.csv") - */ - def writeCSV (fileName: String = name + ".csv"): Unit = - val out = new PrintWriter (DATA_DIR + fileName) - out.println (stringOf (schema).drop (6).dropRight (1)) - for i <- tuples.indices do - val tuple_i = stringOf (tuples(i)) - out.println (tuple_i.drop (6).dropRight (1)) - end for - out.close - end writeCSV - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** WRITE this table into a JavaScript Object Notation (JSON) file. - * @param fileName the file name of the data file - */ - def writeJSON (fileName: String = name + ".json"): Unit = ??? - /* - val gson = new Gson () - val jsonStr = gson.toJson (this) - debug ("writeJSON", s"jsonStr = ${jsonStr.slice (0, min (jsonStr.size, 5000))}") - val out = new PrintWriter (DATA_DIR + fileName) - out.println (jsonStr) - out.close () - end writeJSON - */ - - // P R I V A T E M E T H O D S - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the tuples in this table that satisfy the given simple (3 token) condition. - * @param a1 the left attribute - * @param op the comparison operator (==, !=, <, <=. 
>, >=) - * @param a2 the right attribute or value - * @param twoAtrs the whether a2 is an attribute or value - * @param tups the initial collection of tuples - */ - protected def selectTups (a1: String, op: String, a2: String, twoAtrs: Boolean, - tups: Bag [Tuple] = tuples): Bag [Tuple] = - if twoAtrs then // a1 and a2 are attributes - val a2_ = a2.toString - op match - case "==" => tups.filter (t => t(on(a1)) == t(on(a2_))) - case "!=" => tups.filter (t => t(on(a1)) != t(on(a2_))) - case "<" => tups.filter (t => t(on(a1)) < t(on(a2_))) - case "<=" => tups.filter (t => t(on(a1)) <= t(on(a2_))) - case ">" => tups.filter (t => t(on(a1)) > t(on(a2_))) - case ">=" => tups.filter (t => t(on(a1)) >= t(on(a2_))) - case _ => flaw ("select", s"$op is an unrecognized operator"); tups - else // a1 is attribute, a2 is value - val col = on(a1) - val a2_ : ValueType = string2Dom (a2, domain (col)) - op match - case "==" => tups.filter (t => t(col) == a2_) - case "!=" => tups.filter (t => t(col) != a2_) - case "<" => tups.filter (t => t(col) < a2_) - case "<=" => tups.filter (t => t(col) <= a2_) - case ">" => tups.filter (t => t(col) > a2_) - case ">=" => tups.filter (t => t(col) >= a2_) - case _ => flaw ("select", s"$op is an unrecognized operator"); tups - end if - end selectTups - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert a `String` into a `ValueType` with the given domain. - * @param str the given string - * @param dom the domain/data-type to convert it into - */ - def string2Dom (str: String, dom: Char): ValueType = - dom match - case 'D' => str.toDouble - case 'I' => str.toInt - case 'L' => str.toLong - case 'S' | 'X' => str - case 'T' => TimeNum (str) - end string2Dom - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the theta-join tuples for this table and r2 keeping concatenated tuples that - * satisfy the comparison (theta) operator on the specified attributes. 
- * @param a1 the attribute from the first/this table - * @param op the comparison operator (==, !=, <, <=. >, >=) - * @param a2 the attribute from the second table - * @param r2 the second table - */ - private def tJoinTups (a1: String, op: (ValueType, ValueType) => Boolean, a2: String, - r2: Table): Bag [Tuple] = - val tups = Bag [Tuple] () - for t <- tuples; u <- r2.tuples do - if op (t(on(a1)), u(r2.on(a2))) then tups += t ++ u - end for - tups - end tJoinTups - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the rank order of the tuples in this table by performing indirect - * merge-sort on the given attributes. - * @param x the attributes to indirectly sort on - */ - private def rankOrder (x: String*): Array [Int] = - var perm: Array [Int] = null // permutation giving rank order - - for j <- x.indices do - val col_j = col (on (x (j))) - perm = if j == 0 then (new MergeSortIndirect (col_j)()).isort () - else (new MergeSortIndirect (col_j)(perm)).isort () - end for - perm - end rankOrder - -end Table - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `tableTest` main function tests the `Table` class with queries on the - * Bank database. 
- * > runMain scalation.database.table.tableTest - */ -@main def tableTest (): Unit = - - // Data Definition Language - - val customer = Table ("customer", "cname, street, ccity", "S, S, S", "cname") - val branch = Table ("branch", "bname, assets, bcity", "S, D, S", "bname") - val deposit = Table ("deposit", "accno, balance, cname, bname", "I, D, S, S", "accno") - val loan = Table ("loan", "loanno, amount, cname, bname", "I, D, S, S", "loanno") - - //-------------------------------------------------------------------------- - banner ("Populate Database") - - customer += ("Peter", "Oak St", "Bogart") - += ("Paul", "Elm St", "Watkinsville") - += ("Mary", "Maple St", "Athens") - customer.show () - - branch += ("Alps", 20000000.0, "Athens") - += ("Downtown", 30000000.0, "Athens") - += ("Lake", 10000000.0, "Bogart") - branch.show () - - deposit += (11, 2000.0, "Peter", "Lake") - += (12, 1500.0, "Paul", "Alps") - += (13, 2500.0, "Paul", "Downtown") - += (14, 2500.0, "Paul", "Lake") - += (15, 3000.0, "Mary", "Alps") - += (16, 1000.0, "Mary", "Downtown") - deposit.show () - - loan += (21, 2200.0, "Peter", "Alps") - += (22, 2100.0, "Peter", "Downtown") - += (23, 1500.0, "Paul", "Alps") - += (24, 2500.0, "Paul", "Downtown") - += (25, 3000.0, "Mary", "Alps") - += (26, 1000.0, "Mary", "Lake") - loan.show () - - //-------------------------------------------------------------------------- - banner ("Show Table Statistics") - - customer.stats.show () - branch.stats.show () - deposit.stats.show () - loan.stats.show () - - //-------------------------------------------------------------------------- - banner ("Verify Usage Queries") - - import Table._ - - var a, q: Table = null - - banner (""" customer rename "client" """) - q = customer.rename ("client") - q.show () - - banner (""" customer.project (Array ("street", "ccity")) """) - q = customer.project (Array ("street", "ccity")) - q.show () - - banner (""" customer.project (Array (1, 2)) """) - q = customer.project (Array (1, 2)) 
- q.show () - - banner (""" customer.selproject ("ccity", _ > "Athens") """) - q = customer.selproject ("ccity", _ > "Athens") - q.show () - - banner (""" customer.select ("ccity", _ == "Athens") """) - q = customer.select ("ccity", _ == "Athens") - q.show () - - banner (""" customer.select (t => t(customer.on("ccity")) == "Athens") """) - q = customer.select (t => t(customer.on("ccity")) == "Athens") - q.show () - - banner (""" customer.select ("ccity == 'Athens'") """) - q = customer.select ("ccity == 'Athens'") - q.show () - - banner (""" customer.select (new KeyType ("Mary")) """) - q = customer.select (new KeyType ("Mary")) - q.show () - - banner (""" deposit union loan """) - a = deposit union loan // save as a for account - a.show () - - banner (""" a minus loan """) - q = a minus loan - q.show () - - banner (""" a intersect loan """) - q = a intersect loan - q.show () - - banner (""" customer product deposit """) - q = customer product deposit - q.show () - - banner (""" customer.join ((t, u) => t(customer.on("cname")) == u(deposit.on("cname")), deposit) """) - q = customer.join ((t, u) => t(customer.on("cname")) == u(deposit.on("cname")), deposit) - q.show () - - banner (""" customer.join ("cname == cname", deposit) """) - q = customer.join ("cname == cname", deposit) - q.show () - - banner (""" customer.join (Array ("cname"), Array ("cname"), deposit) """) - q = customer.join (Array ("cname"), Array ("cname"), deposit) - q.show () - - banner (""" deposit.join (("cname", customer)) """) - q = deposit.join (("cname", customer)) - q.show () - - banner (""" customer join deposit """) - q = customer join deposit - q.show () - - banner (""" customer.leftJoin (Array ("cname"), Array ("cname"), deposit) """) - q = customer.leftJoin (Array ("cname"), Array ("cname"), deposit) - q.show () - - banner (""" deposit.leftjoin (("cname", customer)) """) - deposit.create_index () - q = deposit.leftJoin (("cname", customer)) - q.show () - - banner (""" deposit.project 
("cname, bname") divide branch.project ("bname") """) - q = deposit.project ("cname, bname") divide branch.project ("bname") - q.show () - - banner (""" deposit.groupBy ("bname") """) - q = deposit.groupBy ("bname") - q.show () - - banner (""" deposit.aggregate ("bname", (avg, "balance")) """) - q = deposit.aggregate ("bname", (avg, "balance")) - q.show () - - banner (""" deposit.orderBy ("bname") """) - q = deposit.orderBy ("bname") - q.show () - - banner (""" deposit.orderByDesc ("bname") """) - q = deposit.orderByDesc ("bname") - q.show () - - //-------------------------------------------------------------------------- - banner ("Example Queries") - - banner ("Names of customers who live in Athens") - val liveAthens = customer.σ ("ccity == 'Athens'").π ("cname") - liveAthens.show () - - banner ("Names of customers who bank (deposits) in Athens") - val bankAthens = (deposit ⋈ branch).σ ("bcity == 'Athens'").π ("cname") - bankAthens.show () - - banner ("Names of customers who live or bank in Athens") - val liveBank = customer.σ ("ccity == 'Athens'").π ("cname") ⋃ - (deposit ⋈ branch).σ ("bcity == 'Athens'").π ("cname") - liveBank.create_index () - liveBank.show () - - banner ("Names of customers who live and bank in the same city") - val sameCity = (customer ⋈ deposit ⋈ branch).σ ("ccity == bcity").π ("cname") - sameCity.create_index () - sameCity.show () - - banner ("Names and account numbers of customers with the largest balance") - val largest = deposit.π ("cname, accno") - (deposit ⋈ ("balance < balance", deposit)).π ("cname, accno") - largest.show () - - banner ("Names of customers who are silver club members") - val silver = (loan.π ("cname, bname") ⋂ deposit.π ("cname, bname")).π ("cname") - silver.create_index () - silver.show () - - banner ("Names of customers who are gold club members") - val gold = loan.π ("cname") - (loan.π ("cname, bname") - deposit.π ("cname, bname")).π ("cname") - gold.create_index () - gold.show () - - banner ("Names of branches 
located in Athens") - val inAthens = branch.σ ("bcity == 'Athens'").π ("bname") - inAthens.show () - - banner ("Names of customers who have deposits at all branches located in Athens") - val allAthens = deposit.π ("cname, bname") / inAthens - allAthens.create_index () - allAthens.show () - - banner ("Branch names and their average balances") - val avgBalance = deposit.γ ("bname").aggregate ("bname", (count, "accno"), (avg, "balance")) - avgBalance.show () - -end tableTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `tableTest2` main function tests the `Table` class with queries on the - * Student-Course-Professor database. - * > runMain scalation.database.table.tableTest2 - */ -@main def tableTest2 (): Unit = - - // Data Definition Language - - val student = Table ("student", "sid, sname, street, city, dept, level", - "I, S, S, S, S, I", "sid") - val professor = Table ("professor", "pid, pname, street, city, dept", - "I, S, S, S, S", "pid") - val course = Table ("course", "cid, cname, hours, dept, pid", - "I, X, I, S, I", "cid") - val takes = Table ("takes", "sid, cid", - "I, I", "sid, cid") - - //-------------------------------------------------------------------------- - banner ("Populate Database") - - student += (101, "Peter", "Oak St", "Bogart", "CS", 3) - += (102, "Paul", "Elm St", "Watkinsville", "CE", 4) - += (103, "Mary", "Maple St", "Athens", "CS", 4) - student.show () - - professor += (104, "DrBill", "Plum St", "Athens", "CS") - += (105, "DrJohn", "Pine St", "Watkinsville", "CE") - professor.show () - - course += (4370, "Database Management", 4, "CS", 104) - += (4720, "Comp. 
Architecture", 4, "CE", 104) - += (4760, "Computer Networks", 4, "CS", 105) - course.show () - - takes += (101, 4370) - += (101, 4720) - += (102, 4370) - += (102, 4760) - += (103, 4760) - takes.show () - - // Add links for foreign key contraints and efficient joins (will make any needed primary indices) - - takes.addLinkage ("sid", student) // takes sid references student sid - takes.addLinkage ("cid", course) // takes cid references course cid - course.addLinkage ("pid", professor) // course pid references professor pid - - //-------------------------------------------------------------------------- - banner ("Example Queries") - - banner ("locations of students") - val locs = student.project ("sname, city") - locs.show () - - banner ("living in Athens") - val inAthens = student.select ("city == 'Athens'") - inAthens.show () - - banner ("not living in Athens") - val notAthens = student minus inAthens - notAthens.show () - - banner ("student intersect inAthens") - val inters = student intersect inAthens - inters.show () - - banner ("in-Athens union not-in-Athens") - val unio = inAthens union notAthens - unio.show () - - banner ("course taken: course id") - val taken_id = takes.join (("sid", student)) - .project ("sname, cid") - taken_id.show () - - banner ("course taken: course name") - val taken_nm = takes.join (("sid", student)) - .join (("cid", course)) - .project ("sname, cname") - taken_nm.show () - - banner ("student taught by") - val taught_by = takes.join (("sid", student)) - .join (("cid", course)) - .join (("pid", professor)) - .project ("sname, pname") - taught_by.show () - -end tableTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `tableTest3` main function tests the `Table` object and class by loading - * a dataset from a file. It loads the ScalaTion "covid_19" dataset/CSV file. 
- * - RELATIVE PATHS are from ScalaTion's DATA-DIR (@see Util.scala) - * - FULL PATHS are from the OS's root directory - * Defaults to RELATIVE PATHS; use `setFullPath` method to change. - * > runMain scalation.database.table.tableTest3 - */ -@main def tableTest3 (): Unit = - - //-------------------------------------------------------------------------- - // Verify access to file contents, comment out readFile once verified. - //-------------------------------------------------------------------------- - - val fileName = "covid_19.csv" - println (s"fileName = $fileName") -// readFile (fileName) // for RELATIVE PATHS -// readFile (fileName, fullPath = true) // for FULL PATHS - - //-------------------------------------------------------------------------- - // Use sample row/tuple in the middle of the file that has full information. - //-------------------------------------------------------------------------- - - val data_str = """ -12/29/2020,19658043,205972,184889.714,342639,3611,2372.857,1.04,27782,122664,106708, -253765556,1887683,1484784,0.134,7.5,4387280,4282967,31140,722024,333594,325788 -""" - - //-------------------------------------------------------------------------- - // Use this to guess the data-types/domains. See last step for making corrections. - //-------------------------------------------------------------------------- - - val domain = Table.tuple2type (strim (data_str)) - println (s"domain = ${stringOf (domain)}") - - //-------------------------------------------------------------------------- - // Data stored relative to the "scalation_2.0/data" directory, if not use full path. 
- // Call the Table.load method: - // def load (fileName: String, name: String, domain: Domain, key: String, - // pos_ : Array [Int] = null, sep: String = ","): Table = - //-------------------------------------------------------------------------- - - val covid = Table.load (fileName, "covid", domain, "date") - covid.show (0 until 200) - - //-------------------------------------------------------------------------- - // If this fails due to incorrect domains, save the domain that was printed, - // correct the domains that are incorrect, and try again. - //-------------------------------------------------------------------------- - - //-------------------------------------------------------------------------- - // for fullPath: Table.setFullPath () - // for limit: Table.setLimit (200) - //-------------------------------------------------------------------------- - - //-------------------------------------------------------------------------- - // Serialize and output the data into a JSON file (covid.json) in DATA_DIR - //-------------------------------------------------------------------------- - -// covid.writeJSON () - -end tableTest3 - diff --git a/target/scala-3.6.4/classes/scalation/database/table/Table.tasty b/target/scala-3.6.4/classes/scalation/database/table/Table.tasty deleted file mode 100644 index 72ee7bd36..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/Table.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/TableGen$.class b/target/scala-3.6.4/classes/scalation/database/table/TableGen$.class deleted file mode 100644 index 226ecadd5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/TableGen$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/TableGen$package$.class b/target/scala-3.6.4/classes/scalation/database/table/TableGen$package$.class deleted file mode 100644 index 92e081f67..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/table/TableGen$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/TableGen$package.class b/target/scala-3.6.4/classes/scalation/database/table/TableGen$package.class deleted file mode 100644 index ff0966ebb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/TableGen$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/TableGen$package.tasty b/target/scala-3.6.4/classes/scalation/database/table/TableGen$package.tasty deleted file mode 100644 index 228b27b41..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/TableGen$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/TableGen.class b/target/scala-3.6.4/classes/scalation/database/table/TableGen.class deleted file mode 100644 index e11ba8835..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/TableGen.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/TableGen.tasty b/target/scala-3.6.4/classes/scalation/database/table/TableGen.tasty deleted file mode 100644 index 41ee24968..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/TableGen.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/TimeComparison$package$.class b/target/scala-3.6.4/classes/scalation/database/table/TimeComparison$package$.class deleted file mode 100644 index f6830b56d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/TimeComparison$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/TimeComparison$package.class b/target/scala-3.6.4/classes/scalation/database/table/TimeComparison$package.class deleted file mode 100644 index 86ff7dc61..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/table/TimeComparison$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/TimeComparison$package.tasty b/target/scala-3.6.4/classes/scalation/database/table/TimeComparison$package.tasty deleted file mode 100644 index 4b34b138a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/TimeComparison$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/VTable$.class b/target/scala-3.6.4/classes/scalation/database/table/VTable$.class deleted file mode 100644 index fc7468aec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/VTable$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/VTable$package$.class b/target/scala-3.6.4/classes/scalation/database/table/VTable$package$.class deleted file mode 100644 index 2b15ca424..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/VTable$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/VTable$package.class b/target/scala-3.6.4/classes/scalation/database/table/VTable$package.class deleted file mode 100644 index f1e661229..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/VTable$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/VTable$package.tasty b/target/scala-3.6.4/classes/scalation/database/table/VTable$package.tasty deleted file mode 100644 index 51b5d45bf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/VTable$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/VTable.class b/target/scala-3.6.4/classes/scalation/database/table/VTable.class deleted file mode 100644 index 770e34d8c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/VTable.class 
and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/VTable.tasty b/target/scala-3.6.4/classes/scalation/database/table/VTable.tasty deleted file mode 100644 index 070c5999d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/VTable.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/Vertex$.class b/target/scala-3.6.4/classes/scalation/database/table/Vertex$.class deleted file mode 100644 index 3710cd450..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/Vertex$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/Vertex.class b/target/scala-3.6.4/classes/scalation/database/table/Vertex.class deleted file mode 100644 index bfab5123e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/Vertex.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/Vertex.tasty b/target/scala-3.6.4/classes/scalation/database/table/Vertex.tasty deleted file mode 100644 index e9ca21205..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/Vertex.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/Vertex_$.class b/target/scala-3.6.4/classes/scalation/database/table/Vertex_$.class deleted file mode 100644 index 712742192..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/Vertex_$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/Vertex_.class b/target/scala-3.6.4/classes/scalation/database/table/Vertex_.class deleted file mode 100644 index 4fa41ced7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/Vertex_.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/Vertex_.tasty b/target/scala-3.6.4/classes/scalation/database/table/Vertex_.tasty deleted file mode 100644 index 
aee67fe40..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/Vertex_.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/bankDB.class b/target/scala-3.6.4/classes/scalation/database/table/bankDB.class deleted file mode 100644 index 9c65566a6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/bankDB.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/bankDB.tasty b/target/scala-3.6.4/classes/scalation/database/table/bankDB.tasty deleted file mode 100644 index 32d8f1945..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/bankDB.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/bankDB2.class b/target/scala-3.6.4/classes/scalation/database/table/bankDB2.class deleted file mode 100644 index 4b7636e4d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/bankDB2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/bankDB2.tasty b/target/scala-3.6.4/classes/scalation/database/table/bankDB2.tasty deleted file mode 100644 index 09989e55c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/bankDB2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/gTableTest.class b/target/scala-3.6.4/classes/scalation/database/table/gTableTest.class deleted file mode 100644 index 4fcc4a5a4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/gTableTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/gTableTest.tasty b/target/scala-3.6.4/classes/scalation/database/table/gTableTest.tasty deleted file mode 100644 index 76e8d83c7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/gTableTest.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/table/gTableTest2.class b/target/scala-3.6.4/classes/scalation/database/table/gTableTest2.class deleted file mode 100644 index 422fbb3eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/gTableTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/gTableTest2.tasty b/target/scala-3.6.4/classes/scalation/database/table/gTableTest2.tasty deleted file mode 100644 index 157500ba3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/gTableTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/gTableTest3.class b/target/scala-3.6.4/classes/scalation/database/table/gTableTest3.class deleted file mode 100644 index 767ac9c18..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/gTableTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/gTableTest3.tasty b/target/scala-3.6.4/classes/scalation/database/table/gTableTest3.tasty deleted file mode 100644 index bd6bcf5a7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/gTableTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/index.html b/target/scala-3.6.4/classes/scalation/database/table/index.html deleted file mode 100644 index 406c0efa4..000000000 --- a/target/scala-3.6.4/classes/scalation/database/table/index.html +++ /dev/null @@ -1,17 +0,0 @@ - - -

    Source files in table Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/database/table/kGTableTest.class b/target/scala-3.6.4/classes/scalation/database/table/kGTableTest.class deleted file mode 100644 index bc529ed20..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/kGTableTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/kGTableTest.tasty b/target/scala-3.6.4/classes/scalation/database/table/kGTableTest.tasty deleted file mode 100644 index 80b6320fd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/kGTableTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/kGTableTest2.class b/target/scala-3.6.4/classes/scalation/database/table/kGTableTest2.class deleted file mode 100644 index b7311a681..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/kGTableTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/kGTableTest2.tasty b/target/scala-3.6.4/classes/scalation/database/table/kGTableTest2.tasty deleted file mode 100644 index c1dc47bfa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/kGTableTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/lTableTest.class b/target/scala-3.6.4/classes/scalation/database/table/lTableTest.class deleted file mode 100644 index dece91e4d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/lTableTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/lTableTest.tasty b/target/scala-3.6.4/classes/scalation/database/table/lTableTest.tasty deleted file mode 100644 index 8a104278c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/lTableTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/lTableTest2.class 
b/target/scala-3.6.4/classes/scalation/database/table/lTableTest2.class deleted file mode 100644 index c9718241c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/lTableTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/lTableTest2.tasty b/target/scala-3.6.4/classes/scalation/database/table/lTableTest2.tasty deleted file mode 100644 index 9d89698cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/lTableTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/lTableTest3.class b/target/scala-3.6.4/classes/scalation/database/table/lTableTest3.class deleted file mode 100644 index b34da1639..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/lTableTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/lTableTest3.tasty b/target/scala-3.6.4/classes/scalation/database/table/lTableTest3.tasty deleted file mode 100644 index 883a3a1da..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/lTableTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/lTableTest4.class b/target/scala-3.6.4/classes/scalation/database/table/lTableTest4.class deleted file mode 100644 index a0b212186..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/lTableTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/lTableTest4.tasty b/target/scala-3.6.4/classes/scalation/database/table/lTableTest4.tasty deleted file mode 100644 index 9b0ba039b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/lTableTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/movieDB.class b/target/scala-3.6.4/classes/scalation/database/table/movieDB.class deleted file mode 100644 index db4eb2176..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/database/table/movieDB.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/movieDB.tasty b/target/scala-3.6.4/classes/scalation/database/table/movieDB.tasty deleted file mode 100644 index 3893d5e4b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/movieDB.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/showTabs.class b/target/scala-3.6.4/classes/scalation/database/table/showTabs.class deleted file mode 100644 index 8e4ecb4fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/showTabs.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/showTabs.tasty b/target/scala-3.6.4/classes/scalation/database/table/showTabs.tasty deleted file mode 100644 index ee4bb0b71..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/showTabs.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/tableGenTest.class b/target/scala-3.6.4/classes/scalation/database/table/tableGenTest.class deleted file mode 100644 index b8f00730a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/tableGenTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/tableGenTest.tasty b/target/scala-3.6.4/classes/scalation/database/table/tableGenTest.tasty deleted file mode 100644 index 0f1365599..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/tableGenTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/tableTest.class b/target/scala-3.6.4/classes/scalation/database/table/tableTest.class deleted file mode 100644 index 113156d13..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/tableTest.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/table/tableTest.tasty b/target/scala-3.6.4/classes/scalation/database/table/tableTest.tasty deleted file mode 100644 index 0be41bc72..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/tableTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/tableTest2.class b/target/scala-3.6.4/classes/scalation/database/table/tableTest2.class deleted file mode 100644 index 853038a2c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/tableTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/tableTest2.tasty b/target/scala-3.6.4/classes/scalation/database/table/tableTest2.tasty deleted file mode 100644 index 4cbc91c2a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/tableTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/tableTest3.class b/target/scala-3.6.4/classes/scalation/database/table/tableTest3.class deleted file mode 100644 index 3a2893dbc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/tableTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/tableTest3.tasty b/target/scala-3.6.4/classes/scalation/database/table/tableTest3.tasty deleted file mode 100644 index fe5084aee..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/tableTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/timer_function.class b/target/scala-3.6.4/classes/scalation/database/table/timer_function.class deleted file mode 100644 index 93a66dc4d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/timer_function.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/timer_function.tasty 
b/target/scala-3.6.4/classes/scalation/database/table/timer_function.tasty deleted file mode 100644 index 44de82e9a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/timer_function.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/vTableTest.class b/target/scala-3.6.4/classes/scalation/database/table/vTableTest.class deleted file mode 100644 index 7320a7444..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/vTableTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/vTableTest.tasty b/target/scala-3.6.4/classes/scalation/database/table/vTableTest.tasty deleted file mode 100644 index 17452cf5e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/vTableTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/vTableTest2.class b/target/scala-3.6.4/classes/scalation/database/table/vTableTest2.class deleted file mode 100644 index c665f330e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/vTableTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/table/vTableTest2.tasty b/target/scala-3.6.4/classes/scalation/database/table/vTableTest2.tasty deleted file mode 100644 index 4f8d25533..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/table/vTableTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/timeIntervalTest.class b/target/scala-3.6.4/classes/scalation/database/timeIntervalTest.class deleted file mode 100644 index 3ccc497d0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/timeIntervalTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/timeIntervalTest.tasty b/target/scala-3.6.4/classes/scalation/database/timeIntervalTest.tasty deleted file mode 100644 index ba70ff3a2..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/database/timeIntervalTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/timeIntervalTest2.class b/target/scala-3.6.4/classes/scalation/database/timeIntervalTest2.class deleted file mode 100644 index 895a3ea39..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/timeIntervalTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/timeIntervalTest2.tasty b/target/scala-3.6.4/classes/scalation/database/timeIntervalTest2.tasty deleted file mode 100644 index 93813789c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/timeIntervalTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/timeIntervalTest3.class b/target/scala-3.6.4/classes/scalation/database/timeIntervalTest3.class deleted file mode 100644 index 37dbbf20a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/timeIntervalTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/timeIntervalTest3.tasty b/target/scala-3.6.4/classes/scalation/database/timeIntervalTest3.tasty deleted file mode 100644 index ca74add6a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/timeIntervalTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/timeOfWeekTest.class b/target/scala-3.6.4/classes/scalation/database/timeOfWeekTest.class deleted file mode 100644 index b57d766fa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/timeOfWeekTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/timeOfWeekTest.tasty b/target/scala-3.6.4/classes/scalation/database/timeOfWeekTest.tasty deleted file mode 100644 index 9f1c480a5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/timeOfWeekTest.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/database/treeTest.class b/target/scala-3.6.4/classes/scalation/database/treeTest.class deleted file mode 100644 index 1748ff96d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/treeTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/treeTest.tasty b/target/scala-3.6.4/classes/scalation/database/treeTest.tasty deleted file mode 100644 index cb2c8d0b3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/treeTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/treeTest2.class b/target/scala-3.6.4/classes/scalation/database/treeTest2.class deleted file mode 100644 index 1ca55d3df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/treeTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/treeTest2.tasty b/target/scala-3.6.4/classes/scalation/database/treeTest2.tasty deleted file mode 100644 index 536966bc0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/treeTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/treeTest3.class b/target/scala-3.6.4/classes/scalation/database/treeTest3.class deleted file mode 100644 index 3bf5849e0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/treeTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/treeTest3.tasty b/target/scala-3.6.4/classes/scalation/database/treeTest3.tasty deleted file mode 100644 index 5543e97cf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/treeTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/RDFTriple$.class b/target/scala-3.6.4/classes/scalation/database/triplegraph/RDFTriple$.class deleted file mode 100644 index d7656bbb5..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/triplegraph/RDFTriple$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/RDFTriple.class b/target/scala-3.6.4/classes/scalation/database/triplegraph/RDFTriple.class deleted file mode 100644 index 9281a56cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/RDFTriple.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/RDFTriple.tasty b/target/scala-3.6.4/classes/scalation/database/triplegraph/RDFTriple.tasty deleted file mode 100644 index 0e5a20d83..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/RDFTriple.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/Triple$.class b/target/scala-3.6.4/classes/scalation/database/triplegraph/Triple$.class deleted file mode 100644 index 306e7d8f4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/Triple$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/Triple.class b/target/scala-3.6.4/classes/scalation/database/triplegraph/Triple.class deleted file mode 100644 index f993961e9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/Triple.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/Triple.tasty b/target/scala-3.6.4/classes/scalation/database/triplegraph/Triple.tasty deleted file mode 100644 index bce08dd83..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/Triple.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph$.class b/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph$.class deleted file mode 100644 index f4c30b8e7..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph$package$.class b/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph$package$.class deleted file mode 100644 index c9bcaf75b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph$package.class b/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph$package.class deleted file mode 100644 index f5c0364bd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph$package.tasty b/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph$package.tasty deleted file mode 100644 index c84bb69bd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph.class b/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph.class deleted file mode 100644 index 00a2a7008..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph.tasty b/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph.tasty deleted file mode 100644 index 445ad52da..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraph.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphMatcher.class 
b/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphMatcher.class deleted file mode 100644 index 23efa4ec5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphMatcher.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphMatcher.tasty b/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphMatcher.tasty deleted file mode 100644 index c4ed9efce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphMatcher.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphSim$package$.class b/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphSim$package$.class deleted file mode 100644 index 4c2c27f64..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphSim$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphSim$package.class b/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphSim$package.class deleted file mode 100644 index 8dcad9f13..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphSim$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphSim$package.tasty b/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphSim$package.tasty deleted file mode 100644 index 10ce5af9e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphSim$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphSim.class b/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphSim.class deleted file mode 100644 index d3d0424b2..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphSim.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphSim.tasty b/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphSim.tasty deleted file mode 100644 index 4139a73a4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/TripleGraphSim.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/index.html b/target/scala-3.6.4/classes/scalation/database/triplegraph/index.html deleted file mode 100644 index 9bf9052ff..000000000 --- a/target/scala-3.6.4/classes/scalation/database/triplegraph/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - -

    Source files in triplegraph Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/tripleGraphSimTest.class b/target/scala-3.6.4/classes/scalation/database/triplegraph/tripleGraphSimTest.class deleted file mode 100644 index b7ed133b4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/tripleGraphSimTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/tripleGraphSimTest.tasty b/target/scala-3.6.4/classes/scalation/database/triplegraph/tripleGraphSimTest.tasty deleted file mode 100644 index 947492dbd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/tripleGraphSimTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/tripleGraphTest4.class b/target/scala-3.6.4/classes/scalation/database/triplegraph/tripleGraphTest4.class deleted file mode 100644 index e60948794..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/tripleGraphTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/database/triplegraph/tripleGraphTest4.tasty b/target/scala-3.6.4/classes/scalation/database/triplegraph/tripleGraphTest4.tasty deleted file mode 100644 index 5ec1da32e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/database/triplegraph/tripleGraphTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/doublyLinkedListTest.class b/target/scala-3.6.4/classes/scalation/doublyLinkedListTest.class deleted file mode 100644 index 5c756e33c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/doublyLinkedListTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/doublyLinkedListTest.tasty b/target/scala-3.6.4/classes/scalation/doublyLinkedListTest.tasty deleted file mode 100644 index 535fd4ae9..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/doublyLinkedListTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/BallFlight$package$.class b/target/scala-3.6.4/classes/scalation/dynamics/BallFlight$package$.class deleted file mode 100644 index cb01b1a31..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/BallFlight$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/BallFlight$package.class b/target/scala-3.6.4/classes/scalation/dynamics/BallFlight$package.class deleted file mode 100644 index 391ee9dc6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/BallFlight$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/BallFlight$package.tasty b/target/scala-3.6.4/classes/scalation/dynamics/BallFlight$package.tasty deleted file mode 100644 index 8f774a5b5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/BallFlight$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince$.class b/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince$.class deleted file mode 100644 index 13aaf219b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince$package$.class b/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince$package$.class deleted file mode 100644 index 29809e29e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince$package.class b/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince$package.class deleted file mode 100644 index 6ba91eff5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince$package.class and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince$package.tasty b/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince$package.tasty deleted file mode 100644 index 34ecf2c86..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince.class b/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince.class deleted file mode 100644 index 305e5d48c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince.tasty b/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince.tasty deleted file mode 100644 index 85dd735f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/DormandPrince.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq$.class b/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq$.class deleted file mode 100644 index d4d8ba722..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq$package$.class b/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq$package$.class deleted file mode 100644 index f34a8317f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq$package.class b/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq$package.class deleted file mode 100644 index 1d8cb3a35..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq$package.tasty b/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq$package.tasty 
deleted file mode 100644 index 5f33f8a4c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq.class b/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq.class deleted file mode 100644 index df1a2d0e5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq.tasty b/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq.tasty deleted file mode 100644 index 02182d77b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/DynamicEq.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/FirstOrderPDE$package$.class b/target/scala-3.6.4/classes/scalation/dynamics/FirstOrderPDE$package$.class deleted file mode 100644 index f14bc84c0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/FirstOrderPDE$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/FirstOrderPDE$package.class b/target/scala-3.6.4/classes/scalation/dynamics/FirstOrderPDE$package.class deleted file mode 100644 index 92c922f97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/FirstOrderPDE$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/FirstOrderPDE$package.tasty b/target/scala-3.6.4/classes/scalation/dynamics/FirstOrderPDE$package.tasty deleted file mode 100644 index a1474479d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/FirstOrderPDE$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/FirstOrderPDE.class b/target/scala-3.6.4/classes/scalation/dynamics/FirstOrderPDE.class deleted file mode 100644 index 741f4a8a8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/FirstOrderPDE.class and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/FirstOrderPDE.tasty b/target/scala-3.6.4/classes/scalation/dynamics/FirstOrderPDE.tasty deleted file mode 100644 index 7097022f5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/FirstOrderPDE.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/Integrator$package$.class b/target/scala-3.6.4/classes/scalation/dynamics/Integrator$package$.class deleted file mode 100644 index fdecc297f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/Integrator$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/Integrator$package.class b/target/scala-3.6.4/classes/scalation/dynamics/Integrator$package.class deleted file mode 100644 index b3a826c55..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/Integrator$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/Integrator$package.tasty b/target/scala-3.6.4/classes/scalation/dynamics/Integrator$package.tasty deleted file mode 100644 index d5129ee09..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/Integrator$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/Integrator.class b/target/scala-3.6.4/classes/scalation/dynamics/Integrator.class deleted file mode 100644 index c601993e1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/Integrator.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/Integrator.tasty b/target/scala-3.6.4/classes/scalation/dynamics/Integrator.tasty deleted file mode 100644 index e0b7d4ea1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/Integrator.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/LinearDiffEq$package$.class 
b/target/scala-3.6.4/classes/scalation/dynamics/LinearDiffEq$package$.class deleted file mode 100644 index 48a30ab6c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/LinearDiffEq$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/LinearDiffEq$package.class b/target/scala-3.6.4/classes/scalation/dynamics/LinearDiffEq$package.class deleted file mode 100644 index 24e591cc2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/LinearDiffEq$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/LinearDiffEq$package.tasty b/target/scala-3.6.4/classes/scalation/dynamics/LinearDiffEq$package.tasty deleted file mode 100644 index 48df3efb4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/LinearDiffEq$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/LinearDiffEq.class b/target/scala-3.6.4/classes/scalation/dynamics/LinearDiffEq.class deleted file mode 100644 index 5e73068b2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/LinearDiffEq.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/LinearDiffEq.tasty b/target/scala-3.6.4/classes/scalation/dynamics/LinearDiffEq.tasty deleted file mode 100644 index 1b19dfc0d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/LinearDiffEq.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock$.class b/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock$.class deleted file mode 100644 index 550778c7c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock$package$.class b/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock$package$.class deleted file mode 100644 index 564b3419e..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock$package.class b/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock$package.class deleted file mode 100644 index 5eea3acb9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock$package.tasty b/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock$package.tasty deleted file mode 100644 index b1f6a9e11..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock.class b/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock.class deleted file mode 100644 index cc202a25e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock.tasty b/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock.tasty deleted file mode 100644 index a8a654195..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/ModRosenbrock.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/ParabolicPDE$package$.class b/target/scala-3.6.4/classes/scalation/dynamics/ParabolicPDE$package$.class deleted file mode 100644 index 250cfa64b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/ParabolicPDE$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/ParabolicPDE$package.class b/target/scala-3.6.4/classes/scalation/dynamics/ParabolicPDE$package.class deleted file mode 100644 index 3ee1c7000..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/ParabolicPDE$package.class and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/ParabolicPDE$package.tasty b/target/scala-3.6.4/classes/scalation/dynamics/ParabolicPDE$package.tasty deleted file mode 100644 index d8da733ff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/ParabolicPDE$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/ParabolicPDE.class b/target/scala-3.6.4/classes/scalation/dynamics/ParabolicPDE.class deleted file mode 100644 index a4d744587..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/ParabolicPDE.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/ParabolicPDE.tasty b/target/scala-3.6.4/classes/scalation/dynamics/ParabolicPDE.tasty deleted file mode 100644 index 7cbdb9e1f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/ParabolicPDE.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/Radau$.class b/target/scala-3.6.4/classes/scalation/dynamics/Radau$.class deleted file mode 100644 index a3ff54c6c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/Radau$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/Radau$package$.class b/target/scala-3.6.4/classes/scalation/dynamics/Radau$package$.class deleted file mode 100644 index 170a235b9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/Radau$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/Radau$package.class b/target/scala-3.6.4/classes/scalation/dynamics/Radau$package.class deleted file mode 100644 index 7250afd1a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/Radau$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/Radau$package.tasty b/target/scala-3.6.4/classes/scalation/dynamics/Radau$package.tasty deleted file mode 100644 index 
8389096d1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/Radau$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/Radau.class b/target/scala-3.6.4/classes/scalation/dynamics/Radau.class deleted file mode 100644 index 316370181..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/Radau.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/Radau.tasty b/target/scala-3.6.4/classes/scalation/dynamics/Radau.tasty deleted file mode 100644 index 2da248657..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/Radau.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/Reactions$package$.class b/target/scala-3.6.4/classes/scalation/dynamics/Reactions$package$.class deleted file mode 100644 index f3b99a9d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/Reactions$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/Reactions$package.class b/target/scala-3.6.4/classes/scalation/dynamics/Reactions$package.class deleted file mode 100644 index d25783b86..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/Reactions$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/Reactions$package.tasty b/target/scala-3.6.4/classes/scalation/dynamics/Reactions$package.tasty deleted file mode 100644 index 51e0caa1f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/Reactions$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta$.class b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta$.class deleted file mode 100644 index 0b9a24d93..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta$package$.class 
b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta$package$.class deleted file mode 100644 index 672794995..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta$package.class b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta$package.class deleted file mode 100644 index 4be1e69e8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta$package.tasty b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta$package.tasty deleted file mode 100644 index d7188ebed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta.class b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta.class deleted file mode 100644 index 3fbf10134..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta.tasty b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta.tasty deleted file mode 100644 index b216da857..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2$.class b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2$.class deleted file mode 100644 index da44b29d5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2$package$.class b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2$package$.class deleted file mode 100644 index a560a4d43..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2$package.class b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2$package.class deleted file mode 100644 index c7a0e4eb9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2$package.tasty b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2$package.tasty deleted file mode 100644 index 34449ac3f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2.class b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2.class deleted file mode 100644 index 5291a673b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2.tasty b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2.tasty deleted file mode 100644 index 531828cf4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3$.class b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3$.class deleted file mode 100644 index 6993e127c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3$package$.class b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3$package$.class deleted file mode 100644 index 106d3efa0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3$package$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3$package.class b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3$package.class deleted file mode 100644 index 0d50c90d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3$package.tasty b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3$package.tasty deleted file mode 100644 index 88811f2d0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3.class b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3.class deleted file mode 100644 index 58739cbeb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3.tasty b/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3.tasty deleted file mode 100644 index 0b3b6fa88..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/RungeKutta3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/ballFlight.class b/target/scala-3.6.4/classes/scalation/dynamics/ballFlight.class deleted file mode 100644 index 8cae82c1b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/ballFlight.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/ballFlight.tasty b/target/scala-3.6.4/classes/scalation/dynamics/ballFlight.tasty deleted file mode 100644 index c02f4c315..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/ballFlight.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest.class b/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest.class deleted file mode 100644 index 
3537b99c0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest.tasty b/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest.tasty deleted file mode 100644 index 5def4e385..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest2.class b/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest2.class deleted file mode 100644 index f38ae5942..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest2.tasty b/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest2.tasty deleted file mode 100644 index 56fa0a61c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest3.class b/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest3.class deleted file mode 100644 index d3c50e455..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest3.tasty b/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest3.tasty deleted file mode 100644 index 7c87d4cf0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest4.class b/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest4.class deleted file mode 100644 index 42257ed45..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest4.class 
and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest4.tasty b/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest4.tasty deleted file mode 100644 index d84eea22d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/dormandPrinceTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/dynamicEqTest.class b/target/scala-3.6.4/classes/scalation/dynamics/dynamicEqTest.class deleted file mode 100644 index a5c79e431..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/dynamicEqTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/dynamicEqTest.tasty b/target/scala-3.6.4/classes/scalation/dynamics/dynamicEqTest.tasty deleted file mode 100644 index bf64c78d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/dynamicEqTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest.class b/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest.class deleted file mode 100644 index 6e1826b71..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest.tasty b/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest.tasty deleted file mode 100644 index 774dd04a5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest2.class b/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest2.class deleted file mode 100644 index 7ea1003b2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest2.tasty 
b/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest2.tasty deleted file mode 100644 index 2131bbf67..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest3.class b/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest3.class deleted file mode 100644 index 8c1ff4705..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest3.tasty b/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest3.tasty deleted file mode 100644 index 33150bdf5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/firstOrderPDETest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/index.html b/target/scala-3.6.4/classes/scalation/dynamics/index.html deleted file mode 100644 index 6d9803d6c..000000000 --- a/target/scala-3.6.4/classes/scalation/dynamics/index.html +++ /dev/null @@ -1,16 +0,0 @@ - - -

    Source files in dynamics Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/dynamics/integrators.txt b/target/scala-3.6.4/classes/scalation/dynamics/integrators.txt deleted file mode 100644 index 98ff8ea62..000000000 --- a/target/scala-3.6.4/classes/scalation/dynamics/integrators.txt +++ /dev/null @@ -1,37 +0,0 @@ - -ScalaTion ODE Solvers/Integrators ---------------------------------- - -@see www.mathworks.com/help/matlab/math/choose-an-ode-solver.html -@see en.wikipedia.org/wiki/List_of_Runge%E2%80%93Kutta_methods - -A. Explicit, Fixed Step-size Runge-Kutta Methods ------------------------------------------------- - -rk2 (-) in RungeKutta2.scala (2nd-order Modified Euler Explicit Midpoint) -rk3 (-) in RungeKutta2.scala (Strong Stability Preserving 3rd-order Runge-Kutta) -rk4 (-) in RungeKutta2.scala (Classic 4th-order Runge-Kutta) -rk5 (-) in RungeKutta2.scala (Butcher's 5th-order Runge-Kutta) -RungeKutta (-) in RungeKutta.scala (hard-coded version of rk4) - -RungeKutta2 utilizes Butcher Tableaux - -B. Explicit, Adaptive Step-size (Embedded) Runge-Kutta Methods --------------------------------------------------------------- - -rk23 (ode23) in RungeKutta3.scala (Bogacki–Shampine) -rk45 (ode45) in RungeKutta3.scala (Dormand-Prince) -DormandPrince (ode45) in DormandPrince.scala (hard-coded version of rk45) - -RungeKutta3 utilizes Extended Butcher Tableaux - -C. Implicit Runge-Kutta Methods -------------------------------- - -ModRosenbrock (ode23s?) 
in ModRosenbrock.scala -Radau (-) in Radau.scala - ----------------------------------------- -Notes: For Handling Stiffness: C > B > A - Corresponding (MATLAB-solver) - diff --git a/target/scala-3.6.4/classes/scalation/dynamics/linearDiffEqTest.class b/target/scala-3.6.4/classes/scalation/dynamics/linearDiffEqTest.class deleted file mode 100644 index 8206d703d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/linearDiffEqTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/linearDiffEqTest.tasty b/target/scala-3.6.4/classes/scalation/dynamics/linearDiffEqTest.tasty deleted file mode 100644 index 1a4b08a78..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/linearDiffEqTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/modRosenbrockTest.class b/target/scala-3.6.4/classes/scalation/dynamics/modRosenbrockTest.class deleted file mode 100644 index a109de29b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/modRosenbrockTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/modRosenbrockTest.tasty b/target/scala-3.6.4/classes/scalation/dynamics/modRosenbrockTest.tasty deleted file mode 100644 index e43893ee1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/modRosenbrockTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/modRosenbrockTest2.class b/target/scala-3.6.4/classes/scalation/dynamics/modRosenbrockTest2.class deleted file mode 100644 index 132e087a2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/modRosenbrockTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/modRosenbrockTest2.tasty b/target/scala-3.6.4/classes/scalation/dynamics/modRosenbrockTest2.tasty deleted file mode 100644 index 416988cf6..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/dynamics/modRosenbrockTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/parabolicPDETest.class b/target/scala-3.6.4/classes/scalation/dynamics/parabolicPDETest.class deleted file mode 100644 index 7acbb92a7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/parabolicPDETest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/parabolicPDETest.tasty b/target/scala-3.6.4/classes/scalation/dynamics/parabolicPDETest.tasty deleted file mode 100644 index 8bb206049..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/parabolicPDETest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/radauTest.class b/target/scala-3.6.4/classes/scalation/dynamics/radauTest.class deleted file mode 100644 index 6cf75b621..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/radauTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/radauTest.tasty b/target/scala-3.6.4/classes/scalation/dynamics/radauTest.tasty deleted file mode 100644 index 0c9f60552..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/radauTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/reactions.class b/target/scala-3.6.4/classes/scalation/dynamics/reactions.class deleted file mode 100644 index 44a372392..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/reactions.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/reactions.tasty b/target/scala-3.6.4/classes/scalation/dynamics/reactions.tasty deleted file mode 100644 index 3f45c9f37..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/reactions.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta2Test.class 
b/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta2Test.class deleted file mode 100644 index 49de21411..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta2Test.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta2Test.tasty b/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta2Test.tasty deleted file mode 100644 index 8b2d17b58..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta2Test.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta2Test2.class b/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta2Test2.class deleted file mode 100644 index 494eea4b4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta2Test2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta2Test2.tasty b/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta2Test2.tasty deleted file mode 100644 index c777bb5e7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta2Test2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test.class b/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test.class deleted file mode 100644 index e1445adc6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test.tasty b/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test.tasty deleted file mode 100644 index 2610d4fca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test2.class b/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test2.class deleted file mode 100644 index f4ab78d76..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test2.tasty b/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test2.tasty deleted file mode 100644 index 66265031a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test3.class b/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test3.class deleted file mode 100644 index f5a216c82..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test3.tasty b/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test3.tasty deleted file mode 100644 index 506af8c6c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/rungeKutta3Test3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/rungeKuttaTest.class b/target/scala-3.6.4/classes/scalation/dynamics/rungeKuttaTest.class deleted file mode 100644 index bede0780f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/rungeKuttaTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/rungeKuttaTest.tasty b/target/scala-3.6.4/classes/scalation/dynamics/rungeKuttaTest.tasty deleted file mode 100644 index 78d67a70d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/rungeKuttaTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/dynamics/rungeKuttaTest2.class b/target/scala-3.6.4/classes/scalation/dynamics/rungeKuttaTest2.class deleted file mode 100644 index eabbbe892..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/rungeKuttaTest2.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/dynamics/rungeKuttaTest2.tasty b/target/scala-3.6.4/classes/scalation/dynamics/rungeKuttaTest2.tasty deleted file mode 100644 index 1afff48ac..000000000 Binary files a/target/scala-3.6.4/classes/scalation/dynamics/rungeKuttaTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/easyWriterTest.class b/target/scala-3.6.4/classes/scalation/easyWriterTest.class deleted file mode 100644 index 01484cfb0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/easyWriterTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/easyWriterTest.tasty b/target/scala-3.6.4/classes/scalation/easyWriterTest.tasty deleted file mode 100644 index ce34b58ce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/easyWriterTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/fibTest.class b/target/scala-3.6.4/classes/scalation/fibTest.class deleted file mode 100644 index a0e2fe2cc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/fibTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/fibTest.tasty b/target/scala-3.6.4/classes/scalation/fibTest.tasty deleted file mode 100644 index 5831b4bc2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/fibTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/genIndexHtml.class b/target/scala-3.6.4/classes/scalation/genIndexHtml.class deleted file mode 100644 index 47d0383cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/genIndexHtml.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/genIndexHtml.tasty b/target/scala-3.6.4/classes/scalation/genIndexHtml.tasty deleted file mode 100644 index e7a6c7a85..000000000 Binary files a/target/scala-3.6.4/classes/scalation/genIndexHtml.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/hyperParameterTest.class 
b/target/scala-3.6.4/classes/scalation/hyperParameterTest.class deleted file mode 100644 index 306c96710..000000000 Binary files a/target/scala-3.6.4/classes/scalation/hyperParameterTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/hyperParameterTest.tasty b/target/scala-3.6.4/classes/scalation/hyperParameterTest.tasty deleted file mode 100644 index 954ec5287..000000000 Binary files a/target/scala-3.6.4/classes/scalation/hyperParameterTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/hyperParameterTest2.class b/target/scala-3.6.4/classes/scalation/hyperParameterTest2.class deleted file mode 100644 index ce423cdaf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/hyperParameterTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/hyperParameterTest2.tasty b/target/scala-3.6.4/classes/scalation/hyperParameterTest2.tasty deleted file mode 100644 index d8cf2616c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/hyperParameterTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/index.html b/target/scala-3.6.4/classes/scalation/index.html deleted file mode 100644 index 1cca48e5d..000000000 --- a/target/scala-3.6.4/classes/scalation/index.html +++ /dev/null @@ -1,43 +0,0 @@ - - -

    Source files in scalation Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/latLongTest.class b/target/scala-3.6.4/classes/scalation/latLongTest.class deleted file mode 100644 index 478881dc3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/latLongTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/latLongTest.tasty b/target/scala-3.6.4/classes/scalation/latLongTest.tasty deleted file mode 100644 index 4afe0fdfe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/latLongTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/latLongTest2.class b/target/scala-3.6.4/classes/scalation/latLongTest2.class deleted file mode 100644 index 8c7586bbc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/latLongTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/latLongTest2.tasty b/target/scala-3.6.4/classes/scalation/latLongTest2.tasty deleted file mode 100644 index 008521c15..000000000 Binary files a/target/scala-3.6.4/classes/scalation/latLongTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/latLongTest3.class b/target/scala-3.6.4/classes/scalation/latLongTest3.class deleted file mode 100644 index c638d0d60..000000000 Binary files a/target/scala-3.6.4/classes/scalation/latLongTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/latLongTest3.tasty b/target/scala-3.6.4/classes/scalation/latLongTest3.tasty deleted file mode 100644 index e9ddb2ed4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/latLongTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/latLongTest4.class b/target/scala-3.6.4/classes/scalation/latLongTest4.class deleted file mode 100644 index e137af0c1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/latLongTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/latLongTest4.tasty 
b/target/scala-3.6.4/classes/scalation/latLongTest4.tasty deleted file mode 100644 index c3fb918f1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/latLongTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/makeVectorI.class b/target/scala-3.6.4/classes/scalation/makeVectorI.class deleted file mode 100644 index 7cef444f4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/makeVectorI.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/makeVectorI.tasty b/target/scala-3.6.4/classes/scalation/makeVectorI.tasty deleted file mode 100644 index 41cd99671..000000000 Binary files a/target/scala-3.6.4/classes/scalation/makeVectorI.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Bidiagonal$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Bidiagonal$package$.class deleted file mode 100644 index a4408e191..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Bidiagonal$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Bidiagonal$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Bidiagonal$package.class deleted file mode 100644 index 58ee6ee5e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Bidiagonal$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Bidiagonal$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Bidiagonal$package.tasty deleted file mode 100644 index 261e3542b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Bidiagonal$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Bidiagonal.class b/target/scala-3.6.4/classes/scalation/mathstat/Bidiagonal.class deleted file mode 100644 index 3a522a604..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Bidiagonal.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/mathstat/Bidiagonal.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Bidiagonal.tasty deleted file mode 100644 index 53512fac5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Bidiagonal.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Canvas$.class b/target/scala-3.6.4/classes/scalation/mathstat/Canvas$.class deleted file mode 100644 index 795374912..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Canvas$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Canvas.class b/target/scala-3.6.4/classes/scalation/mathstat/Canvas.class deleted file mode 100644 index 94e27155c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Canvas.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Canvas.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Canvas.tasty deleted file mode 100644 index 2460dbb1b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Canvas.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics$.class b/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics$.class deleted file mode 100644 index 9da701757..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics$package$.class deleted file mode 100644 index ad30d7e9b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics$package.class deleted file mode 100644 index 310cabc4d..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics$package.tasty deleted file mode 100644 index 914a5fd67..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics.class b/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics.class deleted file mode 100644 index 5ae011bad..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics.tasty deleted file mode 100644 index 80f74fbd2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Combinatorics.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Complex$$anon$1.class b/target/scala-3.6.4/classes/scalation/mathstat/Complex$$anon$1.class deleted file mode 100644 index 1ec776ab2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Complex$$anon$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Complex$.class b/target/scala-3.6.4/classes/scalation/mathstat/Complex$.class deleted file mode 100644 index ce809e7cc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Complex$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Complex$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Complex$package$.class deleted file mode 100644 index 4ff1ef468..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Complex$package$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/mathstat/Complex$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Complex$package.class deleted file mode 100644 index 1b8eacc88..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Complex$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Complex$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Complex$package.tasty deleted file mode 100644 index 68c9dc3ad..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Complex$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Complex.class b/target/scala-3.6.4/classes/scalation/mathstat/Complex.class deleted file mode 100644 index 42e12234b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Complex.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Complex.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Complex.tasty deleted file mode 100644 index a88b8e438..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Complex.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Convert$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Convert$package$.class deleted file mode 100644 index a176662ee..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Convert$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Convert$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Convert$package.class deleted file mode 100644 index 42c29f444..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Convert$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Convert$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Convert$package.tasty deleted file mode 100644 index 927e74ba0..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/mathstat/Convert$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Correlogram$.class b/target/scala-3.6.4/classes/scalation/mathstat/Correlogram$.class deleted file mode 100644 index a6e44c0c8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Correlogram$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Correlogram$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Correlogram$package$.class deleted file mode 100644 index fb80c63e2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Correlogram$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Correlogram$package$CT$2$.class b/target/scala-3.6.4/classes/scalation/mathstat/Correlogram$package$CT$2$.class deleted file mode 100644 index cf2947b40..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Correlogram$package$CT$2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Correlogram$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Correlogram$package.class deleted file mode 100644 index 969bbef69..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Correlogram$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Correlogram$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Correlogram$package.tasty deleted file mode 100644 index a7eb14784..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Correlogram$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Correlogram.class b/target/scala-3.6.4/classes/scalation/mathstat/Correlogram.class deleted file mode 100644 index c5dc737ed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Correlogram.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/mathstat/Correlogram.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Correlogram.tasty deleted file mode 100644 index 4119fa48f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Correlogram.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Eigen$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Eigen$package$.class deleted file mode 100644 index e3e222243..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Eigen$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Eigen$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Eigen$package.class deleted file mode 100644 index 4e7bbb498..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Eigen$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Eigen$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Eigen$package.tasty deleted file mode 100644 index 1150c2df8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Eigen$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Eigenvalue.class b/target/scala-3.6.4/classes/scalation/mathstat/Eigenvalue.class deleted file mode 100644 index 9bd61720c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Eigenvalue.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Eigenvalue.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Eigenvalue.tasty deleted file mode 100644 index 5e2e9b35c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Eigenvalue.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/EigenvalueSym.class b/target/scala-3.6.4/classes/scalation/mathstat/EigenvalueSym.class deleted file mode 100644 index a1901dd08..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/EigenvalueSym.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/EigenvalueSym.tasty b/target/scala-3.6.4/classes/scalation/mathstat/EigenvalueSym.tasty deleted file mode 100644 index 9e7ca4601..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/EigenvalueSym.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Eigenvector$.class b/target/scala-3.6.4/classes/scalation/mathstat/Eigenvector$.class deleted file mode 100644 index fda5b2881..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Eigenvector$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Eigenvector.class b/target/scala-3.6.4/classes/scalation/mathstat/Eigenvector.class deleted file mode 100644 index 4101a5d4e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Eigenvector.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Eigenvector.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Eigenvector.tasty deleted file mode 100644 index 1c553a26c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Eigenvector.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Cholesky$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_Cholesky$package$.class deleted file mode 100644 index 2748ff646..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Cholesky$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Cholesky$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_Cholesky$package.class deleted file mode 100644 index a0a3ca788..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Cholesky$package.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Cholesky$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Fac_Cholesky$package.tasty deleted file mode 100644 index a47ea660d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Cholesky$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Cholesky.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_Cholesky.class deleted file mode 100644 index 15ce8505a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Cholesky.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Cholesky.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Fac_Cholesky.tasty deleted file mode 100644 index fa4a3f353..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Cholesky.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse$.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse$.class deleted file mode 100644 index 84819caf0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse$package$.class deleted file mode 100644 index 4afe45a14..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse$package.class deleted file mode 100644 index 4e495d6e6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse$package.tasty 
b/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse$package.tasty deleted file mode 100644 index 20fbe5135..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse.class deleted file mode 100644 index c6ae8efde..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse.tasty deleted file mode 100644 index 64130c435..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_Inverse.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LQ$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_LQ$package$.class deleted file mode 100644 index 917fd00e8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LQ$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LQ$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_LQ$package.class deleted file mode 100644 index ec342d7d0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LQ$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LQ$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Fac_LQ$package.tasty deleted file mode 100644 index ad16d6f3a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LQ$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LQ.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_LQ.class deleted file mode 100644 index ea6633bff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LQ.class 
and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LQ.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Fac_LQ.tasty deleted file mode 100644 index bf0bd3ab9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LQ.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU$.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU$.class deleted file mode 100644 index 7f9d8b118..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU$package$.class deleted file mode 100644 index 1dab590b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU$package.class deleted file mode 100644 index 6f3e2811d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU$package.tasty deleted file mode 100644 index a0a346dc4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU.class deleted file mode 100644 index 05ff09d0c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU.tasty deleted file mode 100644 index 3d8721131..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/Fac_LU.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR$.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR$.class deleted file mode 100644 index 80bc29658..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR$package$.class deleted file mode 100644 index 41c9c4486..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR$package.class deleted file mode 100644 index 707060509..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR$package.tasty deleted file mode 100644 index 3210d3015..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR.class deleted file mode 100644 index 9d88dc41c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR.tasty deleted file mode 100644 index 9301b705b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR$.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR$.class deleted file mode 
100644 index d59ea07e3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR$package$.class deleted file mode 100644 index 76473e1ce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR$package.class deleted file mode 100644 index 407987d31..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR$package.tasty deleted file mode 100644 index a6032daa8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR.class deleted file mode 100644 index cf1b43236..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR.tasty deleted file mode 100644 index 1fe008f7d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_QR_RR.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD$.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD$.class deleted file mode 100644 index 8f3e0eabf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD$package$.class deleted file mode 100644 index 68ab0ddc3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD$package.class deleted file mode 100644 index 6fad892ac..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD$package.tasty deleted file mode 100644 index 857f8507b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD.class b/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD.class deleted file mode 100644 index f2e701234..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD.tasty deleted file mode 100644 index 9cf526e97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Fac_SVD.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Factorization.class b/target/scala-3.6.4/classes/scalation/mathstat/Factorization.class deleted file mode 100644 index 6ee5e025c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Factorization.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Factorization.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Factorization.tasty deleted file mode 100644 index 1762eb191..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/Factorization.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/FramelessHistogram.class b/target/scala-3.6.4/classes/scalation/mathstat/FramelessHistogram.class deleted file mode 100644 index 37e3be116..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/FramelessHistogram.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/FramelessHistogram.tasty b/target/scala-3.6.4/classes/scalation/mathstat/FramelessHistogram.tasty deleted file mode 100644 index 0264f8306..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/FramelessHistogram.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/FramelessPlot$.class b/target/scala-3.6.4/classes/scalation/mathstat/FramelessPlot$.class deleted file mode 100644 index b9f1b8251..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/FramelessPlot$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/FramelessPlot.class b/target/scala-3.6.4/classes/scalation/mathstat/FramelessPlot.class deleted file mode 100644 index fb3534f7b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/FramelessPlot.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/FramelessPlot.tasty b/target/scala-3.6.4/classes/scalation/mathstat/FramelessPlot.tasty deleted file mode 100644 index bce727734..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/FramelessPlot.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/HCanvas$.class b/target/scala-3.6.4/classes/scalation/mathstat/HCanvas$.class deleted file mode 100644 index 91d2b87c2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/HCanvas$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/HCanvas.class 
b/target/scala-3.6.4/classes/scalation/mathstat/HCanvas.class deleted file mode 100644 index 356409ac3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/HCanvas.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/HCanvas.tasty b/target/scala-3.6.4/classes/scalation/mathstat/HCanvas.tasty deleted file mode 100644 index 6c05e04d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/HCanvas.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Hessenburg.class b/target/scala-3.6.4/classes/scalation/mathstat/Hessenburg.class deleted file mode 100644 index 265b0cf5c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Hessenburg.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Hessenburg.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Hessenburg.tasty deleted file mode 100644 index 6d5951bdf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Hessenburg.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Histogram$.class b/target/scala-3.6.4/classes/scalation/mathstat/Histogram$.class deleted file mode 100644 index bfe4ac28c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Histogram$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Histogram$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Histogram$package$.class deleted file mode 100644 index e9259eb89..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Histogram$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Histogram$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Histogram$package.class deleted file mode 100644 index 838085188..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Histogram$package.class and /dev/null differ 
diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Histogram$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Histogram$package.tasty deleted file mode 100644 index 7d3ad89bb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Histogram$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Histogram.class b/target/scala-3.6.4/classes/scalation/mathstat/Histogram.class deleted file mode 100644 index 42ce119c6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Histogram.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Histogram.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Histogram.tasty deleted file mode 100644 index 0ac7e02de..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Histogram.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Householder$.class b/target/scala-3.6.4/classes/scalation/mathstat/Householder$.class deleted file mode 100644 index 5e10d90d1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Householder$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Householder$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Householder$package$.class deleted file mode 100644 index 9e63a7478..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Householder$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Householder$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Householder$package.class deleted file mode 100644 index 8d0b58a9b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Householder$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Householder$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Householder$package.tasty deleted 
file mode 100644 index ece372e2d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Householder$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Householder.class b/target/scala-3.6.4/classes/scalation/mathstat/Householder.class deleted file mode 100644 index 30b5bb63f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Householder.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Householder.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Householder.tasty deleted file mode 100644 index 5d8d08263..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Householder.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/HouseholderT.class b/target/scala-3.6.4/classes/scalation/mathstat/HouseholderT.class deleted file mode 100644 index 8c5fb2395..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/HouseholderT.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/HouseholderT.tasty b/target/scala-3.6.4/classes/scalation/mathstat/HouseholderT.tasty deleted file mode 100644 index 05200924e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/HouseholderT.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/InverseTest$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/InverseTest$package$.class deleted file mode 100644 index f706689f9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/InverseTest$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/InverseTest$package.class b/target/scala-3.6.4/classes/scalation/mathstat/InverseTest$package.class deleted file mode 100644 index 2a071e7ff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/InverseTest$package.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/mathstat/InverseTest$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/InverseTest$package.tasty deleted file mode 100644 index e070293ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/InverseTest$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixCalc$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixCalc$package$.class deleted file mode 100644 index e3205c968..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixCalc$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixCalc$package.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixCalc$package.class deleted file mode 100644 index a955a2011..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixCalc$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixCalc$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/MatrixCalc$package.tasty deleted file mode 100644 index 791311367..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixCalc$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixCalc.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixCalc.class deleted file mode 100644 index 5af84b27f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixCalc.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixCalc.tasty b/target/scala-3.6.4/classes/scalation/mathstat/MatrixCalc.tasty deleted file mode 100644 index 9d0861732..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixCalc.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD$.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixD$.class deleted file mode 
100644 index 92dfe999e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixD$package$.class deleted file mode 100644 index fd5e823cd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD$package.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixD$package.class deleted file mode 100644 index 9de501e28..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/MatrixD$package.tasty deleted file mode 100644 index ce831cb1e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixD.class deleted file mode 100644 index 434de2c8d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD.scala.bak b/target/scala-3.6.4/classes/scalation/mathstat/MatrixD.scala.bak deleted file mode 100644 index 14e56c371..000000000 --- a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD.scala.bak +++ /dev/null @@ -1,2180 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Thu Jun 17 19:29:23 EDT 2021 - * @see LICENSE (MIT style license file). 
- * - * @note Matrix Data Structure of Doubles - */ - -package scalation -package mathstat - -import java.util.Arrays.copyOf -import java.io.PrintWriter - -import scala.collection.immutable.{IndexedSeq => IIndexedSeq, Set => ISet} -import scala.collection.mutable.{ArrayBuffer, IndexedSeq, Set} -import scala.math.{round, sqrt} -import scala.util.control.Breaks.{break, breakable} - -/** Top-level type definition for functions mapping: - */ -type FunctionM2V = MatrixD => VectorD // matrix `MatrixD` to vector `VectorD` -type FunctionM2M = MatrixD => MatrixD // matrix `MatrixD` to matrix `MatrixD` - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Matricize a vector function (V2V) by applying it to each row of a matrix. - * @param f the vector function to matricize - * @param x the matrix to apply the function to - */ -def matricize (f: FunctionV2V)(x: MatrixD): MatrixD = - MatrixD (for i <- x.indices yield f(x(i))) - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Matrixize a vector function (V2V) to create a matrix function (M2M). - * @param f the vector function to matrixize - */ -def matrixize (f: FunctionV2V): FunctionM2M = (x: MatrixD) => x.mmap (f(_)) - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Diagnose matrix x looking for high correlation, high condition number, - * lower than expected rank, zero variance columns (there should only be one). 
- * @param x the data matrix to diagnose - */ -def diagnoseMat (x: MatrixD): Unit = - banner ("diagnoseMat: Matrix Dimensions") - println (s"x.dim = ${x.dim}, x.dim2 = ${x.dim2}") - - banner ("Correlation Matrix") - println (s"x.corr = ${x.corr}") - -// banner ("Matrix Condition Number") -// println (s"x.conditionNum = ${x.conditionNum}") // FIX - better ways to calculate - - banner ("Matrix Rank") - val fac = new Fac_QR_RR (x).factor () // use Rank Revealing QR Factorization - println (s"fac.rank = ${fac.rank}") - - banner ("Variance of Matrix Columns") - cfor (x.indices2) { j => println (s"x(?, $j).variance = ${x(?, j).variance}") } -end diagnoseMat - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `MatrixD` class stores and operates on Numeric Matrices of base type `Double`. - * the 'cfor' loop is used for innermost loops for faster execution. - * @param dim the first (row) dimension of the matrix - * @param dim2 the second (column) dimension of the matrix - * @param v the 2D array used to store matrix elements - */ -class MatrixD (val dim: Int, - val dim2: Int, - private [mathstat] var v: Array [Array [Double]] = null): - - private val debug = debugf ("MatrixD", true) // partial invocation of debug function - private val flaw = flawf ("MatrixD") // partial invocation of flaw function - - if v == null then // no array => allocate array - v = Array.ofDim [Double] (dim, dim2) - else // existing array => check dimensions - val v_dim = v.length - val v_dim2 = if v_dim > 0 then v(0).length else dim2 - if dim != v_dim || dim2 != v_dim2 then - flaw ("init", s"dimensions are wrong: dims = ($dim, $dim2) vs. 
($v_dim, $v_dim2)") -// throw new Exception () - end if -// if dim == 0 || dim2 == 0 then -// flaw ("init", s"warning, a matrix dimension is zero: dims = ($dim, $dim2)") -// throw new Exception () -// end if - end if - - /** The row index range - */ - val indices = 0 until dim - - /** The column index range - */ - val indices2 = 0 until dim2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the row and column dimensions of this matrix. - */ - inline def dims: (Int, Int) = (dim, dim2) - - private val minDim = math.min (dim, dim2) // the minimum dimension - private val TSZ = 100 // the tile/block size (tunable) - private val fString = "%g,\t" // output format spec - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return a deep copy of this matrix (note: clone may not be deep). - * Uses Java's native `Arrays.copyOf` for efficient copying of 2D array. - */ - def copy: MatrixD = - val a = new Array [Array [Double]] (dim) - var i = 0 - cfor (i < dim, i += 1) { a(i) = copyOf (v(i), dim2) } - new MatrixD (dim, dim2, a) - end copy - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return a deep copy of this matrix (check which is more efficient). - * @see https://stackoverflow.com/questions/1870711/deep-copy-of-2d-array-in-scala - */ -// def copy: MatrixD = new MatrixD (dim, dim2, v.map (_.clone)) - -// apply, update and related methods - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the ELEMENT in row i, column j of this matrix. - * usage: x(3, 2) - * @param i the row index - * @param j the column index - */ - def apply (i: Int, j: Int): Double = v(i)(j) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the intersection of the ROWS in range ir and COLUMNS in range jr - * of this matrix as a new independent matrix. 
- * usage: x(3 to 6, 2 to 4) - * @param ir the index range of rows to return - * @param jr the index range of columns to return - */ - def apply (ir: Range, jr: Range): MatrixD = - val i1 = ir.start; val j1 = jr.start - val a = Array.ofDim [Double] (ir.size, jr.size) - for i <- ir do - val v_i = v(i); val a_i = a(i-i1) - for j <- jr do a_i(j-j1) = v_i(j) - end for - new MatrixD (ir.size, jr.size, a) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the actual i-th ROW of this matrix. - * usage: x(3) - * @param i the row index - */ - inline def apply (i: Int): VectorD = new VectorD (dim2, v(i)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the ROWS in range ir of this matrix as a new independent matrix. - * usage: x(3 to 6) - * @param ir the index range of rows to return - */ - def apply (ir: Range): MatrixD = - val i1 = ir.start - val a = Array.ofDim [Array [Double]] (ir.size) - for i <- ir do a(i-i1) = copyOf (v(i), dim2) - new MatrixD (ir.size, dim2, a) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the ROWS in range ir of this matrix for column j as a vector. - * usage: x(3 to 6) - * @param ir the index range of rows to return - * @param j the column index - */ - def apply (ir: Range, j: Int): VectorD = - val i1 = ir.start - val a = Array.ofDim [Double] (ir.size) - for i <- ir do a(i-i1) = v(i)(j) - new VectorD (ir.size, a) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the ROWS in index set iset of this matrix as a new independent matrix. 
- * usage: x(Set (3, 5, 7)) - * @param iset the index set of rows to return - */ - def apply (iset: Set [Int]): MatrixD = - val a = Array.ofDim [Array [Double]] (iset.size) - var k = 0 - for i <- iset do { a(k) = copyOf (v(i), dim2); k += 1 } - new MatrixD (iset.size, dim2, a) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the ROWS in index sequence idx of this matrix as a new independent matrix. - * usage: x(Array (3, 5, 7)) - * @param idx the index sequence of rows to return - */ - def apply (idx: IndexedSeq [Int]): MatrixD = - val a = Array.ofDim [Array [Double]] (idx.size) - var k = 0 - for i <- idx do { a(k) = copyOf (v(i), dim2); k += 1 } - new MatrixD (idx.size, dim2, a) - end apply - - def apply (idx: IIndexedSeq [Int]): MatrixD = - val a = Array.ofDim [Array [Double]] (idx.size) - var k = 0 - for i <- idx do { a(k) = copyOf (v(i), dim2); k += 1 } - new MatrixD (idx.size, dim2, a) - end apply - - def apply (idx: Array [Int]): MatrixD = - val a = Array.ofDim [Array [Double]] (idx.size) - var k = 0 - for i <- idx do { a(k) = copyOf (v(i), dim2); k += 1 } - new MatrixD (idx.size, dim2, a) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the j-th COLUMN of this matrix as an independent vector. - * usage: x(?, 2) - * @param all use the all rows indicator ? - * @param j the column index - */ - inline def apply (all: Char, j: Int): VectorD = - val a = Array.ofDim [Double] (dim) - var i = 0 - cfor (i < dim, i += 1) { a(i) = v(i)(j) } - new VectorD (dim, a) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the COLUMNS in range jr of this matrix as a new independent matrix. - * usage: x(?, 2 to 4) - * @param all use the all rows indicator ? 
- * @param jr the index range of columns to return - */ - def apply (all: Char, jr: Range): MatrixD = - val j1 = jr.start - val a = Array.ofDim [Double] (dim, jr.size) - for i <- indices do - val v_i = v(i); val a_i = a(i) - for j <- jr do a_i(j-j1) = v_i(j) - end for - new MatrixD (dim, jr.size, a) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the COLUMNS in index set jset of this matrix as a new independent matrix. - * usage: x(?, Set (2, 4, 6)) - * @param all use the all rows indicator ? - * @param jset the index set of columns to return - */ - def apply (all: Char, jset: Set [Int]): MatrixD = - val a = Array.ofDim [Double] (dim, jset.size) - for i <- indices do - val v_i = v(i); val a_i = a(i) - var l = 0 - for j <- jset do { a_i(l) = v_i(j); l += 1 } - end for - new MatrixD (dim, jset.size, a) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the COLUMNS in index sequence jdx of this matrix as a new independent matrix. - * usage: x(?, Array (2, 4, 6)) - * @param all use the all rows indicator ? - * @param jdx the index set of columns to return - */ - def apply (all: Char, jdx: IndexedSeq [Int]): MatrixD = - val a = Array.ofDim [Double] (dim, jdx.size) - for i <- indices do - val v_i = v(i); val a_i = a(i) - var l = 0 - for j <- jdx do { a_i(l) = v_i(j); l += 1 } - end for - new MatrixD (dim, jdx.size, a) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the main DIAGONAL of this matrix as a new independent vector. - * usage: x(?) - * @param diag use the all diagonal elements indicator ? 
- */ - inline def apply (diag: Char): VectorD = - val a = Array.ofDim [Double] (minDim) - for i <- 0 until minDim do a(i) = v(i)(i) - new VectorD (minDim, a) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the last element (at the last row and last column) in the matrix. - * usage: x.last - */ - inline def last: Double = v(dim-1)(dim2-1) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return all but the i-th ROW of this matrix as a new independent matrix. - * usage: x.not(3) - * @param i the row index to exclude - */ - def not (i: Int): MatrixD = - if i == 0 then apply(i+1 until dim) - else if i == dim-1 then apply(0 until i) - else apply(0 until i) ++ apply(i+1 until dim) - end not - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return all but the ROWS in index sequence idx of this matrix as a new - * independent matrix. - * usage: x.not(Array (3, 5, 7)) - * @param idx the index sequence of rows to exclude - */ - def not (idx: IndexedSeq [Int]): MatrixD = - val a = Array.ofDim [Array [Double]] (dim - idx.size) - var k = 0 - for i <- indices if ! (idx contains i) do { a(k) = copyOf (v(i), dim2); k += 1 } - new MatrixD (a.length, dim2, a) - end not - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return all but the j-th COLUMN of this matrix as a new independent matrix.. - * usage: x.not(?, 2) - * @param all use the all rows indicator ? - * @param j the column index to exclude - */ - def not (all: Char, j: Int): MatrixD = - if j == 0 then apply(?, j+1 until dim2) - else if j == dim2-1 then apply(?, 0 until j) - else apply(?, 0 until j) ++^ apply(?, j+1 until dim2) - end not - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get column col from the matrix, returning it as a vector. 
- * @param col the column to extract from the matrix - * @param from the position to start extracting from - */ - def col (col: Int, from: Int = 0): VectorD = - val u = new VectorD (dim - from) - for i <- from until dim do u(i-from) = v(i)(col) - u - end col - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Combine this matrix with matrix b, placing them along the diagonal and - * filling in the bottom left and top right regions with zeros; [this, b]. - * @param b the matrix to combine with this matrix - */ - infix def diag (b: MatrixD): MatrixD = - val m = dim + b.dim - val n = dim2 + b.dim2 - val c = new MatrixD (m, n) - - for i <- 0 until m; j <- 0 until n do - c.v(i)(j) = if i < dim && j < dim2 then v(i)(j) - else if i >= dim && j >= dim2 then b(i-dim, j-dim2) - else 0.0 - end for - c - end diag - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split the rows from this matrix to form two matrices: one from the rows in - * idx (e.g., testing set) and the other from rows not in idx (e.g., training set). - * Note split and split_ produce different row orders. - * @param idx the set of row indices to include/exclude - */ - def split (idx: ISet [Int]): (MatrixD, MatrixD) = - val len = idx.size - val a = new MatrixD (len, dim2) - val b = new MatrixD (dim - len, dim2) - var j, k = 0 - for i <- indices do - if idx contains i then - for l <- indices2 do a.v(j)(l) = v(i)(l) - j += 1 - else - for l <- indices2 do b.v(k)(l) = v(i)(l) - k += 1 - end if - end for - (a, b) - end split - - inline def split (idx: IndexedSeq [Int]): (MatrixD, MatrixD) = split (idx.toSet [Int]) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split the rows from this matrix to form two matrices: one from the rows in - * idx (e.g., testing set) and the other from rows not in idx (e.g., training set). - * Concise, but less efficient than split. 
- * @param idx the row indices to include/exclude - */ - def split_ (idx: IndexedSeq [Int]): (MatrixD, MatrixD) = (apply(idx), not(idx)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the dim-by-dim2 lower triangle of this matrix (rest are zero). - */ - def lower: MatrixD = - val a = Array.ofDim [Double] (dim, dim2) - for (i <- indices; j <- 0 to i) a(i)(j) = v(i)(j) - new MatrixD (dim, dim2, a) - end lower - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the dim2-by-dim2 upper triangle of this matrix (rest are zero). - */ - def upper: MatrixD = - val a = Array.ofDim [Double] (dim2, dim2) - for (i <- indices2; j <- i until dim2) a(i)(j) = v(i)(j) - new MatrixD (dim2, dim2, a) - end upper - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the ELEMENT in row i, column j of this matrix. - * usage: x(i, j) = 5 - * @param i the row index - * @param j the column index - * @param s the scalar value to assign - */ - inline def update (i: Int, j: Int, s: Double): Unit = v(i)(j) = s - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the elements in the i-th ROW of this matrix. - * usage: x(i) = u - * @param i the row index - * @param u the vector to assign - */ - inline def update (i: Int, u: VectorD): Unit = v(i) = u.toArray - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the elements in the j-th COLUMN of this matrix. - * usage: x(?, 2) = u - * @param all use the all rows indicator ? - * @param j the column index - * @param u the vector to assign - */ - def update (all: Char, j: Int, u: VectorD): Unit = - for i <- 0 until dim do v(i)(j) = u(i) - end update - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the elements for a given row in the COLUMNS in range jr of this matrix. 
- * usage: x(3, 2 to 4) = u - * @param i use row index - * @param jr the index range of columns to be updated - * @param u the vector to assign - */ - def update (i: Int, jr: Range, u: VectorD): Unit = - val j1 = jr.start - for j <- jr do v(i)(j) = u(j-j1) - end update - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the main DIAGONAL of this matrix according to the given scalar. - * usage: x(?, ?) = 5 - * @param d1 use the all diagonal elements indicator ? - * @param d2 use the all diagonal elements indicator ? - * @param s the scalar value to assign - */ - def update (d1: Char, d2: Char, s: Double): Unit = - for i <- 0 until minDim do v(i)(i) = s - end update - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the main DIAGONAL of this matrix according to the given vector. - * usage: x(?, ?) = u - * @param d1 use the all diagonal elements indicator ? - * @param d2 use the all diagonal elements indicator ? - * @param u the vector to assign - */ - def update (d1: Char, d2: Char, u: VectorD): Unit = - for i <- 0 until minDim do v(i)(i) = u(i) - end update - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Insert vector u into this matrix at j-th COLUMN after shifting j ... k-1 right. - * @param j the start column index [... j ... k k+1 ... ] --> - * @param k the end column index [... u j ... k+1 ... ] - * @param u the vector to insert into column j - */ - def insert (j: Int, k: Int, u: VectorD): Unit = - for jj <- k to j+1 by -1 do this(?, jj) = this(?, jj-1) // shift columns right - this(?, j) = u // insert u - end insert - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set this matrix's i-th ROW to the elements in vector u. May have left-over - * elements in row unassigned. 
- * @param i the row index - * @param u the vector value to assign - */ - def set (i: Int, u: VectorD): Unit = - if u.dim > dim2 then flaw ("set", "vector u is larger than the number of columns") - for j <- u.indices do v(i)(j) = u(j) - end set - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the k-th DIAGONAL of this matrix to the elements in vector u. - * @param u the vector to set the diagonal to - * @param k how far above the main diagonal, e.g., (-1, 0, 1) for (sub, main, super) - */ - def setDiag (u: VectorD, k: Int = 0): Unit = - val dm = math.min (dim, dim2) - val (j, l) = (math.max (-k, 0), math.min (dm-k, dm)) - for i <- j until l do v(i)(i+k) = u(i-j) - end setDiag - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set all the elements in the j-th COLUMN of this matrix to the scalar s. - * @param j the column index - * @param s the scalar value to assign - */ - def setCol (j: Int, s: Double): Unit = for i <- indices do v(i)(j) = s - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set all the elements of this ENTIRE matrix to the scalar s. - * @param s the scalar value to assign - */ - def setAll (s: Double): Unit = for i <- indices; j <- indices2 do v(i)(j) = s - -// Build new matrix from existing matrices - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Shift the columns this matrix so the rows become diagonals, i,e., move - * v_ij -> v_i,(i+j). Will produce a upper right and lower left triangles - * of zeros. - */ - def shiftDiag: MatrixD = - val a = Array.ofDim [Double] (dim + dim2 - 1, dim2) - for i <- indices; j <- indices2 do a(i+j)(j) = v(i)(j) - new MatrixD (dim + dim2 - 1, dim2, a) - end shiftDiag - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Unshift the columns this matrix so the diagonals become rows, i,e., move - * v_i,(i+j) -> v_ij. 
Will lose elements in the upper right and lower left - * triangles. - */ - def unshiftDiag: MatrixD = - val a = Array.ofDim [Double] (dim - dim2 + 1, dim2) - for i <- a.indices; j <- indices2 do a(i)(j) = v(i+j)(j) - new MatrixD (dim - dim2 + 1, dim2, a) - end unshiftDiag - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Transpose this matrix (swap columns <=> rows). - * Note: new MatrixD (dim2, dim, v.transpose) does not work when a dimension is 0. - */ - def transpose: MatrixD = - val a = Array.ofDim [Double] (dim2, dim) - for j <- indices do - val v_j = v(j) - var i = 0 - cfor (i < dim2, i += 1) { a(i)(j) = v_j(i) } - end for - new MatrixD (dim2, dim, a) - end transpose - - inline def 𝐓: MatrixD = transpose // unicode (𝐓) mathematical bold capital T -// inline def Ƭ: MatrixD = transpose // unicode (Ƭ) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Concatenate (row-wise) this matrix and matrix y (requires y to have the - * same column dimension as this). - * @param y the other matrix - */ - def ++ (y: MatrixD): MatrixD = - if dim2 != y.dim2 then - flaw ("++", s"requires same column dimensions: dim2 = $dim2 != y.dim2 = ${y.dim2}") - - new MatrixD (dim + y.dim, dim2, v ++ y.v) - end ++ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Concatenate (column-wise) this matrix and matrix y (requires y to have the - * same row dimension as this). 
- * @param y the other matrix - */ - def ++^ (y: MatrixD): MatrixD = - if dim != y.dim then - flaw ("++^", s"requires same row dimensions: dim = $dim != y.dim = ${y.dim}") - - val n = dim2 + y.dim2 - val a = Array.ofDim [Double] (dim, n) - for i <- a.indices do - val a_i = a(i) - var j = 0 - cfor (j < dim2, j += 1) { a_i(j) = v(i)(j) } - cfor (j < n, j += 1) { a_i(j) = y.v(i)(j-dim2) } - end for - new MatrixD (dim, n, a) - end ++^ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Concatenate (row) vector u and this matrix, i.e., prepend u to this. - * @param u the vector to be prepended as the new first row in new matrix - */ - def +: (u: VectorD): MatrixD = - if u.dim != dim2 then - flaw ("+:", s"vector does not match row dimension: u.dim = ${u.dim} != dim2 = $dim2") - - val c = new MatrixD (dim + 1, dim2) - for i <- c.indices do c(i) = if i == 0 then u else apply(i-1) - c - end +: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Concatenate (column) vector u and this matrix, i.e., prepend u to this. - * @param u the vector to be prepended as the new first column in new matrix - */ - def +^: (u: VectorD): MatrixD = - if u.dim != dim then - flaw ("+^:", s"vector does not match column dimension: u.dim = ${u.dim} != dim = $dim") - - val c = new MatrixD (dim, dim2 + 1) - for j <- c.indices2 do c(?, j) = if j == 0 then u else apply(?, j-1) - c - end +^: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Concatenate this matrix and (row) vector u, i.e., append u to this. 
- * @param u the vector to be appended as the new last row in new matrix - */ - def :+ (u: VectorD): MatrixD = - if u.dim != dim2 then - flaw (":+", s"vector does not match row dimension: u.dim = ${u.dim} != dim2 = $dim2") - - val c = new MatrixD (dim + 1, dim2) - for i <- c.indices do c(i) = if i < dim then apply(i) else u - c - end :+ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Concatenate this matrix and (column) vector u, i.e., append u to this. - * @param u the vector to be appended as the new last column in new matrix - */ - def :^+ (u: VectorD): MatrixD = - if u.dim != dim then - flaw (":^+", s"vector does not match column dimension: u.dim = ${u.dim} != dim = $dim") - - val c = new MatrixD (dim, dim2 + 1) - for j <- c.indices2 do c(?, j) = if j < dim2 then apply(?, j) else u - c - end :^+ - -// Add (+) matrix and (matrix, vector, scalar) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add this matrix and matrix y (requires y to have at least the dimensions of this). - * Alias rows to avoid double subscripting. - * @param y the other matrix - */ - def + (y: MatrixD): MatrixD = - if y.dim < dim || y.dim2 < dim2 then - flaw ("+", s"matrix + matrix - incompatible dimensions: this = $dims, y = ${y.dims}") - - val a = Array.ofDim [Double] (dim, dim2) - for i <- indices do - val v_i = v(i); val y_i = y.v(i); val a_i = a(i) - var j = 0 - cfor (j < dim2, j += 1) { a_i(j) = v_i(j) + y_i(j) } - end for - new MatrixD (dim, dim2, a) - end + - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add (in-place) this matrix and matrix y (requires y to have at least the - * dimensions of this). Alias rows to avoid double subscripting. 
- * @param y the other matrix - */ - def += (y: MatrixD): MatrixD = - if y.dim < dim || y.dim2 < dim2 then - flaw ("+", s"matrix + matrix - incompatible dimensions: this = $dims, y = ${y.dims}") - - for i <- indices do - val v_i = v(i); val y_i = y.v(i) - var j = 0 - cfor (j < dim2, j += 1) { v_i(j) += y_i(j) } - end for - this - end += - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add this matrix and (row) vector u. - * @param u the vector to add - */ - def + (u: VectorD): MatrixD = - if u.dim < dim2 then - flaw ("+", s"matrix + vector - incompatible dimensions: this = $dims, u = ${u.dim}") - - val a = Array.ofDim [Double] (dim, dim2) - for i <- indices do - val v_i = v(i); val a_i = a(i) - var j = 0 - cfor (j < dim2, j += 1) { a_i(j) = v_i(j) + u(j) } - end for - new MatrixD (dim, dim2, a) - end + - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add this matrix and (column) vector u. - * @param u the vector to add - */ - def +^ (u: VectorD): MatrixD = - if u.dim < dim2 then - flaw ("+", s"matrix + vector - incompatible dimensions: this = $dims, u = ${u.dim}") - - val a = Array.ofDim [Double] (dim, dim2) - for i <- indices do - val v_i = v(i); val a_i = a(i) - var j = 0 - cfor (j < dim2, j += 1) { a_i(j) = v_i(j) + u(i) } - end for - new MatrixD (dim, dim2, a) - end +^ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add this matrix and scaler u. - * @param u the scalar to add - */ - def + (u: Double): MatrixD = - val a = Array.ofDim [Double] (dim, dim2) - for i <- indices do - val v_i = v(i); val a_i = a(i) - var j = 0 - cfor (j < dim2, j += 1) { a_i(j) = v_i(j) + u } - end for - new MatrixD (dim, dim2, a) - end + - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add (in-place) this matrix and scaler u. 
- * @param u the scalar to add - */ - def += (u: Double): MatrixD = - for i <- indices do - val v_i = v(i) - var j = 0 - cfor (j < dim2, j += 1) { v_i(j) += u } - end for - this - end += - -// Subtract (-) from matrix, (matrix, vector, scalar) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the negative of this matrix (unary minus). - */ - def unary_- : MatrixD = - val a = Array.ofDim [Double] (dim, dim2) - for i <- indices do - val v_i = v(i); val a_i = a(i) - var j = 0 - cfor (j < dim2, j += 1) { a_i(j) = -v_i(j) } - end for - new MatrixD (dim, dim2, a) - end unary_- - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Subtract from this matrix the matrix y (requires y to have at least the dimensions of this). - * Alias rows to avoid double subscripting. - * @param y the other matrix - */ - def - (y: MatrixD): MatrixD = - if y.dim < dim || y.dim2 < dim2 then - flaw ("-", s"matrix - matrix - incompatible dimensions: this = $dims, y = ${y.dims}") - - val a = Array.ofDim [Double] (dim, dim2) - for i <- indices do - val v_i = v(i); val y_i = y.v(i); val a_i = a(i) - var j = 0 - cfor (j < dim2, j += 1) { a_i(j) = v_i(j) - y_i(j) } - end for - new MatrixD (dim, dim2, a) - end - - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Subtract (in-place) from this matrix the matrix y (requires y to have at - * least the dimensions of this). Alias rows to avoid double subscripting. - * @param y the other matrix - */ - def -= (y: MatrixD): MatrixD = - if y.dim < dim || y.dim2 < dim2 then - flaw ("-", s"matrix - matrix - incompatible dimensions: this = $dims, y = ${y.dims}") - - for i <- indices do - val v_i = v(i); val y_i = y.v(i) - var j = 0 - cfor (j < dim2, j += 1) { v_i(j) -= y_i(j) } - end for - this - end -= - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Subtract from this matrix, the (row) vector u. 
- * @param u the vector to subtract - */ - def - (u: VectorD): MatrixD = - if u.dim < dim2 then - flaw ("-", s"matrix - vector - incompatible dimensions: this = $dims, u = ${u.dim}") - - val a = Array.ofDim [Double] (dim, dim2) - for i <- indices do - val v_i = v(i); val a_i = a(i) - var j = 0 - cfor (j < dim2, j += 1) { a_i(j) = v_i(j) - u(j) } - end for - new MatrixD (dim, dim2, a) - end - - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Subtract from this matrix, the (column) vector u. - * @param u the vector to subtract - */ - def -^ (u: VectorD): MatrixD = - if u.dim < dim2 then - flaw ("-", s"matrix - vector - incompatible dimensions: this = $dims, u = ${u.dim}") - - val a = Array.ofDim [Double] (dim, dim2) - for i <- indices do - val v_i = v(i); val a_i = a(i) - var j = 0 - cfor (j < dim2, j += 1) { a_i(j) = v_i(j) - u(i) } - end for - new MatrixD (dim, dim2, a) - end -^ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Subtract from this matrix, the scalar u. - * @param u the scalar to subtract - */ - def - (u: Double): MatrixD = - val a = Array.ofDim [Double] (dim, dim2) - for i <- indices do - val v_i = v(i); val a_i = a(i) - var j = 0 - cfor (j < dim2, j += 1) { a_i(j) = v_i(j) - u } - end for - new MatrixD (dim, dim2, a) - end - - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Subtract (in-place) from this matrix, the scalar u. - * @param u the scalar to subtract - */ - def -= (u: Double): MatrixD = - for i <- indices do - val v_i = v(i) - var j = 0 - cfor (j < dim2, j += 1) { v_i(j) -= u } - end for - this - end -= - -// Multiply element-wise (*~) matrix and (matrix, vector) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Multiply element-wise, this matrix and matrix y (requires y to have at least - * the dimensions of this). Alias rows to avoid double subscripting. - * Also known as Hadamard product. 
- * @param y the other matrix - */ - def *~ (y: MatrixD): MatrixD = - if y.dim < dim || y.dim2 < dim2 then - flaw ("*~", s"matrix *~ matrix - incompatible dimensions: this = $dims, y = ${y.dims}") - - val a = Array.ofDim [Double] (dim, dim2) - for i <- indices do - val v_i = v(i); val y_i = y.v(i); val a_i = a(i) - var j = 0 - cfor (j < dim2, j += 1) { a_i(j) = v_i(j) * y_i(j) } - end for - new MatrixD (dim, dim2, a) - end *~ - - inline def ⊙ (y: MatrixD): MatrixD = *~ (y) // unicode XNOR gate - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Multiply this matrix by vector u to produce another matrix v_ij * u_j. - * E.g., multiply a matrix by a diagonal matrix represented as a vector. - * @param u the vector to multiply by - */ - def *~ (u: VectorD): MatrixD = - val dm = math.min (dim2, u.dim) - val a = Array.ofDim [Double] (dim, dm) - for i <- indices do - val v_i = v(i); val a_i = a(i) - var j = 0 - cfor (j < dm, j += 1) { a_i(j) = v_i(j) * u(j) } - end for - new MatrixD (dim, dm, a) - end *~ - - inline def ⊙ (y: VectorD): MatrixD = *~ (y) // unicode XNOR gate - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Multiply vector u by this matrix to produce another matrix u_i * v_ij. - * E.g., multiply a diagonal matrix represented as a vector by a matrix. - * This operator is right associative (vector *~: matrix). - * @param u the vector to multiply by - */ - def *~: (u: VectorD): MatrixD = - val dm = math.min (dim2, u.dim) - val a = Array.ofDim [Double] (dim, dm) - for i <- indices do - val v_i = v(i); val a_i = a(i) - var j = 0 - cfor (j < dm, j+= 1) { a_i(j) = u(i) * v_i(j) } - end for - new MatrixD (dim, dm, a) - end *~: - -// Multiply (*) matrix and (matrix, vector, scalar) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Multiply this matrix and matrix y (requires y to have at least the dimensions of this). 
- * Alias rows to avoid double subscripting, use tiling/blocking, and optimized i, k, j loop order. - * @see software.intel.com/content/www/us/en/develop/documentation/advisor-cookbook/top/ - * optimize-memory-access-patterns-using-loop-interchange-and-cache-blocking-techniques.html - * @param y the other matrix - */ - def * (y: MatrixD): MatrixD = - if y.dim != dim2 then - flaw ("*", s"matrix * matrix - incompatible cross dimensions: dim2 = $dim2, y.dim = ${y.dim}") - - val a = Array.ofDim [Double] (dim, y.dim2) - - for ii <- 0 until dim by TSZ do - val i2 = math.min (ii + TSZ, dim) - for kk <- 0 until dim2 by TSZ do - val k2 = math.min (kk + TSZ, dim2) - for jj <- 0 until y.dim2 by TSZ do - val j2 = math.min (jj + TSZ, y.dim2) - - for i <- ii until i2 do - val v_i = v(i); val a_i = a(i) - for k <- kk until k2 do - val y_k = y.v(k); val v_ik = v_i(k) - var j = jj - cfor (j < j2, j += 1) { a_i(j) += v_ik * y_k(j) } - end for - end for - - end for - end for - end for - new MatrixD (dim, y.dim2, a) - end * - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Multiply this matrix and vector y (requires y to have at least dim2 elements). - * Alias rows to avoid double subscripting and use array ops. - * @param y the vector to multiply by - */ - def * (y: VectorD): VectorD = - if y.dim < dim2 then - flaw ("*", s"matrix * vector - dimension of vector y: y.dim = ${y.dim} < dim2 = $dim2") - - val a = Array.ofDim [Double] (dim) - for i <- indices do - val v_i = v(i) - var sum = 0.0 - var j = 0 - cfor (j < dim2, j += 1) { sum += v_i(j) * y(j) } - a(i) = sum - end for - new VectorD (dim, a) - end * - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Multiply (row) vector y by this matrix. Note '*:' is right associative. 
- * vector = vector *: matrix - * @param y the vector to multiply by - */ - def *: (y: VectorD): VectorD = this.transpose * y - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Multiply this matrix and scaler u. - * @param u the scalar to multiply by - */ - def * (u: Double): MatrixD = - val a = Array.ofDim [Double] (dim, dim2) - for i <- indices do - val v_i = v(i); val a_i = a(i) - var j = 0 - cfor (j < dim2, j += 1) { a_i(j) = v_i(j) * u } - end for - new MatrixD (dim, dim2, a) - end * - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Multiply (in-place) this matrix and scaler u. - * @param u the scalar to multiply by - */ - def *= (u: Double): MatrixD = - for i <- indices do - val v_i = v(i) - var j = 0 - cfor (j < dim2, j += 1) { v_i(j) *= u } - end for - this - end *= - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Multiply this matrix and matrix y (requires y to have at least the dimensions of this). - * Alias rows to avoid double subscripting, transpose y first and use array ops. - * A simpler, less efficient version of '*'. - * @param y the other matrix - */ - infix def mul (y: MatrixD): MatrixD = - if dim2 != y.dim then - flaw ("mul", s"matrix mul matrix - incompatible cross dimensions: dim2 = $dim2, y.dim = ${y.dim}") - - val a = Array.ofDim [Double] (dim, y.dim2) - val yt = y.v.transpose - for i <- indices do - val v_i = v(i); val a_i = a(i) - for j <- y.indices2 do - val y_j = yt(j) - var sum = 0.0 - var k = 0 - cfor (k < dim2, k += 1) { sum += v_i(k) * y_j(k) } - a_i(j) = sum - end for - end for - new MatrixD (dim, y.dim2, a) - end mul - -// Divide (/) matrix by (matrix, vector, scalar) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Divide element-wise, this matrix by matrix y (requires y to have at least - * the dimensions of this). Alias rows to avoid double subscripting. 
- * @param y the other matrix - */ - def / (y: MatrixD): MatrixD = - if y.dim < dim || y.dim2 < dim2 then - flaw ("/", s"matrix / matrix - incompatible dimensions: this = $dims, y = ${y.dims}") - - val a = Array.ofDim [Double] (dim, dim2) - for i <- indices do - val v_i = v(i); val y_i = y.v(i); val a_i = a(i) - var j = 0 - cfor (j < dim2, j += 1) { a_i(j) = v_i(j) / y_i(j) } - end for - new MatrixD (dim, dim2, a) - end / - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Divide element-wise, this matrix by (row) vector u. - * @param u the vector to divide by - */ - def / (u: VectorD): MatrixD = - if u.dim < dim2 then - flaw ("/", s"matrix / vector - incompatible dimensions: this = $dims, u = ${u.dim}") - - val a = Array.ofDim [Double] (dim, dim2) - for i <- indices do - val v_i = v(i); val a_i = a(i) - var j = 0 - cfor (j < dim2, j += 1) { a_i(j) = v_i(j) / u(j) } - end for - new MatrixD (dim, dim2, a) - end / - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Divide element-wise this matrix by scaler u. - * @param u the scalar to divide by - */ - def / (u: Double): MatrixD = - val a = Array.ofDim [Double] (dim, dim2) - for i <- indices do - val v_i = v(i); val a_i = a(i) - var j = 0 - cfor (j < dim2, j += 1) { a_i(j) = v_i(j) / u } - end for - new MatrixD (dim, dim2, a) - end / - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Divide (in-place) element-wise this matrix by scaler u. - * @param u the scalar to divide by - */ - def /= (u: Double): MatrixD = - for i <- indices do - val v_i = v(i) - var j = 0 - cfor (j < dim2, j += 1) { v_i(j) /= u } - end for - this - end /= - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the inverse of this matrix using the inverse method in the `Fac_LU` object. - * Note, other factorizations also compute the inverse. - * @see `Fac_Inv`, `Fac_Cholesky`, `Fac-QR`. 
- */ - def inverse: MatrixD = Fac_LU.inverse (this)() - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Raise the elements in this matrix to the p-th power (e.g., x~^2 = x *~ x) - * Being element-wise, x~^2 is not x * x. - * @param p the scalar power - */ - def ~^ (p: Double): MatrixD = - val a = Array.ofDim [Double] (dim, dim2) - for i <- indices do - val v_i = v(i); val a_i = a(i) - var j = 0 - cfor (j < dim2, j += 1) { a_i(j) = v_i(j) ~^ p } - end for - new MatrixD (dim, dim2, a) - end ~^ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Raise this matrix to the p-th power (for some integer p >= 1) using - * a divide and conquer algorithm and matrix multiplication (x~^^2 = x * x). - * @param p the power to raise this matrix to - */ - def ~^^ (p: Int): MatrixD = - if p < 1 then flaw ("~^^", "power p must be an integer >= 1") - if dim != dim2 then flaw ("~^^", "only defined on square matrices") - - if p == 2 then this * this - else if p == 1 then this - else if p % 2 == 1 then this * this ~^^ (p - 1) - else { val c = this ~^^ (p / 2); c * c } - end ~^^ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Multiply each column of this matrix by its other columns to forms a matrix - * consisting of all 2-way cross terms [ x_i * x_j ] for j < i. - */ - def crossAll: MatrixD = - val n = dim2 - if n < 2 then flaw ("crossAll", s"requires at least 2 columns, but n = $n") - val nn = n * (n - 1) / 2 - debug ("crossAll", s"create matrix with dims = ($dim, $nn)") - val xx = new MatrixD (dim, nn) - var k = 0 - for i <- indices2; j <- 0 until i do - xx(?, k) = apply(?, i) * apply(?, j) - k += 1 - end for - xx - end crossAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Multiply each column of this matrix by its other two columns to forms a matrix - * consisting of all 3-way cross terms [ x_i * x_j * x_k ] for k < j < i. 
- */ - def crossAll3: MatrixD = - val n = dim2 - if n < 3 then flaw ("crossAll3", s"requires at least 3 columns, but n = $n") - val nn = n * (n - 1) * (n - 2) / 6 - debug ("crossAll3", s"create matrix with dims = ($dim, $nn)") - val xx = new MatrixD (dim, nn) - var l = 0 - for i <- indices2; j <- 0 until i; k <- 0 until j do - xx(?, l) = apply(?, i) * apply(?, j) * apply(?, k) - l += 1 - end for - xx - end crossAll3 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the dot product of this matrix and matrix y (requires y to have at - * least the dimensions of this). Alias rows to avoid double subscripting, - * use tiling/blocking, and use array ops. - * @see en.wikipedia.org/wiki/Matrix_multiplication_algorithm - * @param y the other matrix - */ - infix def dot (y: MatrixD): MatrixD = - if dim2 != y.dim then - flaw ("dot", s"matrix dot matrix - incompatible cross dimensions: dim2 = $dim2, y.dim = ${y.dim}") - - val a = Array.ofDim [Double] (dim, y.dim) - for ii <- 0 until dim by TSZ do - for jj <- 0 until y.dim2 by TSZ do - for kk <- 0 until dim2 by TSZ do - val k2 = math.min (kk + TSZ, dim2) - - for i <- ii until math.min (ii + TSZ, dim) do - val v_i = v(i); val a_i = a(i) - for j <- jj until math.min (jj + TSZ, y.dim2) do - val y_j = y.v(j) - var sum = 0.0 - var k = kk - cfor (k < k2, k += 1) { sum += v_i(k) * y_j(k) } - a_i(j) += sum - end for - end for - - end for - end for - end for - new MatrixD (dim, y.dim, a) - end dot - - inline def ∙ (y: MatrixD): MatrixD = dot (y) // unicode bullet point - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the dot product of this matrix and vector y. 
- * @param y the vector to take the dot product with - */ - infix def dot (y: VectorD): VectorD = - if y.dim < dim then - flaw ("dot", s"matrix dot vector - dimension of vector y: y.dim = ${y.dim} < dim = $dim") - - val a = Array.ofDim [Double] (dim2) - for j <- indices2 do - val v_j = apply(?, j) - var sum = 0.0 - var i = 0 - cfor (i < dim, i += 1) { sum += v_j(i) * y(i) } - a(j) = sum - end for - new VectorD (dim2, a) - end dot - - inline def ∙ (y: VectorD): VectorD = dot (y) // unicode bullet point - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the 'valid' (no padding) convolution of cofilter matrix c and input matrix x. - * Take the Hadamard product of c (this) with a slice of x and sum, then shift - * by one and repeat. - * Usage: c conv x - * Caveat: does not include reversal. - * @see `scalation.modeling.neuralnet.CoFilter_1D - * @param x the input/data matrix - */ - infix def conv (x: MatrixD): MatrixD = - val y = new MatrixD (x.dim - dim + 1, x.dim2 - dim2 + 1) - for k <- y.indices; l <- y.indices2 do - y(k, l) = (this *~ x(k until k + dim, l until l + dim2)).sum - y - end conv - - inline def *+ (x: MatrixD): MatrixD = conv (x) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the 'valid' (no padding) convolution of cofilter matrix c and input matrix x. - * Computes the discrete convolution of cofilter matrix c and input matrix x. - * Usage: c conv_ x - * @param x the input/data matrix - */ - inline infix def conv_ (x: MatrixD): MatrixD = reverse.conv (x) // FIX - may neeed another reverse method - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the 'same' (with padding) convolution of cofilter matrix c and input matrix x. - * Same means that the size of the result is the same as the input. 
- * Usage: c convs x - * @param x the input/data matrix - */ - infix def convs (x: MatrixD): MatrixD = - val y = new MatrixD (x.dim, x.dim2) - for k <- y.indices; l <- y.indices2 do - var sum = 0.0 - for i <- indices; j <- indices2 do - if (k-i in (0, x.dim-1)) && (l-j in (0, x.dim2-1)) then - sum += v(i)(j) * x(k-i, l-j) - y(k, l) = sum - y - end convs - - inline def *~+ (x: MatrixD): MatrixD = convs (x) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the 'full' convolution of cofilter c and input matrix x. - * @param x the input/data matrix - */ - infix def convf (x: MatrixD): MatrixD = - val y = new MatrixD (dim + x.dim - 1, dim2 + x.dim2 - 1) - for k <- y.indices; l <- y.indices2 do - var sum = 0.0 - for i <- 0 until math.min (k+1, dim); j <- 0 until math.min (l+1, dim2) do - if k-i < x.dim && l-j < x.dim2 then - sum += v(i)(j) * x(k-i, l-j) - y(k, l) = sum - y - end convf - - inline def *++ (x: MatrixD): MatrixD = convf (x) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Flatten this matrix in row-major fashion, returning a vector containing - * all the elements from the matrix. - */ - def flatten: VectorD = - val a = Array.ofDim [Double] (dim * dim2) - var k = 0 - for i <- indices do - val v_i = v(i) - var j = 0 - cfor (j < dim2, j += 1) { a(k) = v_i(j); k += 1 } - end for - new VectorD (a.length, a) - end flatten - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Solve for x using back substitution (/~) in the equation - * u * x = y - * where this matrix (u) must be upper triangular. 
- * @see MatLab's / operator - * @param y the constant vector - */ - def /~ (y: VectorD): VectorD = - val a = Array.ofDim [Double] (dim2) // array to hold solution - val b = y.v // y's internal array - for k <- dim2 - 1 to 0 by -1 do // solve for x in u*x = y - val u_k = v(k) // k-th row - var sum = 0.0 - for j <- k + 1 until dim2 do sum += u_k(j) * a(j) - a(k) = (b(k) - sum) / v(k)(k) - end for - new VectorD (dim2, a) // return vector x - end /~ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether this matrix and matrix y are nearly equal. - * @param y the other matrix - */ - def =~ (y: MatrixD): Boolean = - if dim != y.dim || dim2 != y.dim2 then return false - - var close = true - breakable { - for i <- indices; j <- indices2 do - if ! (v(i)(j) =~ y.v(i)(j)) then { close = false; break () } - end for - } // breakable - close - end =~ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show how this matrix and matrix y differ, the first element and row that differ. - * @param y the other matrix - */ - def showDiff (y: MatrixD): Unit = - if dim != y.dim then println (s"showDiff: dim = $dim != y.dim = ${y.dim}") - if dim2 != y.dim2 then println (s"showDiff: dim2 = $dim2 != y.dim2 = ${y.dim2}") - - breakable { - for i <- indices; j <- indices2 do - if ! (v(i)(j) =~ y.v(i)(j)) then - println (s"showDiff: v($i)($j) = ${v(i)(j)} != y.v($i)($j) = ${y.v(i)(j)}") - println (s"showDiff: v($i) = ${v(i)} \n y.v($i) = ${y.v(i)}") - break () - end if - end for - } // breakable - end showDiff - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Map each row of this matrix by applying function f to each row vector and - * returning the collected result as a vector. 
- * @param f the vector to scalar function to apply - */ - def map (f: FunctionV2S): VectorD = - VectorD (for i <- indices yield f(apply(i))) - end map - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Map each row of this matrix by applying function f to each row vector and - * returning the collected result as a matrix. - * @param f the vector to vector function to apply - */ - def mmap (f: FunctionV2V): MatrixD = - MatrixD (for i <- indices yield f(apply(i))) - end mmap - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Map each element of this matrix by applying function f to each element and - * returning the collected result as a matrix. - * @param f the scalar to scalar function to apply - */ - def map_ (f: FunctionS2S): MatrixD = - val x = new MatrixD (dim, dim2) - for i <- indices; j <- indices2 do x.v(i)(j) = f(v(i)(j)) - x - end map_ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Log transform this matrix by using math.log1p (avoiding the log (0) problem). - */ - def log1p: MatrixD = map_ (math.log1p (_)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Exp transform this matrix by using math.expm1 (the inverse of log1p). - */ - def expm1: MatrixD = map_ (math.expm1 (_)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the sum of this matrix, i.e., the sum of all its elements. - */ - def sum: Double = - var s = 0.0 - for i <- indices; j <- indices2 do s += v(i)(j) - s - end sum - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the column sums of this matrix, i.e., the sums for each of its columns. 
- */ - def sumV: VectorD = - val s = new VectorD (dim2) - for i <- indices; j <- indices2 do s(j) += v(i)(j) - s - end sumV - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the row sums of this matrix, i.e., the sums for each of its rows. - */ - def sumVr: VectorD = - val s = new VectorD (dim) - for i <- indices; j <- indices2 do s(i) += v(i)(j) - s - end sumVr - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the maximum value for the entire matrix. - */ - def mmax: Double = - var x = v(0).max - for i <- 1 until dim do { val z = v(i).max; if z > x then x = z } - x - end mmax - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the maximum value for each column in the matrix. - */ - def max: VectorD = VectorD (for j <- indices2 yield apply(?, j).max) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the minimum value for the entire matrix. - */ - def mmin: Double = - var x = v(0).min - for i <- 1 until dim do { val z = v(i).min; if z < x then x = z } - x - end mmin - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the minimum value for each column in the matrix. - */ - def min: VectorD = VectorD (for j <- indices2 yield apply(?, j).min) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the trace of this matrix, i.e., the sum of the elements on the - * main diagonal. Should also equal the sum of the eigenvalues. - * @see Eigen.scala - */ - def trace: Double = - if dim != dim2 then flaw ("trace", "trace only works on square matrices") - - var sum = 0.0 - for i <- indices do sum += v(i)(i) - sum - end trace - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the 1-norm of this matrix, i.e., the maximum 1-norm of the - * column vectors. 
This is useful for comparing matrices (a - b).norm1. - * @see en.wikipedia.org/wiki/Matrix_norm - */ - def norm1: Double = (for j <- indices2 yield apply(?, j).norm1).max - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the square of the Frobenius-norm of this matrix, i.e., - * the sum of the squared values over all the elements (sse). - */ - def normFSq: Double = - var sum = 0.0 - for i <- indices do sum += apply(i).normSq - sum - end normFSq - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the Frobenius-norm of 'this' matrix, i.e., the square root of - * the sum of the squared values over all the elements (sqrt (sse)). - * @see en.wikipedia.org/wiki/Matrix_norm#Frobenius_norm - */ - inline def normF: Double = sqrt (normFSq) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the column means of this matrix. - */ - def mean: VectorD = VectorD (for j <- indices2 yield apply(?, j).mean) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the matrix/grand mean of this matrix. - */ - def mmean: Double = sum / (dim * dim2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the column variances of this matrix. - */ - def variance: VectorD = VectorD (for j <- indices2 yield apply(?, j).variance) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the column standard deviations of this matrix. - */ - def stdev: VectorD = VectorD (for j <- indices2 yield apply(?, j).stdev) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return a matrix that is in the reverse row order of this matrix. 
- */ - def reverse: MatrixD = new MatrixD (dim, dim2, v.reverse) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether this matrix is symmetric (i.e, equals its transpose). - */ - def isSymmetric: Boolean = - var symm = true - breakable { - for i <- indices; j <- 0 until i do - if v(i)(j) != v(j)(i) then - symm = false - println (s"MatrixD.isSymmetric: v($i)($j) = ${v(i)(j)} != v($j)($i) = ${v(j)(i)}") - break () - end for - } // breakable - symm - end isSymmetric - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether this matrix is non-negative (has no elements less than 0). - */ - def isNonnegative: Boolean = - var nonneg = true - breakable { - for i <- indices; j <- indices2 do - if v(i)(j) < 0.0 then { nonneg = false; break () } - end for - } // breakable - nonneg - end isNonnegative - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Swap (in-place) rows i and k in this matrix. - * @param i the first row in the swap - * @param k the second row in the swap - */ - def swap (i: Int, k: Int): Unit = - val tmp = v(i); v(i) = v(k); v(k) = tmp - end swap - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Swap (in-place) the elements in rows i and k starting from column col. - * @param i the first row in the swap - * @param k the second row in the swap - * @param col the starting column for the swap - */ - def swap (i: Int, k: Int, col: Int): Unit = - val a = this; var tmp = 0.0 - for j <- col until dim2 do { tmp = a(k, j); a(k, j) = a(i, j); a(i, j) = tmp } - end swap - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Swap (in-place) columns j and l in this matrix. 
- * @param j the first column in the swap - * @param l the second column in the swap - */ - def swapCol (j: Int, l: Int): Unit = - var tmp = 0.0 - for i <- indices do { tmp = v(i)(l); v(i)(l) = v(i)(j); v(i)(j) = tmp } - end swapCol - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Center this matrix to zero mean, column-wise, by subtracting the mean. - * @param mu_x the vector of column means for this matrix - */ - def center (mu_x: VectorD = mean): MatrixD = this - mu_x - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the sample covariance matrix for the columns of this matrix. - */ - def cov: MatrixD = - val z = center () - (z.transpose * z) / (dim.toDouble - 1.0) - end cov - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the population covariance matrix for the columns of this matrix. - */ - def cov_ : MatrixD = - val z = center () - (z.transpose * z) / dim.toDouble - end cov_ - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the correlation matrix for the columns of this matrix. - * If either variance is zero (column i, column j), will result in Not-a-Number (NaN), - * return one if the vectors are the same, or -0 (indicating undefined). - * Note: sample vs. population results in essentially the same values. 
- * @see the related cos function - */ - def corr: MatrixD = - val covv = cov // sample covariance matrix - val cor = MatrixD.eye (covv.dim, covv.dim) // correlation matrix - - for i <- covv.indices do - val var_i = covv (i, i) // variance of column i - for j <- 0 until i do - cor(i, j) = covv (i, j) / sqrt (var_i * covv (j, j)) - if cor(i, j).isNaN then cor(i, j) = if v(i) == v(j) then 1.0 else -0.0 - cor(j, i) = cor (i, j) - end for - end for - cor - end corr - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the correlation vector for the columns of this matrix with vector y. - * @param y the vector to compute correlations with - * @param skip the number of initial columns to skip (e.g., first column of all ones) - */ - def corr (y: VectorD, skip: Int = 0): VectorD = - VectorD (for j <- skip until dim2 yield apply(?, j) corr y) - end corr - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the cosine similarity matrix for the columns of matrix 'x'. - * If the vectors are centered, will give the correlation. - * @see stats.stackexchange.com/questions/97051/] - * building-the-connection-between-cosine-similarity-and-correlation-in-r - */ - def cos: MatrixD = - val cs = MatrixD.eye (dim2, dim2) // cosine matrix - - for i <- cs.indices do - val y = apply(?, i) // ith column vector - val ny = y.norm - for j <- 0 until i do - val z = apply(?, j) // jth column vector - val nz = z.norm - cs(i, j) = (y dot z) / (ny * nz) - cs(j, i) = cs (i, j) - end for - end for - cs - end cos - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this matrix to the same matrix, i.e., return this matrix. - */ - def toMatrixD: MatrixD = this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this matrix to a matrix where all the elements have integer values. 
- */ - def toInt: MatrixD = - val x = new MatrixD (dim, dim2) - for i <- indices; j <- indices2 do x.v(i)(j) = round (v(i)(j)).toDouble - x - end toInt - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this matrix to a string. - */ - override def toString: String = - val sb = new StringBuilder ("\nMatrixD (") - if dim == 0 || dim2 == 0 then return sb.append (")").mkString - for i <- indices; j <- indices2 do - sb.append (fString.format (v(i)(j))) - if j == dim2-1 then sb.replace (sb.length-1, sb.length, "\n \t") - end for - sb.replace (sb.length-4, sb.length, ")").mkString - end toString - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Write this matrix to a CSV-formatted text file with name fileName. - * @param fileName the name of file to hold the data - */ - def write (fileName: String): Unit = - val out = new PrintWriter (fileName) - for i <- indices do - for j <- indices2 do - out.print (v(i)(j)) - if j < dim2-1 then out.print (",") - end for - out.println () - end for - out.close - end write - -end MatrixD - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `MatrixD companion object provides factory methods. - */ -object MatrixD: - - private val flaw = flawf ("MatrixD") // flaw function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a matrix from repeated values. - * @param dim the (row, column) dimensions - * @param u the repeated values - */ - def apply (dim: (Int, Int), u: Double*): MatrixD = - val a = Array.ofDim [Double] (dim._1, dim._2) - for i <- 0 until dim._1; j <- 0 until dim._2 do a(i)(j) = u(i * dim._2 + j) - new MatrixD (dim._1, dim._2, a) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a square matrix of dimension dim where all elements equal zero. 
- * @param dim the square dimensions - */ - def apply (dim: Int): MatrixD = new MatrixD (dim, dim) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a matrix from a variable argument list of vectors (row-wise). - * Use transpose to make it column-wise. - * @param vs the vararg list of vectors - */ - def apply (vs: VectorD*): MatrixD = - val (m, n) = (vs.length, vs(0).length) - val a = Array.ofDim [Array [Double]] (m) - for i <- vs.indices do a(i) = vs(i).v - new MatrixD (m, n, a) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a matrix from an mutable `IndexedSeq` of vectors (row-wise). - * Use transpose to make it column-wise. - * @param vs the indexed sequence of vectors - */ - def apply (vs: IndexedSeq [VectorD]): MatrixD = - val (m, n) = (vs.length, vs(0).length) - val a = Array.ofDim [Array [Double]] (m) - for i <- vs.indices do a(i) = vs(i).v - new MatrixD (m, n, a) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a matrix from an immutable `IndexedSeq` of vectors (row-wise), - * as produce by for yield. Use transpose to make it column-wise. - * @param vs the indexed sequence of vectors - */ - def apply (vs: collection.immutable.IndexedSeq [VectorD]): MatrixD = - val (m, n) = (vs.length, vs(0).length) - val a = Array.ofDim [Array [Double]] (m) - for i <- vs.indices do a(i) = vs(i).v - new MatrixD (m, n, a) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an m-by-1 matrix from an m-vector (column-wise). 
- * @param v the vector to build the matrix from - */ - def fromVector (v: VectorD): MatrixD = - val x = new MatrixD (v.dim, 1) - for i <- x.indices do x(i, 0) = v(i) - x - end fromVector - - private val DEF_SEP = ',' // default character separating the values - private val PROGRESS = 1000 // give feedback at progress count - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a matrix by reading from a text file, e.g., a CSV file. - * @param fileName the name of file holding the data - * @param skip the initial number of lines/rows to skip - * @param skipCol the initial number of columns to skip - * @param sp the character used to separate values (',', '\t', ...) - * @param fullPath flag indicating whether to use full-path or path relative to 'DATA_DIR' - * defaults to false (relative paths) - */ - def load (fileName: String, skip: Int = 0, skipCol: Int = 0, - sp: Char = DEF_SEP, fullPath: Boolean = false): MatrixD = - val lines = readFileIntoArray (fileName, fullPath) // array of strings/lines - val m = lines.length // number lines in the file - val mm = m - skip // number of lines with data - val a = Array.ofDim [Array [Double]] (mm) // array buffer to hold data values - var n = -1 // number of values in a row (TBD) - - for i <- skip until m do - val j = i - skip - a(j) = for str <- lines(i).split (sp).drop (skipCol) yield str.mkDouble - if (j+1) % PROGRESS == 0 then println (s"load: read $j data rows so far ...") - if n < 0 then n = a(j).length - else if a(j).length != n then flaw ("load", s"row $j has the wrong length ${a(j).length} != $n") - end for - println (s"load: read in an $mm-by-$n matrix from $fileName") - new MatrixD (mm, n, a) - end load - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a matrix by reading from a text file, e.g., a CSV file. - * Convert string columns into ordinal/integer columns. 
- * @param fileName the name of file holding the data - * @param skip the initial number of lines/rows to skip - * @param skipCol the initial number of columns to skip - * @param sp the character used to separate values (',', '\t', ...) - * @param fullPath flag indicating whether to use full-path or path relative to 'DATA_DIR' - * defaults to false (relative paths) - * @param ordCols the set of ordinal columns (column indices) - * @param ordStrs the corresponding strings for the ordinal/integer values - */ - def loadStr (fileName: String, skip: Int = 0, skipCol: Int = 0, - sp: Char = DEF_SEP, fullPath: Boolean = false) - (ordCols: Set [Int], ordStrs: VectorS*): MatrixD = - val lines = readFileIntoArray (fileName, fullPath) // array of strings/lines - val m = lines.length // number lines in the file - val mm = m - skip // number of lines with data - val a = Array.ofDim [Array [Double]] (mm) // array buffer to hold data values - var n = -1 // number of values in a row (TBD) - - for i <- skip until m do - val j = i - skip - var col, ordCol = -1 - a(j) = for str <- lines(i).split (sp).drop (skipCol) yield - col += 1 - if ordCols contains col then - ordCol += 1 - mkOrdinal (str, ordStrs(ordCol)) - else str.mkDouble - if (j+1) % PROGRESS == 0 then println (s"load: read $j data rows so far ...") - if n < 0 then n = a(j).length - else if a(j).length != n then flaw ("load", s"row $j has the wrong length ${a(j).length} != $n") - end for - println (s"load: read in an $mm-by-$n matrix from $fileName") - new MatrixD (mm, n, a) - end loadStr - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given a string value, convert it to an ordinal/integer based on the ordStr mapping. 
- * @param str the string to be mapped to an ordinal value - * @param ordStr the VectorS containing strings that can be ordered, e.g., - * VectorS ("low", "medium", "high") for 0, 1, 2 - */ - def mkOrdinal (str: String, ordStr: VectorS): Int = - val (xe, map) = ordStr.map2Int // @see `VectorS` - map (str) // return the str mapped to an integer - end mkOrdinal - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a matrix-vector pair (x, y) by reading from a text file, e.g., a CSV file. - * Use readFileIter to only read the necessary columns from the file. - * @param fileName the name of file holding the data - * @param xCols the columns that are to make up the x-matrix - * @param yCol the column that is to make up the y-vector (use the defualt -1 to skip this) - * @param skip the initial number of lines to skip - * @param sp the character used to separate values (',', '\t', ...) - * @param fullPath flag indivating whether to use full-path or path relative to 'DATA_DIR' - * defaults to false (relative paths) - */ - def loadIter (fileName: String, xCols: Array [Int], yCol: Int = -1, skip: Int = 0, - sp: Char = DEF_SEP, fullPath: Boolean = true): (MatrixD, VectorD) = - val (it, buffer) = readFileIter (fileName, fullPath) // iterator of strings/lines, io buffer - val xAb = ArrayBuffer [Array [Double]] () // array buffer to hold x-matrix - val yAb = ArrayBuffer [Double] () // array buffer to hold y-vector - var n = -1 // number of values in a row (determined below) - - var i = 0 // line number - while it.hasNext do - val line = it.next() // read next line - if i >= skip then - val k = i - skip // row number - val token = line.split (sp) - xAb += (for j <- xCols yield token(j).mkDouble).toArray - if yCol >= 0 then yAb += token (yCol).mkDouble - if (k+1) % PROGRESS == 0 then println (s"loadIter: read $k data rows so far ...") - if n < 0 then n = xAb(k).length - else if xAb(k).length != n then flaw ("loadIter", s"row $k has the wrong 
length ${xAb(k).length} != $n") - end if - i += 1 - end while - buffer.close () - - val m = xAb.size - (new MatrixD (m, n, xAb.toArray), new VectorD (m, yAb.toArray)) - end loadIter - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a matrix of dimension dim by 1 that consists of all ones. - * @param dim the row dimension - */ - def one (dim: Int): MatrixD = - val a = Array.fill (dim, 1)(1.0) - new MatrixD (dim, 1, a) - end one - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a matrix of dimensions dim by dim2 where all elements equal zero. - * @param dim the row dimension - * @param dim2 the column dimension - */ - def eye (dim: Int, dim2: Int): MatrixD = - val x = new MatrixD (dim, dim2) - x(?, ?) = 1.0 // set diagonal to one - x - end eye - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a matrix of dimensions dim by dim2 where all elements equal to the given value. - * @param dim the row dimension - * @param dim2 the column dimension - * @param value the given value to assign to all elements - */ - def fill (dim: Int, dim2: Int, value: Double): MatrixD = - val a = Array.fill (dim, dim2)(value) - new MatrixD (dim, dim2, a) - end fill - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** A null matrix of type `MatrixD`. - */ - val nullm: MatrixD = null.asInstanceOf [MatrixD] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the outer product of vector x and vector y. The result of the - * outer product is a matrix where element (i, j) is the product of i-th element - * of x with the j-th element of y. 
- * @param x the first vector - * @param y the second vector - */ - def outer (x: VectorD, y: VectorD): MatrixD = - val a = Array.ofDim [Double] (x.dim, y.dim) - for i <- x.indices; j <- y.indices do a(i)(j) = x(i) * y(j) - new MatrixD (x.dim, y.dim, a) - end outer - - inline def ⊗ (x: VectorD, y: VectorD): MatrixD = outer (x, y) // unicode tensor product - -end MatrixD - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `MatrixDExample` object provides example instances of the`MatrixD` class. - */ -object MatrixDExample: - - val x = MatrixD ((8, 8), 1, 2, 3, 4, 5, 6, 7, 8, - 2, 3, 4, 5, 6, 7, 8, 9, - 3, 4, 5, 6, 7, 8, 9, 10, - 4, 5, 6, 7, 8, 9, 10, 11, - 5, 6, 7, 8, 9, 10, 11, 12, - 6, 7, 8, 9, 10, 11, 12, 13, - 7, 8, 9, 10, 11, 12, 13, 14, - 8, 9, 10, 11, 12, 13, 14, 15) - - val y = MatrixD ((8, 8), 1, 2, 3, 4, 5, 6, 7, 8, - 2, 3, 4, 5, 6, 7, 8, 9, - 3, 4, 5, 6, 7, 8, 9, 10, - 4, 5, 6, 7, 8, 9, 10, 11, - 5, 6, 7, 8, 9, 10, 11, 12, - 6, 7, 8, 9, 10, 11, 12, 13, - 7, 8, 9, 10, 11, 12, 13, 14, - 8, 9, 10, 11, 12, 13, 14, 15) - -end MatrixDExample - -import MatrixDExample.{x, y} - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `matrixDTest` main function tests the `MatrixD` class. 
Compares the performance - * of matrix addition implementations - * > runMain scalation.mathstat.matrixDTest - */ -@main def matrixDTest (): Unit = - - println (s"x = $x") - - banner ("Test apply methods") - - println (s" x(3, 2) = ${x(3, 2)}") // element (3, 2) - println (s" x(3 to 6, 2 to 4) = ${x(3 to 6, 2 to 4)}") // slice of rows and columns - println (s" x(3) = ${x(3)}") // row 3 - println (s" x(3 to 6) = ${x(3 to 6)}") // slice of rows - println (s" x(?, 2) = ${x(?, 2)}") // column 2 - println (s" x(?, 2 to 4) = ${x(?, 2 to 4)}") // slice of columns - - banner ("Test element-wise methods") - - println (s" x + y = ${x + y}") - println (s" x - y = ${x - y}") - println (s" x *~ y = ${x *~ y}") - println (s" x / y = ${x / y}") - println (s" x ~^ 2 = ${x ~^ 2}") - - println (s" x.crossAll = ${x.crossAll}") - - val a = new MatrixD (1000, 1000) - val b = new MatrixD (1000, 1000) - for i <- a.indices; j <- a.indices2 do { a(i, j) = i + j; b(i, j) = a(i, j) } - - for it <- 1 to 10 do - banner (s"Timing results to iteration $it") - val t1 = gauge { a + b }; println (s" a + b = $t1") - val t2 = gauge { a - b }; println (s" a - b = $t2") - val t3 = gauge { a *~ b }; println (s" a *~ b = $t3") - val t4 = gauge { a / b }; println (s" a / b = $t4") - end for - -end matrixDTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `matrixDTest2` main function tests the `MatrixD` class. Compares the performance - * of matrix multiplication implementations. 
- * > runMain scalation.mathstat.matrixDTest2 - */ -@main def matrixDTest2 (): Unit = - - println (s" x mul y = ${x mul y}") - println (s" x * y = ${x * y}") - println (s" x dot y = ${x dot y}") - println (s" x * y(0) = ${x * y(0)}") - - val a = new MatrixD (1000, 1000) - val b = new MatrixD (1000, 1000) - for i <- a.indices; j <- a.indices2 do { a(i, j) = i + j; b(i, j) = a(i, j) } - - for it <- 1 to 10 do - banner (s"Timing results to iteration $it") - val t1 = gauge { a mul b }; println (s" a mul b = $t1") - val t2 = gauge { a * b }; println (s" a * b = $t2") - val t3 = gauge { a dot b }; println (s" a dot b = $t3") - val t4 = gauge { a * b(0) }; println (s" a * b(0) = $t4") - end for - -end matrixDTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `matrixDTest3` main function tests the `MatrixD` class. Test the back substitution - * (/~) operator for solving for x in u * x = y. - * It also computes an outer product - * > runMain scalation.mathstat.matrixDTest3 - */ -@main def matrixDTest3 (): Unit = - - import MatrixD.⊗ - - val u = MatrixD ((3, 3), 1, 2, 3, - 0, 4, 5, - 0, 0, 6) - val y = VectorD (1, 2, 3) - - banner ("Back Substitution") - val x = u /~ y - println (s"x = u /~ y = $x") - assert (u * x == y) - println (s"y = $y") - - banner ("Outer Product") - println ("⊗ (x, y) = " + ⊗ (x, y)) - -end matrixDTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `matrixDTest4` main function tests the `MatrixD` class. 
Test the split - * methods: split and split_ as well as shiftDiag and unshiftDiag - * > runMain scalation.mathstat.matrixDTest4 - */ -@main def matrixDTest4 (): Unit = - - val x = MatrixD ((10, 3), 1, 1, 1, - 2, 2, 2, - 3, 3, 3, - 4, 4, 4, - 5, 5, 5, - 6, 6, 5, - 7, 7, 7, - 8, 8, 8, - 9, 9, 9, - 10, 10, 10) - - val idx = VectorI (1, 2, 5, 9) - - banner ("Test split methods") - val (x_e, x_) = x.split (idx) - val (z_e, z_) = x.split_ (idx) - - println (s"split: x_e = $x_e") - println (s"split_: z_e = $z_e") - - println (s"split: x_ = $x_") - println (s"split_: z_ = $z_") - - assert (z_e =~ x_e) - assert (z_ =~ x_) - - banner ("Test shiftDiag methods") - val _x = x.shiftDiag - val _x_ = _x.unshiftDiag - - println (s"x = x = $x") - println (s"_x = x.shiftDiag = $_x") - println (s"_x_ = _x.unshiftDiag = $_x_") - - assert (_x_ =~ x) - -end matrixDTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `matrixDTest5` main function tests the `MatrixD` class. Test the covariance - * and correlation. - * > runMain scalation.mathstat.matrixDTest5 - */ -@main def matrixDTest5 (): Unit = - - val x = MatrixD ((6, 2), 1, 1, - 2, 3, - 3, 3, - 4, 5, - 5, 4, - 6, 4) - - println (s"Data Matrix x = $x") - println (s"Samp. Covariance x.cov = ${x.cov}") - println (s"Pop. Covariance x.cov_ = ${x.cov_}") - println (s"Correlation x.corr = ${x.corr}") - -end matrixDTest5 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `matrixDTest6` main function tests the `MatrixD` class. Test the insert method. 
- * > runMain scalation.mathstat.matrixDTest6 - */ -@main def matrixDTest6 (): Unit = - - val x = MatrixD ((4, 5), 1, 2, 3, 4, 5, - 1, 2, 3, 4, 5, - 1, 2, 3, 4, 5, - 1, 2, 3, 4, 5) - - println (s"Matrix x = $x") - - banner ("x.insert (1, 3, u)") - val u = VectorD (1, 2, 3, 4) - x.insert (1, 3, u) - println (s"Matrix x = $x") - -end matrixDTest6 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `matrixDTest7` main function tests the `MatrixD` class. Test the convolution - * operators. - * > runMain scalation.mathstat.matrixDTest7 - */ -@main def matrixDTest7 (): Unit = - - - val x = MatrixD ((5, 5), 0, 0, 2, 1, 0, - 0, 0, 0, 1, 2, - 1, 2, 2, 0, 2, - 2, 0, 0, 0, 1, - 2, 2, 2, 0, 1) - - val c = MatrixD ((2, 2), 1, 1, - 0, 1) - - banner ("Convolution Operators") - println (s"c conv x = ${c conv x}") // conv valid convolution, no reversal - println (s"c *+ x = ${c *+ x}") // *+ valid convolution, no reversal - println (s"c conv_ x = ${c conv_ x}") // conv_ valid convolution, with reversal - println (s"c convs x = ${c convs x}") // convs same convolution, with reversal - println (s"c *~+ x = ${c *~+ x}") // *~+ same convolution, with reversal - println (s"c convf x = ${c convf x}") // convf full convolution, with reversal - println (s"c *++ x = ${c convf x}") // *++ full convolution, with reversal - -end matrixDTest7 - diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD.tasty b/target/scala-3.6.4/classes/scalation/mathstat/MatrixD.tasty deleted file mode 100644 index 30b0fe727..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2$.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2$.class deleted file mode 100644 index 753f59883..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2$package$.class deleted file mode 100644 index 0e2513ea8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2$package.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2$package.class deleted file mode 100644 index f5739a6c3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2$package.tasty deleted file mode 100644 index 8ebe37747..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2.class deleted file mode 100644 index 16ce561fd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2.tasty b/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2.tasty deleted file mode 100644 index 37e16ad5e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2Example$.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2Example$.class deleted file mode 100644 index 3b3d4f5d7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2Example$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2Example.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2Example.class deleted file mode 100644 index 
c79fca1ed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2Example.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2Example.tasty b/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2Example.tasty deleted file mode 100644 index c1190e15e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixD2Example.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixDExample$.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixDExample$.class deleted file mode 100644 index 33cb53d54..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixDExample$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixDExample.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixDExample.class deleted file mode 100644 index 0bc3cdfca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixDExample.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixDExample.tasty b/target/scala-3.6.4/classes/scalation/mathstat/MatrixDExample.tasty deleted file mode 100644 index 75f5e0ea7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixDExample.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixDOps$.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixDOps$.class deleted file mode 100644 index 5502437ea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixDOps$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixDOps.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixDOps.class deleted file mode 100644 index 89432a264..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixDOps.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/mathstat/MatrixDOps.tasty b/target/scala-3.6.4/classes/scalation/mathstat/MatrixDOps.tasty deleted file mode 100644 index 08a9ca014..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixDOps.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixI$.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixI$.class deleted file mode 100644 index 5e3056251..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixI$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixI.class b/target/scala-3.6.4/classes/scalation/mathstat/MatrixI.class deleted file mode 100644 index bf3a6a6dc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixI.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/MatrixI.tasty b/target/scala-3.6.4/classes/scalation/mathstat/MatrixI.tasty deleted file mode 100644 index 2d6213a2e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/MatrixI.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Pivoting.class b/target/scala-3.6.4/classes/scalation/mathstat/Pivoting.class deleted file mode 100644 index 35f015a88..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Pivoting.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Pivoting.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Pivoting.tasty deleted file mode 100644 index df1db1e2b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Pivoting.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/PivotingTest$.class b/target/scala-3.6.4/classes/scalation/mathstat/PivotingTest$.class deleted file mode 100644 index 88f196283..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/PivotingTest$.class and /dev/null 
differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/PivotingTest.class b/target/scala-3.6.4/classes/scalation/mathstat/PivotingTest.class deleted file mode 100644 index 18bfd7e13..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/PivotingTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/PivotingTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/PivotingTest.tasty deleted file mode 100644 index a221bc98e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/PivotingTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Plot$.class b/target/scala-3.6.4/classes/scalation/mathstat/Plot$.class deleted file mode 100644 index 21c295ca3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Plot$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Plot$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Plot$package$.class deleted file mode 100644 index 5ae3e75ec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Plot$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Plot$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Plot$package.class deleted file mode 100644 index b95244b32..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Plot$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Plot$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Plot$package.tasty deleted file mode 100644 index 7b5f44e31..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Plot$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Plot.class b/target/scala-3.6.4/classes/scalation/mathstat/Plot.class deleted file mode 100644 index 724635d57..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/Plot.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Plot.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Plot.tasty deleted file mode 100644 index 802c2f4a3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Plot.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/PlotC$.class b/target/scala-3.6.4/classes/scalation/mathstat/PlotC$.class deleted file mode 100644 index 591977b36..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/PlotC$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/PlotC$Canvas.class b/target/scala-3.6.4/classes/scalation/mathstat/PlotC$Canvas.class deleted file mode 100644 index 2238b2139..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/PlotC$Canvas.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/PlotC$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/PlotC$package$.class deleted file mode 100644 index d318328de..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/PlotC$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/PlotC$package.class b/target/scala-3.6.4/classes/scalation/mathstat/PlotC$package.class deleted file mode 100644 index 8837c9b27..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/PlotC$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/PlotC$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/PlotC$package.tasty deleted file mode 100644 index 2517632cc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/PlotC$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/PlotC.class b/target/scala-3.6.4/classes/scalation/mathstat/PlotC.class deleted file mode 100644 index 
9f9e6509a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/PlotC.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/PlotC.tasty b/target/scala-3.6.4/classes/scalation/mathstat/PlotC.tasty deleted file mode 100644 index 270fecbf6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/PlotC.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/PlotM$.class b/target/scala-3.6.4/classes/scalation/mathstat/PlotM$.class deleted file mode 100644 index 24c8d527b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/PlotM$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/PlotM$CanvasP.class b/target/scala-3.6.4/classes/scalation/mathstat/PlotM$CanvasP.class deleted file mode 100644 index cfde23a7f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/PlotM$CanvasP.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/PlotM$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/PlotM$package$.class deleted file mode 100644 index 2a6320813..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/PlotM$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/PlotM$package.class b/target/scala-3.6.4/classes/scalation/mathstat/PlotM$package.class deleted file mode 100644 index c642ab606..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/PlotM$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/PlotM$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/PlotM$package.tasty deleted file mode 100644 index 4e21bf2a1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/PlotM$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/PlotM.class 
b/target/scala-3.6.4/classes/scalation/mathstat/PlotM.class deleted file mode 100644 index 2fd8cf2d9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/PlotM.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/PlotM.tasty b/target/scala-3.6.4/classes/scalation/mathstat/PlotM.tasty deleted file mode 100644 index a22b0482e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/PlotM.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Probability$.class b/target/scala-3.6.4/classes/scalation/mathstat/Probability$.class deleted file mode 100644 index 54a7929b2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Probability$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Probability$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Probability$package$.class deleted file mode 100644 index d404f7d83..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Probability$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Probability$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Probability$package.class deleted file mode 100644 index c92e32610..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Probability$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Probability$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Probability$package.tasty deleted file mode 100644 index 5f1ac56b8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Probability$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Probability.class b/target/scala-3.6.4/classes/scalation/mathstat/Probability.class deleted file mode 100644 index fe0bd2a90..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/Probability.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Probability.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Probability.tasty deleted file mode 100644 index d0b391060..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Probability.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D$.class b/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D$.class deleted file mode 100644 index f6ad86c46..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D$package$.class deleted file mode 100644 index 1ada6e0ff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D$package.class b/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D$package.class deleted file mode 100644 index 2926833dd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D$package.tasty deleted file mode 100644 index ebff62fce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D.class b/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D.class deleted file mode 100644 index 033b0c039..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D.tasty 
b/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D.tasty deleted file mode 100644 index 7a258e68e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/RTensor4D.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/RTensorD$.class b/target/scala-3.6.4/classes/scalation/mathstat/RTensorD$.class deleted file mode 100644 index 0435cb82c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/RTensorD$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/RTensorD$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/RTensorD$package$.class deleted file mode 100644 index bd387cb15..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/RTensorD$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/RTensorD$package.class b/target/scala-3.6.4/classes/scalation/mathstat/RTensorD$package.class deleted file mode 100644 index d206242c7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/RTensorD$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/RTensorD$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/RTensorD$package.tasty deleted file mode 100644 index 30bceb0c4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/RTensorD$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/RTensorD.class b/target/scala-3.6.4/classes/scalation/mathstat/RTensorD.class deleted file mode 100644 index a75a3c55c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/RTensorD.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/RTensorD.tasty b/target/scala-3.6.4/classes/scalation/mathstat/RTensorD.tasty deleted file mode 100644 index ad53de90e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/RTensorD.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/StatTable$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/StatTable$package$.class deleted file mode 100644 index c8c15bd01..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/StatTable$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/StatTable$package.class b/target/scala-3.6.4/classes/scalation/mathstat/StatTable$package.class deleted file mode 100644 index 6be817530..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/StatTable$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/StatTable$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/StatTable$package.tasty deleted file mode 100644 index 3d85ea601..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/StatTable$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/StatTable.class b/target/scala-3.6.4/classes/scalation/mathstat/StatTable.class deleted file mode 100644 index ec4bb3d01..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/StatTable.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/StatTable.tasty b/target/scala-3.6.4/classes/scalation/mathstat/StatTable.tasty deleted file mode 100644 index 48aadc5b1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/StatTable.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Statistic$.class b/target/scala-3.6.4/classes/scalation/mathstat/Statistic$.class deleted file mode 100644 index c322811be..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Statistic$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Statistic$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Statistic$package$.class deleted file 
mode 100644 index eb4a0d53c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Statistic$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Statistic$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Statistic$package.class deleted file mode 100644 index 64a9183ed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Statistic$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Statistic$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Statistic$package.tasty deleted file mode 100644 index f4099b986..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Statistic$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Statistic.class b/target/scala-3.6.4/classes/scalation/mathstat/Statistic.class deleted file mode 100644 index 09998fe1a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Statistic.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Statistic.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Statistic.tasty deleted file mode 100644 index c2b978228..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Statistic.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS$.class b/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS$.class deleted file mode 100644 index eb1fc6421..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS$package$.class deleted file mode 100644 index 64a57a96c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS$package$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS$package.class deleted file mode 100644 index daf0a02a2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS$package.tasty deleted file mode 100644 index bfe65a7f5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS.class b/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS.class deleted file mode 100644 index ba9d63191..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS.tasty deleted file mode 100644 index 07975b6ce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Stats4TS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/SymTriMatrixD.class b/target/scala-3.6.4/classes/scalation/mathstat/SymTriMatrixD.class deleted file mode 100644 index 7b2eacc01..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/SymTriMatrixD.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/SymTriMatrixD.tasty b/target/scala-3.6.4/classes/scalation/mathstat/SymTriMatrixD.tasty deleted file mode 100644 index 10e7e2912..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/SymTriMatrixD.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/SymmetricQRstep$.class b/target/scala-3.6.4/classes/scalation/mathstat/SymmetricQRstep$.class deleted file mode 100644 index b96423807..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/mathstat/SymmetricQRstep$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/SymmetricQRstep.class b/target/scala-3.6.4/classes/scalation/mathstat/SymmetricQRstep.class deleted file mode 100644 index b686a8b2f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/SymmetricQRstep.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/SymmetricQRstep.tasty b/target/scala-3.6.4/classes/scalation/mathstat/SymmetricQRstep.tasty deleted file mode 100644 index d24ce9b76..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/SymmetricQRstep.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TensorD$.class b/target/scala-3.6.4/classes/scalation/mathstat/TensorD$.class deleted file mode 100644 index 690a32f28..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TensorD$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TensorD$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/TensorD$package$.class deleted file mode 100644 index 99feaf92a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TensorD$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TensorD$package.class b/target/scala-3.6.4/classes/scalation/mathstat/TensorD$package.class deleted file mode 100644 index cbe63566d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TensorD$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TensorD$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/TensorD$package.tasty deleted file mode 100644 index b1f51f7c9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TensorD$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TensorD.class 
b/target/scala-3.6.4/classes/scalation/mathstat/TensorD.class deleted file mode 100644 index a407fb4a8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TensorD.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TensorD.scala.bak b/target/scala-3.6.4/classes/scalation/mathstat/TensorD.scala.bak deleted file mode 100644 index a57980c58..000000000 --- a/target/scala-3.6.4/classes/scalation/mathstat/TensorD.scala.bak +++ /dev/null @@ -1,852 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Hao Peng - * @version 2.0 - * @date Thu May 10 15:50:15 EDT 2018 - * @see LICENSE (MIT style license file). - * - * @note Tensor (3D) Algebra - * - * @see www.stat.uchicago.edu/~lekheng/work/icm1.pdf - * @see www.math.ias.edu/csdm/files/13-14/Gnang_Pa_Fi_2014.pdf - * @see www.kolda.net/publication/TensorReview.pdf - * @see tspace.l - */ - -package scalation -package mathstat - -import scala.collection.mutable.IndexedSeq -import scala.math.round -import scala.runtime.ScalaRunTime.stringOf - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Tensorize a vector function (V2V) by applying it to each (row, column) of a tensor. - * @param f the vector function to tensorize - * @param x the tensor to apply the function to - */ -def tensorize (f: FunctionV2V)(x: TensorD): TensorD = - TensorD (x.dim, for i <- x.indices; j <- x.indices2 yield f(x(i, j))) - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Return the complement of index positions idx, e.g., - * comple (Array (1, 3), 5) = Array (0, 2, 4). 
- * param idx the index positions to be complemented - * param dim the exclusive upper bound - */ -def comple (idx: Array [Int], dim: Int): Array [Int] = - val a = Array.ofDim [Int] (dim - idx.size) - var j, l = 0 - for i <- idx do - while j < i do - a(l) = j; j += 1; l += 1 - end while - j += 1; - end for - a -end comple - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TensorD` class is a simple implementation for 3-dimensional tensors. - * The names of the dimensions corresponds to MATLAB (row, column, sheet). - * @see www.kolda.net/publication/TensorReview.pdf for details on layout - * @see `RTensorD` for non-rectangular (ragged) tensors. - * @param dim size of the 1st level/dimension (row) of the tensor (height) - * @param dim2 size of the 2nd level/dimension (column) of the tensor (width) - * @param dim3 size of the 3rd level/dimension (sheet) of the tensor (depth) - * @param v the 3D array for holding the tensor elements - */ -class TensorD (val dim: Int, val dim2: Int, val dim3: Int, - private [mathstat] var v: Array [Array [Array [Double]]] = null) - extends Serializable: - - private val flaw = flawf ("TensorD") // flaw flag - private val TAB = "\t\t" // use "\t" for scala and "\t\t" for sbt - - val indices = 0 until dim // index range for the first level/dimension - val indices2 = 0 until dim2 // index range for the second level/dimension - val indices3 = 0 until dim3 // index range for the third level/dimension - - /** Multi-dimensional array storage for tensor - */ - if v == null then - v = Array.ofDim [Double] (dim, dim2, dim3) - else if dim != v.length || dim2 != v(0).length || dim3 != v(0)(0).length then - flaw ("init", "dimensions are wrong") - end if - - /** Format string used for printing vector values (change using setFormat) - */ - protected var fString = "%g,\t" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a dim by dim by dim cubic tensor. 
- * @param dim the row and column dimension - */ - def this (dim: Int) = { this (dim, dim, dim) } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a tensor from three dimensional array. - * @param u the three dimensional array - */ - def this (u: Array [Array [Array [Double]]]) = { this (u.size, u(0).size, u(0)(0).size, u) } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the row, column and sheet dimensions of this tensor. - */ - inline def dims: (Int, Int, Int) = (dim, dim2, dim3) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the format to the newFormat. - * @param newFormat the new format string - */ - def setFormat (newFormat: String): Unit = fString = newFormat - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retrieve the i, j, k-th SCALAR element from the tensor x_ijk. - * @param i the 1st dimension (row) index of the tensor - * @param j the 2nd dimension (column) index of the tensor - * @param k the 3rd dimension (sheet) index of the tensor - */ - def apply (i: Int, j: Int, k: Int): Double = v(i)(j)(k) - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retrieve the i, j-th VECTOR from the tensor x_ij:. - * @param i the 1st dimension (row) index of the tensor - * @param j the 2nd dimension (column) index of the tensor - */ - def apply (i: Int, j: Int): VectorD = VectorD (v(i)(j).toIndexedSeq) - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retrieve the i, k-th VECTOR from the tensor x_i:k. - * @param i the 1st dimension (row) index of the tensor - * @param all use the all columns indicator ? 
- * @param k the 3rd dimension (sheet) index of the tensor - */ - def apply (i: Int, all: Char, k: Int): VectorD = - val a = Array.ofDim [Double] (dim2) - for j <- 0 until dim2 do a(j) = v(i)(j)(k) - new VectorD (dim2, a) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retrieve the j, k-th VECTOR from the tensor x_:jk. - * @param all use the all rows indicator ? - * @param j the 2nd dimension (column) index of the tensor - * @param k the 3rd dimension (sheet) index of the tensor - */ - def apply (all: Char, j: Int, k: Int): VectorD = - val a = Array.ofDim [Double] (dim) - for i <- 0 until dim do a(i) = v(i)(j)(k) - new VectorD (dim, a) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the i-th ROW FIXED MATRIX from the tensor (horizontal slice x_i::). - * @see www.kolda.net/publication/TensorReview.pdf - * @param i the 1st dimension (row) index of the tensor - */ - def apply (i: Int): MatrixD = new MatrixD (dim2, dim3, v(i)) - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retrieve the j-th COLUMN FIXED MATRIX from the tensor (lateral slice x_:j:). - * @see www.kolda.net/publication/TensorReview.pdf - * @param all use the all rows indicator ? - * @param j the 2nd dimension (column) index of the tensor - */ - def apply (all: Char, j: Int): MatrixD = - val a = Array.ofDim [Double] (dim, dim3) - for i <- 0 until dim; k <- 0 until dim3 do a(i)(k) = v(i)(j)(k) - new MatrixD (dim, dim3, a) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retrieve the k-th SHEET FIXED MATRIX from the tensor (frontal slice x_::k). - * @see www.kolda.net/publication/TensorReview.pdf - * @param all use the all rows indicator ? - * @param all2 use the all columns indicator ? 
- * @param k the 3rd dimension (sheet) index of the tensor - */ - inline def apply (all: Char, all2: Char, k: Int): MatrixD = - val a = Array.ofDim [Double] (dim, dim2) - for i <- 0 until dim; j <- 0 until dim2 do a(i)(j) = v(i)(j)(k) - new MatrixD (dim, dim2, a) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retrieve the ii._1 to ii._2 row slice of the tensor. - * @param ii 1st dimension (row) indices of the tensor - */ - def apply (ii: (Int, Int)): TensorD = new TensorD (v.slice (ii._1, ii._2)) - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retrieve the ii._1 to ii._2, jj._1 to jj._2 row-column slice of the tensor. - * @param ii 1st dimension (row) indices of the tensor (null => all) - * @param jj 2nd dimension (column) indices of the tensor - */ - def apply (ii: (Int, Int), jj: (Int, Int)): TensorD = - val (i1, i2) = if ii == null then (0, dim) else ii - val u = v.slice (i1, i2) - for i <- u.indices do u(i) = u(i).slice (jj._1, jj._2) - new TensorD (u) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retrieve the ii._1 to ii._2, jj._1 to jj._2, kk._1 to kk._2 - * row-column-sheet slice of the tensor. - * @param ii 1st dimension (row) indices of the tensor (null => all) - * @param jj 2nd dimension (column) indices of the tensor (null => all) - * @param kk 3rd dimension (sheet) indices of the tensor - */ - def apply (ii: (Int, Int), jj: (Int, Int), kk: (Int, Int)): TensorD = - val (i1, i2) = if ii == null then (0, dim) else ii - val (j1, j2) = if jj == null then (0, dim2) else jj - val u = v.slice (i1, i2) - for i <- u.indices do u(i) = u(i).slice (j1, j2) - for i <- u.indices; j <- u(i).indices do u(i)(j) = u(i)(j).slice (kk._1, kk._2) - new TensorD (u) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retrieve the is row selections from the tensor. 
- * @param is 1st dimension (row) indices of the tensor - */ - def apply (is: Array [Int]): TensorD = - val u = Array.ofDim [Double] (is.size, dim2, dim3) - for i <- is.indices; j <- indices2; k <- indices3 do u(i)(j)(k) = v(is(i))(j)(k) - new TensorD (u) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retrieve the is, js row-column selections from the tensor. - * @param is 1st dimension (row) indices of the tensor (null => all) - * @param js 2nd dimension (column) indices of the tensor - */ - def apply (is: Array [Int], js: Array [Int]): TensorD = - if is == null then - val u = Array.ofDim [Double] (dim, js.size, dim3) - for i <- indices; j <- js.indices; k <- indices3 do u(i)(j)(k) = v(i)(js(j))(k) - new TensorD (u) - else - val u = Array.ofDim [Double] (is.size, js.size, dim3) - for i <- is.indices; j <- js.indices; k <- indices3 do u(i)(j)(k) = v(is(i))(js(j))(k) - new TensorD (u) - end if - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retrieve the is, js, ks row-column-sheet selections from the tensor. 
- * @param is 1st dimension (row) indices of the tensor (null => all) - * @param js 2nd dimension (column) indices of the tensor (null => all) - * @param ks 3rd dimension (sheet) indices of the tensor - */ - def apply (is: Array [Int], js: Array [Int], ks: Array [Int]): TensorD = - if is == null && js == null then - val u = Array.ofDim [Double] (dim, dim2, ks.size) - for i <- indices; j <- indices2; k <- ks.indices do u(i)(j)(k) = v(i)(j)(ks(k)) - new TensorD (u) - else if is == null then - val u = Array.ofDim [Double] (dim, js.size, ks.size) - for i <- indices; j <- js.indices; k <- ks.indices do u(i)(j)(k) = v(i)(js(j))(ks(k)) - new TensorD (u) - else if js == null then - val u = Array.ofDim [Double] (is.size, dim2, ks.size) - for i <- is.indices; j <- indices2; k <- ks.indices do u(i)(j)(k) = v(is(i))(j)(ks(k)) - new TensorD (u) - else - val u = Array.ofDim [Double] (is.size, js.size, ks.size) - for i <- is.indices; j <- js.indices; k <- ks.indices do u(i)(j)(k) = v(is(i))(js(j))(ks(k)) - new TensorD (u) - end if - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retrieve the complement of the is row selections from the tensor. - * @param is 1st dimension (row) indices of the tensor - */ - def not (is: Array [Int]): TensorD = apply (Array.range (0, dim) diff is) - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retrieve the complement of the is row selections from the tensor. - * @param is 1st dimension (row) indices of the tensor - * @param js 2nd dimension (column) indices of the tensor - */ - def not (is: Array [Int], js: Array [Int]): TensorD = apply (comple (is, dim), comple (js, dim2)) - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retrieve the complement of the is row selections from the tensor. 
- * @param is 1st dimension (row) indices of the tensor - * @param js 2nd dimension (column) indices of the tensor - * @param ks 3rd dimension (sheet) indices of the tensor - */ - def not (is: Array [Int], js: Array [Int], ks: Array [Int]): TensorD = - apply (comple (is, dim), comple (js, dim2), comple (ks, dim3)) - end not - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update a single SCALAR element of the tensor to the given value. - * Usage: z(i, j, k) = x - * @param i 1st dimension (row) index of the tensor - * @param j 2nd dimension (column) index of the tensor - * @param k 3rd dimension (sheet) index of the tensor - * @param x the value for updating the tensor at the above position - */ - def update (i: Int, j: Int, k: Int, x: Double): Unit = v(i)(j)(k) = x - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update a single VECTOR of the tensor to the given vector. - * Usage: z(i, j) = x - * @param i 1st dimension (row) index of the tensor - * @param j 2nd dimension (column) index of the tensor - * @param x the vector for updating the tensor at the above position - */ - def update (i: Int, j: Int, x: VectorD): Unit = v(i)(j) = x.toArray - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update a single VECTOR of the tensor to the given vector. - * Usage: z(i, ?, k) = x - * @param i 1st dimension (row) index of the tensor - * @param all use the all columns indicator ? - * @param k 3rd dimension (sheet) index of the tensor - * @param x the vector for updating the tensor at the above position - */ - def update (i: Int, all: Char, k: Int, x: VectorD): Unit = - for j <- indices2 do v(i)(j)(k) = x(j) - end update - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update a single VECTOR of the tensor to the given vector. - * Usage: z(?, j, k) = x - * @param all use the all rows indicator ? 
- * @param j 2nd dimension (column) index of the tensor - * @param k 3rd dimension (sheet) index of the tensor - * @param x the vector for updating the tensor at the above position - */ - def update (all: Char, j: Int, k: Int, x: VectorD): Unit = - for i <- indices do v(i)(j)(k) = x(i) - end update - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update a single MATRIX of the tensor (for ROW i) to the given matrix. - * Usage: z(i) = x - * @param i 1st dimension (row) index of the tensor - * @param x the matrix for updating the tensor at the above position - */ - def update (i: Int, x: MatrixD): Unit = - for j <- indices2 do v(i)(j) = x(j).toArray - end update - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update a single MATRIX of the tensor (for COLUMN j) to the given matrix. - * Usage: z(?, j) = x - * @param all use the all rows indicator ? - * @param j 2nd dimension (column) index of the tensor - * @param x the matrix for updating the tensor at the above position - */ - def update (all: Char, j: Int, x: MatrixD): Unit = - for i <- indices; k <- indices3 do v(i)(j)(k) = x(i, k) - end update - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update a single MATRIX of the tensor (for SHEET k) to the given matrix. - * Usage: z(?, ?, k) = x - * @param all use the all rows indicator ? - * @param all2 use the all columns indicator ? - * @param k the 3rd dimension (sheet) index of the tensor - * @param x the matrix for updating the tensor at the above position - */ - def update (all: Char, all2: Char, k: Int, x: MatrixD): Unit = - for i <- indices; j <- indices2 do v(i)(j)(k) = x(i, j) - end update - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set all the tensor element values to x. 
- * @param x the value to set all elements to - */ - def set (x: Double): Unit = - for i <-indices; j <- indices2; k <- indices3 do v(i)(j)(k) = x - end set - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add this tensor and tensor b. - * @param b the tensor to add (requires leDimensions) - */ - def + (b: TensorD): TensorD = - val c = new TensorD (dim, dim2, dim3) - for i <- indices; j <- indices2; k <- indices3 do - c.v(i)(j)(k) = v(i)(j)(k) + b.v(i)(j)(k) - end for - c - end + - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add this tensor and scalar s. - * @param s the scalar to add - */ - def + (s: Double): TensorD = - val c = new TensorD (dim, dim2, dim3) - for i <- indices; j <- indices2; k <- indices3 do - c.v(i)(j)(k) = v(i)(j)(k) + s - end for - c - end + - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** From this tensor subtract tensor b. - * @param b the tensor to add (requires leDimensions) - */ - def - (b: TensorD): TensorD = - val c = new TensorD (dim, dim2, dim3) - for i <- indices; j <- indices2; k <- indices3 do - c.v(i)(j)(k) = v(i)(j)(k) - b.v(i)(j)(k) - end for - c - end - - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** From this tensor subtract scalar s. - * @param s the scalar to add - */ - def - (s: Double): TensorD = - val c = new TensorD (dim, dim2, dim3) - for i <- indices; j <- indices2; k <- indices3 do - c.v(i)(j)(k) = v(i)(j)(k) - s - end for - c - end - - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Multiply this tensor by scalar s. 
- * @param s the scalar to multiply by - */ - def * (s: Double): TensorD = - val c = new TensorD (dim, dim2, dim3) - for i <- indices; j <- indices2; k <- indices3 do - c.v(i)(j)(k) = v(i)(j)(k) * s - end for - c - end * - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Multiply (multi-linear product) this tensor by three matrices b, c and d. - * this * (a, b, c) - * @see www.stat.uchicago.edu/~lekheng/work/icm1.pdf - equation 15.1 - * @param b the first matrix to multiply by (requires leDimensions) - * @param c the second matrix to multiply by (requires leDimensions) - * @param d the third matrix to multiply by (requires leDimensions) - */ - def * (b: MatrixD, c: MatrixD, d: MatrixD): TensorD = - val (m1, n1) = (b.dim, b.dim2) - val (m2, n2) = (c.dim, c.dim2) - val (m3, n3) = (d.dim, d.dim2) - if n1 > dim2 || n2 > dim2 || n3 > dim3 then flaw ("*", "dimensions don't match") - - val e = new TensorD (m1, m2, m3) - for i <- b.indices; j <- c.indices; k <- d.indices do - var sum = 0.0 - for l1 <- b.indices2; l2 <- c.indices2; l3 <- d.indices2 do - sum += b(i, l1) * c(j, l2) * d(k, l3) * v(l1)(l2)(l3) - end for - e.v(i)(j)(k) = sum - end for - e - end * - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Multiply element-wise (Hadamard product) this tensor by tensor b. - * @param b the tensor to add (requires leDimensions) - */ - def *~ (b: TensorD): TensorD = - val c = new TensorD (dim, dim2, dim3) - for i <- indices; j <- indices2; k <- indices3 do - c.v(i)(j)(k) = v(i)(j)(k) * b.v(i)(j)(k) - end for - c - end *~ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Map each row of this tensor by applying function f to each row matrix and - * returning the collected result as a matrix. 
- * @param f the matrix to vector function to apply - */ - def map (f: FunctionM2V): MatrixD = - MatrixD (for i <- indices yield f(apply(i))) - end map - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Map each row of this tensor by applying function f to each row matrix and - * returning the collected result as a tensor. - * @param f the matrix to matrix function to apply - */ - def mmap (f: FunctionM2M): TensorD = - TensorD (for i <- indices yield f(apply(i))) - end mmap - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Map each element of this tensor by applying function f to each element and - * returning the collected result as a tensor. - * @param f the scalar to scalar function to apply - */ - def map_ (f: FunctionS2S): TensorD = - val x = new TensorD (dim, dim2, dim3) - for i <- indices; j <- indices2; k <- indices3 do x.v(i)(j)(k) = f(v(i)(j)(k)) - x - end map_ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Flatten this tensor in row-major fashion, returning a matrix containing - * all the elements from the tensor. - */ - def flatten: MatrixD = - val a = Array.ofDim [Double] (dim * dim2, dim3) - var k = 0 - for i <- indices do - val v_i = v(i) - var j = 0 - cfor (j < dim2, j += 1) { a(k) = v_i(j); k += 1 } - end for - new MatrixD (a.length, a(0).length, a) - end flatten - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Check whether the dimensions of this tensor are less than or equal to - * le those of the other tensor b. - * @param b the other matrix - */ - def leDimensions (b: TensorD): Boolean = - dim <= b.dim && dim2 <= b.dim2 && dim3 <= b.dim3 - end leDimensions - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this tensor to a matrix where all the elements have integer values. 
- */ - def toInt: TensorD = - val x = new TensorD (dim, dim2, dim3) - for i <- indices; j <- indices2; k <- indices3 do x.v(i)(j)(k) = round (v(i)(j)(k)).toDouble - x - end toInt - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this tensor to a string with a double line break after each sheet - * and a single line break after each row. - */ - override def toString: String = - val sb = new StringBuilder ("\nTensorD (") - if dim == 0 then return sb.append (")").mkString - for k <- indices3 do - for i <- indices do - for j <- indices2 do sb.append (s"${v(i)(j)(k)}, ") - sb.append ("\n" + TAB) - end for - sb.append ("\n" + TAB) - end for - sb.replace (sb.length-5, sb.length, ")").mkString - end toString - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this tensor to a string with a line break after each sheet. - */ - def toString2: String = - val sb = new StringBuilder ("\nTensorD( ") - if dim == 0 then return sb.append (")").mkString - for i <- indices; j <- indices2 do - sb.append (stringOf (v(i)(j)) + ", ") - if j == dim2-1 then sb.replace (sb.length-1, sb.length, "\n\t") - end for - sb.replace (sb.length-3, sb.length, ")").mkString - end toString2 - -end TensorD - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TensorD` companion object provides factory methods for the `TensorD` class. - */ -object TensorD: - -// private val flaw = flawf ("TensorD") // flaw function - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build a tensor from the scaler argument list x. 
- * @param n1 the first dimension - * @param n2 the second dimension - * @param n3 the third dimension - * @param x the list/vararg of scacollection.immutable.IndexedSeq [MatrixD]lars - */ - def apply (n: (Int, Int, Int), x: Double*): TensorD = - val t = new TensorD (n._1, n._2, n._3) - var l = 0 - for k <- 0 until n._3; i <- 0 until n._1; j <- 0 until n._2 do - t(i, j, k) = x(l) - l += 1 - end for - t - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build a tensor from the vector argument list x. - * @param n the first dimension - * @param vs the list/vararg of vectors - */ - def apply (n: Int, vs: VectorD*): TensorD = - val t = new TensorD (n, vs.length, vs(0).dim) - var l = 0 - for i <- t.indices; j <- t.indices2 do - t(i, j) = vs(l) - l += 1 - end for - t - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build a tensor from the vector argument list x. - * @param n the first dimension - * @param vs the indexed sequence of vectors - */ - def apply (n: Int, vs: IndexedSeq [VectorD]): TensorD = - val t = new TensorD (n, vs.length, vs(0).dim) - var l = 0 - for i <- t.indices; j <- t.indices2 do - t(i, j) = vs(l) - l += 1 - end for - t - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build a tensor from the vector argument list x. - * @param n the first dimension - * @param vs the indexed sequence of vectors - */ - def apply (n: Int, vs: collection.immutable.IndexedSeq [VectorD]): TensorD = - val t = new TensorD (n, vs.length, vs(0).dim) - var l = 0 - for i <- t.indices; j <- t.indices2 do - t(i, j) = vs(l) - l += 1 - end for - t - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a tensor from a variable argument list of matrices (row-wise). - * Use transpose to make it column-wise. 
- * @param vs the vararg list of matrices - */ - def apply (vs: MatrixD*): TensorD = - val (m, n, p) = (vs.length, vs(0).dim, vs(0).dim2) - val a = Array.ofDim [Array [Array [Double]]] (m) - for i <- vs.indices do a(i) = vs(i).v - new TensorD (m, n, p, a) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a tensor from an mutable `IndexedSeq` of matrices (row-wise). - * Use transpose to make it column-wise. - * @param vs the indexed sequence of matrices - */ - def apply (vs: IndexedSeq [MatrixD]): TensorD = - val (m, n, p) = (vs.length, vs(0).dim, vs(0).dim2) - val a = Array.ofDim [Array [Array [Double]]] (m) - for i <- vs.indices do a(i) = vs(i).v - new TensorD (m, n, p, a) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a tensor from an immutable `IndexedSeq` of matrices (row-wise), - * as produce by for yield. Use transpose to make it column-wise. - * @param vs the indexed sequence of matrices - */ - def apply (vs: collection.immutable.IndexedSeq [MatrixD]): TensorD = - val (m, n, p) = (vs.length, vs(0).dim, vs(0).dim2) - val a = Array.ofDim [Array [Array [Double]]] (m) - for i <- vs.indices do a(i) = vs(i).v - new TensorD (m, n, p, a) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a tensor of dimensions dim by dim2 by dim3 where all elements equal - * to the given value. - * @param dim the row dimension - * @param dim2 the column dimension - * @param dim2 the sheet dimension - * @param value the given value to assign to all elements - */ - def fill (dim: Int, dim2: Int, dim3: Int, value: Double): TensorD = - val a = Array.fill (dim, dim2, dim3)(value) - new TensorD (dim, dim2, dim3, a) - end fill - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the cross-correlation tensor for the given data matrix for up to - * maxLags. 
- * @param x the given data matrix (row are instances, columns are variables) - * @param maxLags the maximum number of lags to consider - * - def crossCorr (x: MatrixD, maxLags: Int = 10): TensorD = - val n = x.dim2 - if 2 * maxLags >= x.dim then flaw ("crossCorr", "not enough data for maxLags = $maxLags") - val ccorr = new TensorD (maxLags+1, n, n) - for l <- 0 to maxLags do ccorr(l) = x.laggedCorr (l) - ccorr - end crossCorr - */ - -end TensorD - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `tensorDTest` main function is used to test the `TensorD` class. - * > runMain scalation.mathstat.tensorDTest - */ -@main def tensorDTest (): Unit = - - val s = 2.0 - val a = new TensorD (2, 3, 2) - val b = new TensorD (2, 3, 2) - // row column sheet - val c = TensorD ((2, 3, 2), 1, 2, 3, // 0 0-2 0 - 4, 5, 6, // 1 0-2 0 - - 7, 8, 9, // 0 0-2 1 - 10, 11, 12) // 1 0-2 1 - - for i <- 0 until 2; j <- 0 until 3; k <- 0 until 2 do - val sum = i + j + k - a(i, j, k) = sum - b(i, j, k) = sum - end for - - println ("s = " + s) - println ("a = " + a) - println ("b = " + b) - println ("c = " + c) - println ("c(0) = " + c(0)) - println ("c(0, 0) = " + c(0, 0)) - println ("c(0, 0, 0) = " + c(0, 0, 0)) - - banner ("Test operators") - println ("a + b = " + (a + b)) - println ("a + s = " + (a + b)) - println ("a - b = " + (a - b)) - println ("a - s = " + (a - s)) - println ("c * s = " + c * s) - println ("a *~ c = " + a *~ c) - - val x = MatrixD ((2, 2), 1, 2, - 3, 4) - val y = MatrixD ((2, 3), 1, 2, 3, - 4, 5, 6) - val z = MatrixD ((2, 2), 5, 6, - 7, 8) - - println ("c * (x, y, z) = " + c * (x, y, z)) - - banner ("Test slice") - println ("c = " + c) - println ("slice row 0:1 = " + c((0, 1))) - - println ("slice row col: 0:1, 0:2 = " + c((0, 1), (0, 2))) - println ("slice col: null, 0:2 = " + c(null, (0, 2))) - - println ("slice row col sheet: 0:1, 0:2, 0:1 = " + c((0, 1), (0, 2), (0, 1))) - println ("slice sheet: null, null, 0:1 = " + c(null, 
null, (0, 1))) - println ("slice row sheet: 0:1, null, 0:1 = " + c((0, 1), null, (0, 1))) - println ("slice col sheet null, 0:2, 0:1 = " + c(null, (0, 2), (0, 1))) - - banner ("Test select") - println ("c = " + c) - println ("select row 0 = " + c(Array [Int] (0))) - - println ("select row col: 0, 0,2 = " + c(Array [Int] (0), Array [Int] (0, 2))) - println ("select col: null, 0,2 = " + c(null, Array [Int] (0, 2))) - - println ("select row col sheet: 0, 0,2, 1 = " + c(Array [Int] (0), Array [Int] (0, 2), Array [Int] (1))) - println ("select sheet: null, null, 1 = " + c(null, null, Array [Int] (1))) - println ("select row sheet: 0, null, 1 = " + c(Array [Int] (0), null, Array [Int] (1))) - println ("select col sheet null, 0,2, 1 = " + c(null, Array [Int] (0, 2), Array [Int] (1))) - - banner ("Test not") - println ("c = " + c) - println ("not row 0 = " + c.not(Array [Int] (0))) - println ("not row col: 0, 0,2 = " + c.not(Array [Int] (0), Array [Int] (0, 2))) - println ("not row col sheet: 0, 0,2, 1 = " + c.not(Array [Int] (0), Array [Int] (0, 2), Array [Int] (1))) - -end tensorDTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TensorDTest2` main function is used to test the `TensorD` class. - * It tests pulling matrices and vectors from the tensor. 
- * > runMain scalation.mathstat.tensorDTest2 - */ -@main def tensorDTest2 (): Unit = - - // 4 rows, 3 columns, 2 sheets - x_ijk - // row columns sheet - val x = TensorD ((4, 3, 2), 1, 2, 3, // 0 0,1,2 0 - 4, 5, 6, // 1 0,1,2 0 - 7, 8, 9, // 2 0,1,2 0 - 10, 11, 12, // 3 0,1,1 0 - - 13, 14, 15, // 0 0,1,2 1 - 16, 17, 18, // 1 0,1,2 1 - 19, 20, 21, // 2 0,1,2 1 - 22, 23, 24) // 3 0,1,2 1 - - banner ("Tensor with dimensions (rows, columns, sheets) = (4, 3, 2)") - println ("x = " + x) - - // SCALARS - banner ("Scalar element at index position (i, j, k) = (0, 0, 0)") - println ("x(0, 0, 0) = " + x(0, 0, 0)) // x_000 - element i=0, j=0, k=0 - - // VECTORS - banner ("Vector at index position (i, j) = (0, 0)") - println ("x(0, 0) = " + x(0, 0)) // x_00: - vector i=0, j=0, k=all - banner ("Vector at index position (i, ?, k) = (0, all, 0)") - println ("x(0, ?, 0) = " + x(0, ?, 0)) // x_0:0 - vector i=0, j=all, k=0 - banner ("Vector at index position (?, j, k) = (all, 0, 0)") - println ("x(?, 0, 0) = " + x(?, 0, 0)) // x_:00 - vector i=all, j=0, k=0 - - // MATRICES - banner ("Matrix from tensor with row i fixed at 0") - println ("x(0) = " + x(0)) // x_0:: - matrix with row i fixed - banner ("Matrix from tensor with column j fixed at 0") - println ("x(?, 0) = " + x(?, 0)) // x_:0: - matrix with column j fixed - banner ("Matrix from tensor with sheet k fixed at 0") - println ("x(?, ?, 0) = " + x(?, ?, 0)) // x_::0 - matrix with sheet k fixed - -end tensorDTest2 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TensorDTest3` main function is used to test the `TensorD` class. - * It tests the use of tensors and matrices for convolutional operation needed in - * Convolutional Nets. 
- * > runMain scalation.mathstat.tensorDTest3 - */ -@main def tensorDTest3 (): Unit = - - val a = new TensorD (2, 9, 9) - for i <- a.indices; j <- a.indices2; k <- a.indices3 do a(i, j, k) = i + j + k - println (s"a = $a") - - val image0 = a(0) - val image1 = a(1) - println (s"image0 = $image0") - println (s"image1 = $image1") - - val kernel = MatrixD ((3, 3), 1, 2, 1, - 2, 3, 2, - 1, 2, 1) - println (s"kernel = $kernel") - - val sp = new MatrixD (image0.dim - kernel.dim2 + 1, image0.dim2 - kernel.dim2 + 1) -// for i <- sp.indices; j <- sp.indices2 do sp(i, j) = kernel **+ (image0, i, j) // FIX **+ only in MatrixD.scala.sav - println (s"sp = $sp") - -end tensorDTest3 - diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TensorD.tasty b/target/scala-3.6.4/classes/scalation/mathstat/TensorD.tasty deleted file mode 100644 index 4c4bca962..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TensorD.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic$.class b/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic$.class deleted file mode 100644 index 2e8bd268d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic$package$.class deleted file mode 100644 index 29631f3eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic$package.class b/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic$package.class deleted file mode 100644 index 2e9660821..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic$package.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic$package.tasty deleted file mode 100644 index 0bdb4c33a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic.class b/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic.class deleted file mode 100644 index f85d0da07..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic.tasty b/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic.tasty deleted file mode 100644 index c07c20e52..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TimeStatistic.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split$.class b/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split$.class deleted file mode 100644 index 4f627cf2b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split$package$.class deleted file mode 100644 index 0448f400c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split$package.class b/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split$package.class deleted file mode 100644 index 8365644bf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split$package.tasty deleted 
file mode 100644 index a8eb68bab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split.class b/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split.class deleted file mode 100644 index 613742cb0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split.tasty b/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split.tasty deleted file mode 100644 index cc2899f57..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/TnT_Split.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Transform$.class b/target/scala-3.6.4/classes/scalation/mathstat/Transform$.class deleted file mode 100644 index 3df656d31..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Transform$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Transform$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/Transform$package$.class deleted file mode 100644 index 7ed0ca30e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Transform$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Transform$package.class b/target/scala-3.6.4/classes/scalation/mathstat/Transform$package.class deleted file mode 100644 index c20cfc8b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Transform$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Transform$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Transform$package.tasty deleted file mode 100644 index 6fee2fcdf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Transform$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/mathstat/Transform.class b/target/scala-3.6.4/classes/scalation/mathstat/Transform.class deleted file mode 100644 index ca1171321..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Transform.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Transform.scala.bak b/target/scala-3.6.4/classes/scalation/mathstat/Transform.scala.bak deleted file mode 100644 index 581d25a86..000000000 --- a/target/scala-3.6.4/classes/scalation/mathstat/Transform.scala.bak +++ /dev/null @@ -1,265 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Thu Mar 13 14:06:11 EDT 2025 - * @see LICENSE (MIT style license file). - * - * @note Support for Transformation Functions with their Inverse - * - * https://www.infoq.com/news/2023/10/foreign-function-and-memory-api/ - */ - -package scalation -package mathstat - -import scala.collection.mutable.ArrayBuffer -import scala.math._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TForm` trait supports the use of transformation functions, such that it - * is easy to take the inverse transform. When a transformation uses arguments, - * they are remembered for use by the inverse transformation. - */ -trait TForm: - protected var lu: VectorD = VectorD (1, 2) // optional default range/bounds [l .. 
u] - protected var a: VectorD = null // optional argument vector - protected var b: MatrixD = null // optional argument matrix - - def apply (x: VectorD): Unit = a = x.mu_sig // set the argument vector - - def apply (x: MatrixD): Unit = // set the argument matrix - val c = ArrayBuffer [VectorD] () - for j <- x.indices2 do { apply (x(?, j)); c += a } - b = MatrixD (c).transpose - - def setLU (_lu: VectorD): Unit = lu = _lu // set the default bounds - - def f (x: VectorD): VectorD // abstract transformation function - - def fi (y: VectorD): VectorD // abstract inverse transformation function - - val f: FunctionM2M = (x: MatrixD) => // matrix version of transformation function - if b != null then - val y = new MatrixD (x.dim, x.dim2) - for j <- x.indices2 do { a = b(?, j); y(?, j) = f(x(?, j)) } - y - else - x.mmap_(f(_)) - - val fi: FunctionM2M = (y: MatrixD) => // matrix version of inverse transformation function - if b != null then - val x = new MatrixD (y.dim, y.dim2) - for j <- y.indices2 do { a = b(?, j); x(?, j) = fi(y(?, j)) } - x - else - y.mmap_(fi(_)) -end TForm - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `zForm` object applies the z-transformation (subtract mean and divide by standard deviation). - */ -object zForm extends TForm: - def f (x: VectorD): VectorD = (x - a(0)) / a(1) - def fi (y: VectorD): VectorD = (y * a(1)) + a(0) - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rangeForm` object transforms values to the default range/bounds lu. 
- */ -object rangeForm extends TForm: - override def apply (x: VectorD): Unit = a = x.min_max ++ lu // set the argument vector - def f (x: VectorD): VectorD = (x - a(0)) * (a(3) - a(2))/(a(1) - a(0)) + a(2) - def fi (y: VectorD): VectorD = (y - a(2)) * (a(1) - a(0))/(a(3) - a(2)) + a(0) - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `logForm` object applies the log-transformation. - */ -object logForm extends TForm: - def f (x: VectorD): VectorD = x.log - def fi (y: VectorD): VectorD = y.exp - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `log1pForm` object applies the log1p-transformation (log (z+1)). - */ -object log1pForm extends TForm: - def f (x: VectorD): VectorD = x.log1p - def fi (y: VectorD): VectorD = y.expm1 - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `cosForm` object applies the cosine-transformation. - */ -object cosForm extends TForm: - override def apply (r: VectorD): Unit = a = r // set the argument vector - def f (x: VectorD): VectorD = x.map (z => cos (a(0) * Piby2 * z)) - def fi (y: VectorD): VectorD = y.map (z => acos (z) / (a(0) * Piby2)) - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `sinForm` object applies the sine-transformation. - */ -object sinForm extends TForm: - override def apply (r: VectorD): Unit = a = r // set the argument vector - def f (x: VectorD): VectorD = x.map (z => sin (a(0) * Piby2 * z)) - def fi (y: VectorD): VectorD = y.map (z => asin (z) / (a(0) * Piby2)) - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `sqrtForm` object applies the square-root-transformation. 
- */ -object sqrtForm extends TForm: - def f (x: VectorD): VectorD = x.sqrt - def fi (y: VectorD): VectorD = y ~^ 2 - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `powForm` object applies the power-transformation x^p for power p > 1. - */ -object powForm extends TForm: - override def apply (p: VectorD): Unit = a = p // set the argument vector - def f (x: VectorD): VectorD = x ~^ a(0) - def fi (y: VectorD): VectorD = y ~^ (1/a(0)) - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rootForm` object applies the power-transformation x^r for root r < 1. - */ -object rootForm extends TForm: - override def apply (r: VectorD): Unit = a = r // set the argument vector - def f (x: VectorD): VectorD = x ~^ a(0) - def fi (y: VectorD): VectorD = y ~^ (1/a(0)) - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `transformTest` tests the `Transform` class at the vector level. 
- * > runMain scalation.mathstat.transformTest - */ -@main def transformTest (): Unit = - - val x = VectorD (1, 2, 3) - println (s"x = $x") - - banner ("zForm Transformation") - zForm (x) // set the argument vector - var y = zForm.f (x) - var z = zForm.fi (y) - println (s"y = $y, z = $z") - - banner ("rangeForm Transformation") - rangeForm (x) // set the argument vector - y = rangeForm.f (x) - z = rangeForm.fi (y) - println (s"y = $y, z = $z") - - banner ("logForm Transformation") - y = logForm.f (x) - z = logForm.fi (y) - println (s"y = $y, z = $z") - - banner ("log1pForm Transformation") - y = log1pForm.f (x) - z = log1pForm.fi (y) - println (s"y = $y, z = $z") - - banner ("cosForm Transformation") - cosForm (VectorD (0.25)) // set the argument vector - y = cosForm.f (x) - z = cosForm.fi (y) - println (s"y = $y, z = $z") - - banner ("sinForm Transformation") - sinForm (VectorD (0.25)) // set the argument vector - y = sinForm.f (x) - z = sinForm.fi (y) - println (s"y = $y, z = $z") - - banner ("sqrtForm Transformation") - y = sqrtForm.f (x) - z = sqrtForm.fi (y) - println (s"y = $y, z = $z") - - banner ("powForm Transformation") - powForm (VectorD (1.5)) // set the argument vector - y = powForm.f (x) - z = powForm.fi (y) - println (s"y = $y, z = $z") - - banner ("rootForm Transformation") - rootForm (VectorD (0.5)) // set the argument vector - y = rootForm.f (x) - z = rootForm.fi (y) - println (s"y = $y, z = $z") - - banner ("recheck powForm Transformation") // note, previous apply still holds - y = powForm.f (x) - z = powForm.fi (y) - println (s"y = $y, z = $z") - -end transformTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `transformTest2` tests the `Transform` class at the matrix level. 
- * > runMain scalation.mathstat.transformTest2 - */ -@main def transformTest2 (): Unit = - - val x = MatrixD ((3, 2), 1, 3, - 2, 5, - 3, 6) - println (s"x = $x") - - banner ("zForm Transformation") - zForm (x) // set the argument vector - var y = zForm.f (x) - var z = zForm.fi (y) - println (s"y = $y, z = $z") - - banner ("rangeForm Transformation") - rangeForm (x) // set the argument vector - y = rangeForm.f (x) - z = rangeForm.fi (y) - println (s"y = $y, z = $z") - - banner ("logForm Transformation") - y = logForm.f (x) - z = logForm.fi (y) - println (s"y = $y, z = $z") - - banner ("log1pForm Transformation") - y = log1pForm.f (x) - z = log1pForm.fi (y) - println (s"y = $y, z = $z") - - banner ("cosForm Transformation") - cosForm (VectorD (0.25)) // set the argument vector - y = cosForm.f (x) - z = cosForm.fi (y) - println (s"y = $y, z = $z") - - banner ("sinForm Transformation") - sinForm (VectorD (0.25)) // set the argument vector - y = sinForm.f (x) - z = sinForm.fi (y) - println (s"y = $y, z = $z") - - banner ("sqrtForm Transformation") - y = sqrtForm.f (x) - z = sqrtForm.fi (y) - println (s"y = $y, z = $z") - - banner ("powForm Transformation") - powForm (VectorD (1.5)) // set the argument vector - y = powForm.f (x) - z = powForm.fi (y) - println (s"y = $y, z = $z") - - banner ("rootForm Transformation") - rootForm (VectorD (0.5)) // set the argument vector - y = rootForm.f (x) - z = rootForm.fi (y) - println (s"y = $y, z = $z") - - banner ("recheck powForm Transformation") // note, previous apply still holds - y = powForm.f (x) - z = powForm.fi (y) - println (s"y = $y, z = $z") - -end transformTest2 - diff --git a/target/scala-3.6.4/classes/scalation/mathstat/Transform.tasty b/target/scala-3.6.4/classes/scalation/mathstat/Transform.tasty deleted file mode 100644 index c0dd0837c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/Transform.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD$.class b/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD$.class deleted file mode 100644 index 2da4524a3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD$package$.class deleted file mode 100644 index a0d916925..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD$package.class b/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD$package.class deleted file mode 100644 index 3047cdb09..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD$package.tasty deleted file mode 100644 index c483e5bfb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD.class b/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD.class deleted file mode 100644 index 8448a2543..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD.tasty b/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD.tasty deleted file mode 100644 index 98f4bbee0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VMatrixD.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorC$.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorC$.class deleted file mode 100644 index 32d799cfc..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/VectorC$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorC$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorC$package$.class deleted file mode 100644 index b98cd7806..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorC$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorC$package.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorC$package.class deleted file mode 100644 index cd1d33ceb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorC$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorC$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/VectorC$package.tasty deleted file mode 100644 index 3648a6d02..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorC$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorC.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorC.class deleted file mode 100644 index 4d3cec619..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorC.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorC.tasty b/target/scala-3.6.4/classes/scalation/mathstat/VectorC.tasty deleted file mode 100644 index f55fc67d4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorC.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorD$.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorD$.class deleted file mode 100644 index 54d90a7db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorD$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorD$package$.class 
b/target/scala-3.6.4/classes/scalation/mathstat/VectorD$package$.class deleted file mode 100644 index 7d1f9f990..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorD$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorD$package.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorD$package.class deleted file mode 100644 index 4ced50cf0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorD$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorD$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/VectorD$package.tasty deleted file mode 100644 index eaea993df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorD$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorD.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorD.class deleted file mode 100644 index fea5a820a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorD.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorD.tasty b/target/scala-3.6.4/classes/scalation/mathstat/VectorD.tasty deleted file mode 100644 index 8bec91b21..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorD.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorDOps$.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorDOps$.class deleted file mode 100644 index ea1f3fd67..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorDOps$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorDOps.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorDOps.class deleted file mode 100644 index 8c59a118e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorDOps.class and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/mathstat/VectorDOps.tasty b/target/scala-3.6.4/classes/scalation/mathstat/VectorDOps.tasty deleted file mode 100644 index a42618e2b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorDOps.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorI$.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorI$.class deleted file mode 100644 index 3bbbbf733..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorI$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorI$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorI$package$.class deleted file mode 100644 index fa748f79f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorI$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorI$package.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorI$package.class deleted file mode 100644 index 7811c68cd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorI$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorI$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/VectorI$package.tasty deleted file mode 100644 index bf4020e32..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorI$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorI.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorI.class deleted file mode 100644 index 4f86a7c14..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorI.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorI.tasty b/target/scala-3.6.4/classes/scalation/mathstat/VectorI.tasty deleted file mode 100644 index c0e25b934..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/VectorI.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorL$.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorL$.class deleted file mode 100644 index 7884224c6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorL$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorL$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorL$package$.class deleted file mode 100644 index 59f7dde13..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorL$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorL$package.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorL$package.class deleted file mode 100644 index c722f3dfe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorL$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorL$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/VectorL$package.tasty deleted file mode 100644 index ded03616e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorL$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorL.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorL.class deleted file mode 100644 index 305fb63f5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorL.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorL.tasty b/target/scala-3.6.4/classes/scalation/mathstat/VectorL.tasty deleted file mode 100644 index 9e61310ae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorL.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorS$.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorS$.class 
deleted file mode 100644 index b900dc80e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorS$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorS$package$.class deleted file mode 100644 index 0e0fd2fca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorS$package.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorS$package.class deleted file mode 100644 index 5fab612f0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorS$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/VectorS$package.tasty deleted file mode 100644 index 496c7912f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorS.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorS.class deleted file mode 100644 index c342c1912..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorS.tasty b/target/scala-3.6.4/classes/scalation/mathstat/VectorS.tasty deleted file mode 100644 index 7660ef2ee..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorT$.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorT$.class deleted file mode 100644 index efebf155a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorT$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/mathstat/VectorT$package$.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorT$package$.class deleted file mode 100644 index 76aca6278..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorT$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorT$package.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorT$package.class deleted file mode 100644 index a13019145..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorT$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorT$package.tasty b/target/scala-3.6.4/classes/scalation/mathstat/VectorT$package.tasty deleted file mode 100644 index 002a11ee2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorT$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorT.class b/target/scala-3.6.4/classes/scalation/mathstat/VectorT.class deleted file mode 100644 index f9aece419..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorT.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/VectorT.tasty b/target/scala-3.6.4/classes/scalation/mathstat/VectorT.tasty deleted file mode 100644 index 5688f2c29..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/VectorT.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/bidiagonalTest.class b/target/scala-3.6.4/classes/scalation/mathstat/bidiagonalTest.class deleted file mode 100644 index 298e9abd0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/bidiagonalTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/bidiagonalTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/bidiagonalTest.tasty deleted file mode 100644 index 987039fd2..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/bidiagonalTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/cholesky.txt b/target/scala-3.6.4/classes/scalation/mathstat/cholesky.txt deleted file mode 100644 index 444035fd4..000000000 --- a/target/scala-3.6.4/classes/scalation/mathstat/cholesky.txt +++ /dev/null @@ -1,21 +0,0 @@ - -Cholesky factorization -Nicholas J. Higham -‘kji’ form of the algorithm - -Set permutation matrix pi = 1, i = 1: n. -for k = 1: n - Find s such that ass = maxk≤i≤n aii. - Swap rows and columns k and s of A and swap pk and ps. - akk = √akk - for j = k + 1: n - akj = akj/akk - end - for j = k + 1: n - for i = k + 1: j - aij = aij − akiakj - end - end -end -Set P to the matrix whose jth column is the pjth column of I. - diff --git a/target/scala-3.6.4/classes/scalation/mathstat/combinatoricsTest.class b/target/scala-3.6.4/classes/scalation/mathstat/combinatoricsTest.class deleted file mode 100644 index c6d4854f4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/combinatoricsTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/combinatoricsTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/combinatoricsTest.tasty deleted file mode 100644 index 66ee726bb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/combinatoricsTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/combinatoricsTest2.class b/target/scala-3.6.4/classes/scalation/mathstat/combinatoricsTest2.class deleted file mode 100644 index d6f266314..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/combinatoricsTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/combinatoricsTest2.tasty b/target/scala-3.6.4/classes/scalation/mathstat/combinatoricsTest2.tasty deleted file mode 100644 index aaf877ddc..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/combinatoricsTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/complexTest.class b/target/scala-3.6.4/classes/scalation/mathstat/complexTest.class deleted file mode 100644 index b00545c29..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/complexTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/complexTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/complexTest.tasty deleted file mode 100644 index 5a92b3d2e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/complexTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/correlogramTest.class b/target/scala-3.6.4/classes/scalation/mathstat/correlogramTest.class deleted file mode 100644 index 33d6a80f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/correlogramTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/correlogramTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/correlogramTest.tasty deleted file mode 100644 index 37828f75d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/correlogramTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/cosForm.class b/target/scala-3.6.4/classes/scalation/mathstat/cosForm.class deleted file mode 100644 index 28dcbe576..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/cosForm.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/cosForm.tasty b/target/scala-3.6.4/classes/scalation/mathstat/cosForm.tasty deleted file mode 100644 index 936cd169a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/cosForm.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/eigenTest.class 
b/target/scala-3.6.4/classes/scalation/mathstat/eigenTest.class deleted file mode 100644 index dccb2eb7d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/eigenTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/eigenTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/eigenTest.tasty deleted file mode 100644 index 6f0a80144..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/eigenTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest.class b/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest.class deleted file mode 100644 index a4949f371..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest.tasty deleted file mode 100644 index b040c525b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest2.class b/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest2.class deleted file mode 100644 index e5538deb7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest2.tasty b/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest2.tasty deleted file mode 100644 index 36078b0db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest3.class b/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest3.class deleted file mode 100644 index 04fb677cb..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest3.tasty b/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest3.tasty deleted file mode 100644 index 11482cd1e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_CholeskyTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_InverseTest.class b/target/scala-3.6.4/classes/scalation/mathstat/fac_InverseTest.class deleted file mode 100644 index 986fd578b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_InverseTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_InverseTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/fac_InverseTest.tasty deleted file mode 100644 index 30d1f1690..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_InverseTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_LQTest.class b/target/scala-3.6.4/classes/scalation/mathstat/fac_LQTest.class deleted file mode 100644 index 768305cfd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_LQTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_LQTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/fac_LQTest.tasty deleted file mode 100644 index aff443ef6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_LQTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest.class b/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest.class deleted file mode 100644 index 986a78df2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest.tasty 
b/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest.tasty deleted file mode 100644 index 75877ccb3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest2.class b/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest2.class deleted file mode 100644 index 231ff7482..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest2.tasty b/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest2.tasty deleted file mode 100644 index 002a4e6f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest3.class b/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest3.class deleted file mode 100644 index 278009b70..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest3.tasty b/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest3.tasty deleted file mode 100644 index 7acb0dbcc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_LUTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_QRTest.class b/target/scala-3.6.4/classes/scalation/mathstat/fac_QRTest.class deleted file mode 100644 index 82dd1469c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_QRTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_QRTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/fac_QRTest.tasty deleted file mode 100644 index 873988fa0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_QRTest.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/mathstat/fac_QRTest2.class b/target/scala-3.6.4/classes/scalation/mathstat/fac_QRTest2.class deleted file mode 100644 index 5ca727a01..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_QRTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_QRTest2.tasty b/target/scala-3.6.4/classes/scalation/mathstat/fac_QRTest2.tasty deleted file mode 100644 index fc865a4d9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_QRTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_QR_RRTest.class b/target/scala-3.6.4/classes/scalation/mathstat/fac_QR_RRTest.class deleted file mode 100644 index 2da203864..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_QR_RRTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_QR_RRTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/fac_QR_RRTest.tasty deleted file mode 100644 index 07805a22c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_QR_RRTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest.class b/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest.class deleted file mode 100644 index cccfe4dcf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest.tasty deleted file mode 100644 index a610f3671..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest2.class b/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest2.class deleted file mode 100644 index ed950f802..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest2.tasty b/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest2.tasty deleted file mode 100644 index 17e530d45..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest3.class b/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest3.class deleted file mode 100644 index 26207e358..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest3.tasty b/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest3.tasty deleted file mode 100644 index 44fb605c2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest4.class b/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest4.class deleted file mode 100644 index 020cd20e9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest4.tasty b/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest4.tasty deleted file mode 100644 index 4bbf79e3c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/fac_SVDTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/histogramTest.class b/target/scala-3.6.4/classes/scalation/mathstat/histogramTest.class deleted file mode 100644 index 58cc395b2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/histogramTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/histogramTest.tasty 
b/target/scala-3.6.4/classes/scalation/mathstat/histogramTest.tasty deleted file mode 100644 index 53a6a91e4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/histogramTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/householderTest.class b/target/scala-3.6.4/classes/scalation/mathstat/householderTest.class deleted file mode 100644 index c82cd1c8f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/householderTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/householderTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/householderTest.tasty deleted file mode 100644 index 21070aa31..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/householderTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/index.html b/target/scala-3.6.4/classes/scalation/mathstat/index.html deleted file mode 100644 index e547e00c3..000000000 --- a/target/scala-3.6.4/classes/scalation/mathstat/index.html +++ /dev/null @@ -1,47 +0,0 @@ - - -

    Source files in mathstat Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/mathstat/inverseTest.class b/target/scala-3.6.4/classes/scalation/mathstat/inverseTest.class deleted file mode 100644 index 401d8f7b1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/inverseTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/inverseTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/inverseTest.tasty deleted file mode 100644 index 045166291..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/inverseTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/log1pForm$.class b/target/scala-3.6.4/classes/scalation/mathstat/log1pForm$.class deleted file mode 100644 index 9e3f5e614..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/log1pForm$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/log1pForm.class b/target/scala-3.6.4/classes/scalation/mathstat/log1pForm.class deleted file mode 100644 index 5b252ff35..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/log1pForm.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/log1pForm.tasty b/target/scala-3.6.4/classes/scalation/mathstat/log1pForm.tasty deleted file mode 100644 index a9a8971f0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/log1pForm.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/logForm$.class b/target/scala-3.6.4/classes/scalation/mathstat/logForm$.class deleted file mode 100644 index 947c93e42..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/logForm$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/logForm.class b/target/scala-3.6.4/classes/scalation/mathstat/logForm.class deleted file mode 100644 index 095fbea82..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/logForm.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/logForm.tasty b/target/scala-3.6.4/classes/scalation/mathstat/logForm.tasty deleted file mode 100644 index 86453a96e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/logForm.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc0.class b/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc0.class deleted file mode 100644 index 19001a965..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc0.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc0.tasty b/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc0.tasty deleted file mode 100644 index 62627c3c4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc0.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc2.class b/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc2.class deleted file mode 100644 index e2de08beb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc2.tasty b/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc2.tasty deleted file mode 100644 index 654fa2a05..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc3.class b/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc3.class deleted file mode 100644 index 0bc46b7c8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc3.tasty b/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc3.tasty deleted file 
mode 100644 index 5ca198930..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc4.class b/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc4.class deleted file mode 100644 index 14915e277..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc4.tasty b/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc4.tasty deleted file mode 100644 index 738d99607..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixCalc4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixD2Test.class b/target/scala-3.6.4/classes/scalation/mathstat/matrixD2Test.class deleted file mode 100644 index f112ede5f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixD2Test.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixD2Test.tasty b/target/scala-3.6.4/classes/scalation/mathstat/matrixD2Test.tasty deleted file mode 100644 index 589543f08..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixD2Test.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixD2Test2.class b/target/scala-3.6.4/classes/scalation/mathstat/matrixD2Test2.class deleted file mode 100644 index c18d5709b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixD2Test2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixD2Test2.tasty b/target/scala-3.6.4/classes/scalation/mathstat/matrixD2Test2.tasty deleted file mode 100644 index 46ec6c088..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixD2Test2.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest.class b/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest.class deleted file mode 100644 index dc4880d3d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest.tasty deleted file mode 100644 index 83ab24a8a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest2.class b/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest2.class deleted file mode 100644 index 901b0d751..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest2.tasty b/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest2.tasty deleted file mode 100644 index 703a19869..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest3.class b/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest3.class deleted file mode 100644 index 8c378c2d5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest3.tasty b/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest3.tasty deleted file mode 100644 index c39b237bc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest4.class b/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest4.class deleted file mode 100644 index ba57b3519..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest4.tasty b/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest4.tasty deleted file mode 100644 index 27aed4f22..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest5.class b/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest5.class deleted file mode 100644 index f443d6df4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest5.tasty b/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest5.tasty deleted file mode 100644 index a67fd867b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest6.class b/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest6.class deleted file mode 100644 index 6f00fc011..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest6.tasty b/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest6.tasty deleted file mode 100644 index 6fa54b69e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest7.class b/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest7.class deleted file mode 100644 index 673b8f139..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest7.tasty 
b/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest7.tasty deleted file mode 100644 index 3c7ac1333..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/matrixDTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/old/Plot.scala.bak b/target/scala-3.6.4/classes/scalation/mathstat/old/Plot.scala.bak deleted file mode 100644 index 02865766f..000000000 --- a/target/scala-3.6.4/classes/scalation/mathstat/old/Plot.scala.bak +++ /dev/null @@ -1,383 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Michael Cotterell, Aiman Munir - * @version 2.0 - * @date Sun Nov 15 15:05:06 EDT 2009 - * @see LICENSE (MIT style license file). - * - * @title Plot Vectors y and z vs. x - */ - -package scalation -package mathstat - -import scala.math.{ceil, floor, min, pow, round} - -import scalation.scala2d._ -import scalation.scala2d.BorderLayout._ -import scalation.scala2d.Colors._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Plot` class takes 'x' and 'y' vectors of data values and plots the '(x, y)' - * data points. Optionally, a 'z' vector may be plotted with 'y'. Note, axes are - * determined by the 'x' and 'y' vectors only. For more vertical vectors use `PlotM`. - *------------------------------------------------------------------------------ - * Zoom functionality has two options: - * When clicked on the plot label the value on that label will be selected as min/max value. - * By default, the clicked value on x and y axis will be chosen as min value. - * To change the value to max the resetLabel with title "Switch min and max value" can be used. 
- *------------------------------------------------------------------------------ - * @param x the x vector of data values (horizontal), use null to use y's index - * @param y the y vector of data values (primary vertical, black) - * @param z the z vector of data values (secondary vertical, red) to compare with y - * @param _title the title of the plot - * @param lines flag for generating a line plot - */ -class Plot (x: VectorD, y: VectorD, z: VectorD = null, _title: String = "Plot y vs. x", lines: Boolean = false) - extends VizFrame (_title, null): - - val resetLabel = new Label () - resetLabel.setText ("Reset Plot") - - val maxLabel = new Label () - maxLabel.setText ("Switch max and min value") - - val xx: VectorD = if x == null then VectorD.range (0, y.dim) else x - val canvas = new Canvas (xx, y, z, getW, getH, lines) - getContentPane.add (resetLabel, BorderLayout.NORTH) - getContentPane.add (maxLabel, BorderLayout.AFTER_LAST_LINE) - getContentPane.add (canvas, BorderLayout.CENTER) - setVisible (true) - - // reset plot to original values - resetLabel.addMouseListener (new MouseListener () { - override def mouseClicked (mouseEvent: MouseEvent): Unit = canvas.resetMinMax () - override def mousePressed (mouseEvent: MouseEvent): Unit = {} - override def mouseReleased (mouseEvent: MouseEvent): Unit = {} - override def mouseEntered (mouseEvent: MouseEvent): Unit = {} - override def mouseExited (mouseEvent: MouseEvent): Unit = {} - }) - - // mouse listener that switches max and min value - maxLabel.addMouseListener (new MouseListener () { - override def mouseClicked (mouseEvent: MouseEvent): Unit = canvas.setMaxMinValue () - override def mousePressed (mouseEvent: MouseEvent): Unit = {} - override def mouseReleased (mouseEvent: MouseEvent): Unit = {} - override def mouseEntered (mouseEvent: MouseEvent): Unit = {} - override def mouseExited (mouseEvent: MouseEvent): Unit = {} - }) - -end Plot - - 
-//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Plot` companion object provides factory methods for creating plots. - */ -object Plot: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a plot object from integer vectors. - * @param x the x vector of data values (horizontal) - * @param y the y vector of data values (primary vertical) - * @param z the z vector of data values (secondary vertical) to compare with y - * @param _title the title of the plot - * @param lines flag for generating a line plot - */ - def apply (x: VectorI, y: VectorI, z: VectorI = null, _title: String, lines: Boolean = false): Plot = - new Plot (x.toDouble, y.toDouble, if z == null then null else z.toDouble, _title, lines) - end apply - -end Plot - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `FramelessPlot` class is used for embedded applications. - * @param x the x vector of data values (horizontal) - * @param y the y vector of data values (primary vertical) - * @param z the z vector of data values (secondary vertical) to compare with y - * @param width the width - * @param height the height - */ -class FramelessPlot (x: VectorD, y: VectorD, z: VectorD = null, var width: Int = 840, var height: Int = 480): - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Dynamically create and return a drawing canvas. - */ - def canvas: Canvas = new Canvas (x, y, z, width, height) - -end FramelessPlot - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Canvas` class provides a canvas on which to draw the plot. 
- * @param x the x vector of data values (horizontal) - * @param y the y vector of data values (primary vertical) - * @param z the z vector of data values (secondary vertical) to compare with y - * @param width the width - * @param height the height - * @param lines flag for generating a line plot - */ -class Canvas (x: VectorD, y: VectorD, z: VectorD, width: Int, height: Int, lines: Boolean = false) - extends Panel: - - private val EPSILON = 1E-9 - private val SCALE = 10 // FIX - pass as a parameter - private val frameW = width - private val frameH = height - private val offset = 80 - private val baseX = offset - private val baseY = frameH - offset - private val stepsX = 10 - private val stepsY = 10 - - private var minX = floor (SCALE * x.min) / SCALE.toDouble - private var maxX = ceil (x.max + EPSILON) - private var minY = floor (SCALE * y.min) / SCALE.toDouble - private var maxY = ceil (y.max) -// private var maxY = ceil (y.max + EPSILON) - - private var deltaX = maxX - minX - private var deltaY = maxY - minY - - private val diameter = 4 - private val dot = Ellipse () - private val axis = Line (0, 0, 0, 0) - - private var setMin = true - private val origMinX = minX - private val origMaxX = maxX - private val origMinY = minY - private val origMaxY = maxY - private var setMax = false - - setBackground (white) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Paint the canvas by plotting the data points. 
- * @param gr low-resolution graphics environment - */ - override def paintComponent (gr: Graphics): Unit = - super.paintComponent (gr) - val g2d = gr.asInstanceOf [Graphics2D] // use hi-res - - var x_pos = 0 - var y_pos = 0 - var step = 0.0 - - //:: Draw the axes - - g2d.setPaint (black) - g2d.setStroke (new BasicStroke (2.0f)) - axis.setLine (baseX - 1, baseY + 1, baseX + 10 + frameW - 2 * offset, baseY + 1) - val linex = axis.getBounds - g2d.draw (axis) - axis.setLine (baseX - 1, offset - 10, baseX - 1, baseY + 1) - val liney = axis.getBounds - g2d.draw (axis) - - //:: Draw the labels on the axes - - var xlabels = List [Rectangle2D] () - var xValues = List [String] () - y_pos = baseY + 15 - step = deltaX / stepsX.asInstanceOf [Double] // for x-axis - for j <- 0 to stepsX do - val x_val = clip (minX + j * step) - x_pos = offset - 8 + j * (frameW - 2 * offset) / stepsX - g2d.drawString (x_val, x_pos, y_pos) - - xValues = xValues.::(x_val) // store the postion of x y labels to know the postion of click - var resumeRect = g2d.getFontMetrics.getStringBounds (x_val, g2d) - resumeRect.setRect (x_pos, y_pos - g2d.getFontMetrics ().getAscent (), - resumeRect.getWidth (), resumeRect.getHeight ()) - xlabels = xlabels.::(resumeRect) - end for - - var ylabels = List [Rectangle2D] () - var yValues = List [String] () - x_pos = baseX - 30 - step = deltaY / stepsY.asInstanceOf [Double] // for y-axis - for j <- 0 to stepsY do - val y_val = clip (maxY - j * step) - y_pos = offset + 2 + j * (frameH - 2 * offset) / stepsY - g2d.drawString (y_val, x_pos, y_pos) - - yValues = yValues.::(y_val) - var resumeRect = g2d.getFontMetrics.getStringBounds (y_val, g2d) - resumeRect.setRect (x_pos, y_pos - g2d.getFontMetrics ().getAscent (), - resumeRect.getWidth (), resumeRect.getHeight ()) - ylabels = ylabels.::(resumeRect) - end for - - //:: Draw the dots for the data points being plotted - - var px_pos = 0 // previous x - var py_pos = 0 // previous y - - for i <- 0 until y.dim do - val xx = 
round ((x(i) - minX) * (frameW - 2 * offset).asInstanceOf [Double]) - x_pos = (xx / deltaX).asInstanceOf [Int] + offset - val yy = round ((maxY - y(i)) * (frameH - 2 * offset).asInstanceOf [Double]) - y_pos = (yy / deltaY).asInstanceOf [Int] + offset - dot.setFrame (x_pos, y_pos, diameter, diameter) // x, y, w, h - - g2d.setPaint (black) - g2d.fill (dot) - - // connect with lines - if i != 0 && lines then - g2d.setStroke (new BasicStroke (1.0f)) - g2d.drawLine (px_pos+1, py_pos+1, x_pos+1, y_pos+1) - end if - - px_pos = x_pos // update previous x - py_pos = y_pos // update previous y - end for - - g2d.setStroke (new BasicStroke (2.0f)) - - if z != null then - for i <- 0 until min (y.dim, z.dim) do - val xx = round ((x(i) - minX) * (frameW - 2 * offset).asInstanceOf [Double]) - x_pos = (xx / deltaX).asInstanceOf [Int] + offset - val yy = round ((maxY - z(i)) * (frameH - 2 * offset).asInstanceOf [Double]) - y_pos = (yy / deltaY).asInstanceOf [Int] + offset - dot.setFrame (x_pos, y_pos, diameter, diameter) // x, z, w, h - g2d.setPaint (red) - g2d.fill (dot) - - // connect with lines - if i != 0 && lines then - g2d.setStroke (new BasicStroke (1.0f)) - g2d.drawLine (px_pos+1, py_pos+1, x_pos+1, y_pos+1) - end if - - px_pos = x_pos // update previous x - py_pos = y_pos // update previous y - end for - end if - - // mouse listener to identify axis click - addMouseListener (new MouseListener { - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Handle mouse clicked event - * @param mouseEvent the mouse clicked event - */ - override def mouseClicked (mouseEvent: MouseEvent): Unit = - var x = mouseEvent.getX - var y = mouseEvent.getY - - // give a gap of 4 points so click can be easily recognized - if linex.getY > y-4 && linex.getY < y+4 then - var pointFound = false - if x < linex.getMaxX && x > linex.getX then - for a <- 0 until xlabels.size do - if a == 0 then - if xlabels(a).getMaxX > x && xlabels(a+1).getMinX < x then - pointFound = true 
- if setMin then minX = xValues(a).toDouble - else if setMax then maxX = xValues(a).toDouble - deltaX = maxX - minX - repaint () - end if - else if a == xlabels.size-1 && ! pointFound then - if xlabels(a).getMinX < x && xlabels(a-1).getMinX > x then - pointFound = true - if setMin then minX = xValues(a).toDouble - else if setMax then maxX = xValues(a).toDouble - deltaX = maxX - minX - repaint () - end if - else if ! pointFound then - if xlabels(a).getMinX < x && xlabels(a-1).getMinX > x then - pointFound = true - if setMin then minX = xValues(a).toDouble - else if setMax then maxX = xValues(a).toDouble - deltaX = maxX - minX - repaint () - end if - end if - end for - end if - end if - - if liney.getX > mouseEvent.getX -4 && liney.getX < x+4 then - var pointFound = false - if y < liney.getMaxY && y > liney.getY then - for a <- 0 until ylabels.size do - if ! pointFound then - if ylabels(a).getMaxY > y && ylabels(a+1).getMinY < y then - pointFound = true - if setMin then minY = yValues(a).toDouble - else if setMax then maxY = yValues(a).toDouble - deltaY = maxY - minY - repaint () - end if - end if - end for - end if - end if - - end mouseClicked - - override def mousePressed (mouseEvent: MouseEvent): Unit = {} - override def mouseReleased (mouseEvent: MouseEvent): Unit = {} - override def mouseEntered (mouseEvent: MouseEvent): Unit = {} - override def mouseExited (mouseEvent: MouseEvent): Unit = {} - }) - end paintComponent - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert value to string and cut out the first four characters. - * @param x the value to convert and cut - */ - def clip (x: Double): String = - val s = x.toString - s.substring (0, min (s.length, 4)) - end clip - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reset all plot values to their original. 
- */ - def resetMinMax (): Unit = - minX = origMinX - maxX = origMinX - deltaX = origMaxX - origMinX - minY = origMinX - maxY = origMaxY - deltaY = origMaxY - origMinY - repaint () - end resetMinMax - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Switch clicked location to be set as max or min. - */ - def setMaxMinValue (): Unit = - if setMin then - setMin = false - setMax = true - else - setMin = true - setMax = false - end if - end setMaxMinValue - -end Canvas - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `plotTest` main function is used to test the `Plot` class. - * > runMain scalation.mathstat.plotTest - */ -@main def plotTest (): Unit = - - val x = new VectorD (100) - val y = new VectorD (100) - for i <- 0 until 100 do { x(i) = i / 10.0; y(i) = pow (x(i) - 5, 2) } - new Plot (x, y, null, "plot1", lines = true) - val plot = new Plot (null, y, null, "plot2", lines = true) - -// writeImage (DATA_DIR + "plot.png", plot) - -end plotTest - diff --git a/target/scala-3.6.4/classes/scalation/mathstat/old/PlotC.scala.bak b/target/scala-3.6.4/classes/scalation/mathstat/old/PlotC.scala.bak deleted file mode 100644 index d9ca3e55e..000000000 --- a/target/scala-3.6.4/classes/scalation/mathstat/old/PlotC.scala.bak +++ /dev/null @@ -1,187 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Oct 17 16:01:39 EDT 2011 - * @see LICENSE (MIT style license file). 
- * - * @title Contour Plots for z = f(x, y) using color-coding for z - */ - -package scalation -package mathstat - -import scala.math.{ceil, floor, round} - -import scalation.scala2d._ -import scalation.scala2d.Colors._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `PlotC` class takes a function f and displays color-coded values for - * z = f(x, y) over a two dimensional grid defined the lower lb and upper ub bounds. - * An optional path is included that can be used to show, for example, the - * search path taken by an optimizer (e.g., a Conjugate Gradient NLP solver). - *------------------------------------------------------------------------------ - * Zoom functionality has two options: - * (1) mouse wheel controls the amount of zooming (in/out); - * (2) mouse dragging repositions the objects in the panel (drawing canvas). - * @see ZoomablePanel - *------------------------------------------------------------------------------ - * @param f the function whose color-coded contour plot is sought - * @param lb the lower bounds on the plotting domain - * @param ub the upper bounds on the plotting domain - * @param path the points on a path (e.g., a search path) - * @param deltaF estimate of the range of possible functional values (if < 0, will be computed) - * @param lbF the lower bound on the functional value - * @param _title the title of the plot - */ -class PlotC (f: FunctionV2S, lb: VectorD, ub: VectorD, path: List [VectorD] = null, - private var deltaF: Double = -1.0, private var lbF: Double = 0.0, - _title: String = "Contour Plot of f(x, y)") - extends VizFrame (_title, null): - - private val EPSILON = 1E-9 // number close to zero - private val _1_3 = 1.0 / 3.0 // one third - private val _2_3 = 2.0 / 3.0 // two thirds - private val offset = 50 // offset frame to axis - private val frameW = getW // frame width - private val frameH = getH // frame height - private val baseX = offset // base for x-axis - private val 
baseY = frameH - offset // base for y-axis - - private val minX = floor (lb(0)) - private val maxX = ceil (ub(0)) - private val minY = floor (lb(1)) - private val maxY = ceil (ub(1)) - private val deltaX = maxX - minX - private val deltaY = maxY - minY - - private val width_ = 9 - private val diameter = 6 - private val square = Rectangle () - private val nsquares = 80.0 // number of squares per direction x, y - private val dot = Ellipse () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a canvas on which to draw the contour plot. - */ - class Canvas -// extends Panel: - extends ZoomablePanel: - - setBackground (white) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Paint the canvas by plotting color-coded squares representing the z-coordinate. - * @param gr low-resolution graphics environment - */ - override def paintComponent (gr: Graphics): Unit = - super.paintComponent (gr) - val g2d = gr.asInstanceOf [Graphics2D] // use hi-res graphics - - g2d.setTransform (at) // used for zooming (at @see `ZoomablePanel`) - - Plot.drawAxes (g2d, baseX, baseY, frameW, frameH, offset, minX, maxY, deltaX, deltaY) - - //:: Draw squares for the color-coded values of the points of the function being plotted - - var x_pos = 0 - var y_pos = 0 - - var x = lb(0) - while x <= ub(0) do - var y = lb(1) - while y <= ub(1) do - val vec = VectorD (x, y) - val frac = (f(vec) - lbF) / deltaF // fractional way from lower to upper bound - - val rgb = - if frac > _2_3 then ( ((frac-_2_3) * 765).toInt, ((1-frac) * 765).toInt, 0 ) - else if frac > _1_3 then ( 0, ((frac-_1_3) * 765).toInt, ((_2_3-frac) * 765).toInt ) - else ( ((_1_3-frac) * 400).toInt, 0, ((frac) * 765).toInt ) - - println (s"(x, y) = $vec, lbF = $lbF, frac = $frac, rgb = $rgb") - val color = new Color (rgb._1, rgb._2, rgb._3) - - val xx = round ((x - lb(0)) * (frameW - 2 * offset)) - x_pos = (xx / deltaX).asInstanceOf [Int] + offset - val yy = round 
((ub(1) - y) * (frameH - 2 * offset)) - y_pos = (yy / deltaY).asInstanceOf [Int] + offset - diameter - square.setFrame (x_pos, y_pos, width_, width_) // x, y, w, h - g2d.setPaint (color) - g2d.fill (square) - y += deltaY / nsquares - end while - x += deltaX / nsquares - end while - - //:: Draw the dots for the points on a search path, if given - - if path != null then - for p <- path do - val xx = round ((p(0) - lb(0)) * (frameW - 2 * offset)) - x_pos = (xx / deltaX).asInstanceOf [Int] + offset - val yy = round ((ub(1) - p(1)) * (frameH - 2 * offset)) - y_pos = (yy / deltaY).asInstanceOf [Int] + offset - diameter - dot.setFrame (x_pos, y_pos, diameter, diameter) // x, y, w, h - g2d.setPaint (yellow) - g2d.fill (dot) - end if - end paintComponent - - end Canvas - - if deltaF < 0.0 then resetBounds () - getContentPane ().add (new Canvas ()) - setVisible (true) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reset the bounds on the functional values of f. If the caller fails to - * provide an estimate for deltaF, this method should be called. - */ - def resetBounds (): Unit = - var minF = Double.PositiveInfinity - var maxF = Double.NegativeInfinity - - var x = lb(0) - while x <= ub(0) do - var y = lb(1) - while y <= ub(1) do - val vec = VectorD (x, y) - val f_vec = f(vec) - if f_vec < minF then minF = f_vec - if f_vec > maxF then maxF = f_vec - y += deltaY / nsquares - end while - x += deltaX / nsquares - end while - - lbF = minF // lower bounds on functional values for f - deltaF = maxF - minF // range of functional values for f - end resetBounds - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert basic Contour information to a string. 
- */ - override def toString: String = s"PlotC (lb = $lb, f(lb) = ${f(lb)}, ub = $ub, f(ub) = ${f(ub)})" - -end PlotC - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `plotCTest` main function is used to test the `PlotC` class. - * > runMain scalation.mathstat.plotCTest - */ -@main def plotCTest (): Unit = - - def f(x: VectorD): Double = (x(0)/2 - 3)~^2 + (x(1)/3 - 2)~^2 - - val lb = VectorD (0, 0) - val ub = VectorD (10, 10) - val deltaF = 18.0 - val path = List (VectorD (0, 0), VectorD (3, 2), VectorD (6, 6)) - val plot = new PlotC (f, lb, ub, path) - println (s"plot = $plot") - -end plotCTest - diff --git a/target/scala-3.6.4/classes/scalation/mathstat/old/VectorD.scala.bak b/target/scala-3.6.4/classes/scalation/mathstat/old/VectorD.scala.bak deleted file mode 100644 index a8f82ac37..000000000 --- a/target/scala-3.6.4/classes/scalation/mathstat/old/VectorD.scala.bak +++ /dev/null @@ -1,1048 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Thu Jun 17 19:29:23 EDT 2021 - * @see LICENSE (MIT style license file). - * - * @title Vector Data Structure of Doubles - */ - -package scalation -package mathstat - -import java.util.Arrays.copyOf - -import scala.collection.immutable.{IndexedSeq => IIndexedSeq} -import scala.collection.IterableFactoryDefaults -import scala.collection.generic._ -import scala.collection.mutable._ -import scala.runtime.ScalaRunTime.stringOf - -/** Top-level type definition for functions mapping: - */ -type FunctionS2V = Double => VectorD // scalar `Double` to vector `VectorD` -type FunctionV2S = VectorD => Double // vector `VectorD` to scalar `Double` -type FunctionV2V = VectorD => VectorD // vector `VectorD` to vector `VectorD` - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Vectorize a scalar function (S2S) to create a vector function (V2V). 
- * @param f the scalar function to vectorize - */ -def vectorize (f: FunctionS2S): FunctionV2V = (x: VectorD) => x.map (f(_)) - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `VectorD` class stores and operates on Numeric Vectors of base type `Double`. - * @param dim the dimension/size of the vector - * @param v the 1D array used to store vector elements - */ -class VectorD (val dim: Int, - private [mathstat] var v: Array [Double] = null) - extends IndexedSeq [Double] - with PartiallyOrdered [VectorD] - with DefaultSerializable: - - private val flaw = flawf ("VectorD") // partial invocation of flaw function - private var fString = "%g,\t" // output format spec - - if v == null then - v = Array.ofDim [Double] (dim) - else if dim > v.length then - flaw ("init", s"vector dimension is larger than space: dim = $dim > v.length = ${v.length}") - end if - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the length of this vector. - */ - inline def length: Int = dim - inline def nd = dim.toDouble - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Expand the size (dim) of this vector by more elements. - * @param more the number of new elements to add - */ - def expand (more: Int = dim): VectorD = - if more < 1 then this // no change - else new VectorD (dim + more, Array.concat (v, new Array [Double] (more))) - end expand - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return a deep copy of this vector (note: clone may not be deep). - * Uses Java's native `Arrays.copyOf` for efficiency. - */ - def copy: VectorD = new VectorD (dim, copyOf (v, dim)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the i-th element of this vector. 
- * @param i the index of the element to return - */ - def apply (i: Int): Double = v(i) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the elements in range r of this vector. - * @param r the index range of elements to return - */ - def apply (r: Range): VectorD = new VectorD (r.end - r.start, v.slice (r.start, r.end)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the elements in index sequence idx of this vector. - * @param idx the index sequence of elements to return - */ - def apply (idx: IndexedSeq [Int]): VectorD = VectorD (for i <- idx.indices yield v(idx(i))) - def apply (idx: IIndexedSeq [Int]): VectorD = VectorD (for i <- idx.indices yield v(idx(i))) - def apply (idx: Array [Int]): VectorD = VectorD (for i <- idx.indices yield v(idx(i))) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the elements not equal to index ix of this vector. - * @param ix the index to skip - */ - def not (ix: Int): VectorD = - VectorD (for i <- indices if i != ix yield v(i)) - end not - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the elements not in index sequence idx of this vector. - * @param idx the index sequence of elements to skip - */ - def not (idx: IndexedSeq [Int]): VectorD = - VectorD (for i <- indices if ! (idx contains i) yield v(i)) - end not - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split the elements from this vector to form two vectors: one from the elements - * in idx (e.g., testing set) and the other from elements not in idx - * (e.g., training set). 
- * @param idx the indices to include/exclude - */ - def split (idx: IndexedSeq [Int]): (VectorD, VectorD) = (this(idx), not(idx)) - def split (idx: VectorI): (VectorD, VectorD) = { val idx_ = idx.toMuIndexedSeq; (this(idx_), not(idx_)) } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split the elements from this vector to form two vectors: one from the - * the first i elements and the other from the rest of the elements. - * @param i the split index - */ - def split (i: Int): (VectorD, VectorD) = (new VectorD (i, v.slice (0, i)), - new VectorD (dim - i, v.slice (i, dim))) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Chop this vector into k sub-vectors of equal sizes (perhaps except for the last one). - * @param k the number of pieces to chop this vector into - */ - def chop (k: Int): Array [VectorD] = - if k <= 0 then flaw ("chop", s"k = $k must be at least one") - val pieces = Array.ofDim [VectorD] (k) - val size = dim / k - for i <- 0 until k-1 do pieces(i) = this (i*size until (i+1)*size) - pieces(k-1) = this ((k-1)*size until dim) - pieces - end chop - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the i-th element (or in range) of this vector. - * @param i the index of the element to update - * @param a the updated value to assign - */ - def update (i: Int, a: Double): Unit = v(i) = a - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the i-th element (or in range) of this vector. - * @param i the index of the element to update - * @param a the update value to assign - */ - def update (r: Range, a: Double): Unit = for i <- r do v(i) = a - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the i-th element (or in range) of this vector. 
- * @param i the index of the element to update - * @param y the update vector/indexed sequence to assign - */ - def update (r: Range, y: IndexedSeq [Double]): Unit = for i <- r do v(i) = y(i) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set all elements in this vector to scalar a. - * @param a the scalar value to be assigned - */ - def set (a: Double): Unit = for i <- v.indices do v(i) = a - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set all elements in this vector to vector y. - * @param y the vector value to be assigned - */ - def set (y: IndexedSeq [Double]): Unit = for i <- v.indices do v(i) = y(i) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Iterate over this vector element by element applying the given function. - * @param f the function to apply - */ - override def foreach [U] (f: Double => U): Unit = { var i = 0; while i < dim do { f (v(i)); i += 1 } } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Map the elements of this vector by applying the mapping function f. - * @param f the function to apply - */ - def map (f: FunctionS2S): VectorD = new VectorD (v.size, v.map (f)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Try to compare this vector to bb (return None if they are not comparable). - * As a partial order some vectors may not be comparable. - * @param bb the other vector - */ - def tryCompareTo [B >: VectorD: AsPartiallyOrdered] (bb: B): Option [Int] = - if ! 
bb.isInstanceOf [VectorD] then return None - val b = bb.asInstanceOf [VectorD] - var le = true - var ge = true - for i <- v.indices do - if ge && v(i) < b(i) then ge = false - else if le && v(i) > b(i) then le = false - end for - if ge && le then Some (0) - else if le then Some (-1) - else if ge then Some (1) - else None - end tryCompareTo - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find values in this vector of infinite magnitude, returning all such index positions. - */ - def findInfinity: IIndexedSeq [Int] = for i <- indices if v(i).isInfinite yield i - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return whether this is non-negative (contains no negative values). - */ - def isNonnegative: Boolean = - for e <- v if e < 0.0 do return false - true - end isNonnegative - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Concatenate this vector and vector y. - * @param y the other vector/indexed sequence - */ - def ++ (y: IndexedSeq [Double]): VectorD = new VectorD (dim + y.size, v ++ y) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Prepend (or append) this vector with scalar a. - * @param a the scalar second operand - */ - def +: (a: Double): VectorD = new VectorD (dim + 1, a +: v) - def :+ (a: Double): VectorD = new VectorD (dim + 1, v :+ a) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the negative of this vector (unary minus). - */ - def unary_- : VectorD = VectorD (for i <- v.indices yield -v(i)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the element-wise sum (or difference, product, quotient) of vectors this and y. 
- * @param y the other vector/indexed sequence - */ - def + (y: IndexedSeq [Double]): VectorD = VectorD (for i <- v.indices yield v(i) + y(i)) - def - (y: IndexedSeq [Double]): VectorD = VectorD (for i <- v.indices yield v(i) - y(i)) - def * (y: IndexedSeq [Double]): VectorD = VectorD (for i <- v.indices yield v(i) * y(i)) - def / (y: IndexedSeq [Double]): VectorD = VectorD (for i <- v.indices yield v(i) / y(i)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the element-wise sum (or difference, product, quotient) of this and scalar a. - * @param a the scalar second operand - */ - def + (a: Double): VectorD = VectorD (for i <- v.indices yield v(i) + a) - def - (a: Double): VectorD = VectorD (for i <- v.indices yield v(i) - a) - def * (a: Double): VectorD = VectorD (for i <- v.indices yield v(i) * a) - def / (a: Double): VectorD = VectorD (for i <- v.indices yield v(i) / a) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the element-wise sum (or difference, product, quotient) of vectors this and y. - * Perform operations in-place (destructive) to reduce memory allocations. - * @param y the other vector/indexed sequence - */ - def += (y: IndexedSeq [Double]): VectorD = { for i <- v.indices do v(i) += y(i); this } - def -= (y: IndexedSeq [Double]): VectorD = { for i <- v.indices do v(i) -= y(i); this } - def *= (y: IndexedSeq [Double]): VectorD = { for i <- v.indices do v(i) *= y(i); this } - def /= (y: IndexedSeq [Double]): VectorD = { for i <- v.indices do v(i) /= y(i); this } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the element-wise sum (or difference, product, quotient) of this and scalar a. - * Perform operations in-place (destructive) to reduce memory allocations. 
- * @param a the scalar second operand - */ - def += (a: Double): VectorD = { for i <- v.indices do v(i) += a; this } - def -= (a: Double): VectorD = { for i <- v.indices do v(i) -= a; this } - def *= (a: Double): VectorD = { for i <- v.indices do v(i) *= a; this } - def /= (a: Double): VectorD = { for i <- v.indices do v(i) /= a; this } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add this vector and scalar a only at position i, e.g., x + (3, 5.5). - * @param ia = (i, a) the (index position, scalar) to add - */ - def + (ia: (Int, Double)): VectorD = { val c = copy; c.v(ia._1) += ia._2; c } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Subtract from this vector the scalar a only at position i, e.g., x - (3, 5.5). - * @param ia = (i, a) the (index position, scalar) to subtract - */ - def - (ia: (Int, Double)): VectorD = { val c = copy; c.v(ia._1) -= ia._2; c } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the element-wise power function of this vector raised to scalar a. - * @param the scalar second operand - */ - def ~^ (a: Double): VectorD = VectorD (for i <- v.indices yield v(i) ~^ a) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Determine whether this vector and y are nearly equal. - */ - def =~ (y: VectorD): Boolean = - if dim != y.dim then return false - for i <- indices if ! (v(i) =~ y.v(i)) do return false - true - end =~ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the vector consisting of the square root of each element of this vector. - */ - def sqrt: VectorD = VectorD (for i <- v.indices yield math.sqrt (v(i))) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the vector consisting of the reciprocal of each element of this vector. 
- */ - def recip: VectorD = VectorD (for i <- v.indices yield 1 / v(i)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the difference between this vector and vector y. - * @param y the other vector/indexed sequence - */ - def diff (y: IndexedSeq [Double]): VectorD = { val a = v diff y; new VectorD (a.size, a) } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the intersection of this vector and vector y. - * @param y the other vector/indexed sequence - */ - def intersect (y: IndexedSeq [Double]): VectorD = { val a = v intersect y; new VectorD (a.size, a) } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Form a new vector consisting of the unique values in this vector. - */ - override def distinct: VectorD = { val a = v.distinct; new VectorD (a.size, a) } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reverse the elements in this vector. - */ - override def reverse: VectorD = { val a = v.reverse; new VectorD (a.size, a) } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Sort the elements in this vector according to ord.lt (ascending order) - * returning a new sorted vector. - */ - def sorted: VectorD = { val a = v.sorted; new VectorD (a.size, a) } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Sort the elements in this vector according to cmp (use '_ > _' for descending order). - * @param cmp the comparison operator. - */ - override def sortWith (cmp: (Double, Double) => Boolean): VectorD = - val a = v.sortWith (cmp) - new VectorD (a.size, a) - end sortWith - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Filter the elements in this vector based on the predicate. 
- * @param the filter predicate - */ - override def filter (p: Double => Boolean): VectorD = - val a = v.filter (p) - new VectorD (a.size, a) - end filter - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Filter the elements in this vector based on the negation of the predicate. - * @param the filter predicate - */ - override def filterNot (p: Double => Boolean): VectorD = - val a = v.filterNot (p) - new VectorD (a.size, a) - end filterNot - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Filter the elements in this vector based on the predicate, returning index positions. - * @param the filter predicate - */ - def filterPos (p: Double => Boolean): IIndexedSeq [Int] = for i <- v.indices if p(v(i)) yield i - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the dot (inner) product of vectors this and y. - * @param the other vector/indexed sequence - */ - def dot (y: IndexedSeq [Double]): Double = - var sum = 0.0 - for i <- v.indices do sum += v(i) * y(i) - sum - end dot - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return a new vector consisting of the maximum of this and y's corresponding elements. - * @param y the other vector/indexed sequence - */ - def maxv (y: IndexedSeq [Double]): VectorD = - VectorD (for i <- indices yield if v(i) >= y(i) then v(i) else y(i)) - end maxv - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return a new vector consisting of the minimum of this and y's corresponding elements. - * @param y the other vector/indexed sequence - */ - def minv (y: IndexedSeq [Double]): VectorD = - VectorD (for i <- indices yield if v(i) <= y(i) then v(i) else y(i)) - end minv - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find the argument maximum of this vector (index of maximum element). 
- * @param e the ending index (exclusive) for the search - */ - def argmax (e: Int = dim): Int = - var j = 0 - for i <- 1 until e if v(i) > v(j) do j = i - j - end argmax - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find the argument maximum of this vector (index of maximum element). - * @param s the starting index (inclusive) for the search - * @param e the ending index (exclusive) for the search - */ - def argmax (s: Int, e: Int): Int = - var j = s - for i <- s + 1 until e if v(i) > v(j) do j = i - j - end argmax - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find the argument minimum of this vector (index of minimum element). - * @param e the ending index (exclusive) for the search - */ - def argmin (e: Int = dim): Int = - var j = 0 - for i <- 1 until e if v(i) < v(j) do j = i - j - end argmin - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find the argument minimum of 'this' vector (index of minimum element). - * @param s the starting index (inclusive) for the search - * @param e the ending index (exclusive) for the search - */ - def argmin (s: Int, e: Int): Int = - var j = s - for i <- s + 1 until e if v(i) < v(j) do j = i - j - end argmin - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find the argument maximum magnitude of this vector (index of most extreme element). - * @param e the ending index (exclusive) for the search - */ - def argmag (e: Int = dim): Int = - var j = 0 - for i <- 1 until e if math.abs (v(i)) > math.abs (v(j)) do j = i - j - end argmag - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the magnitude of this vector, i.e., the element value farthest from zero. 
- */ - def mag: Double = math.max (math.abs (min), math.abs (max)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute three sums for the k-prefix, middle and k-suffix of this vector. - */ - def sums (k: Int): (Double, Double, Double) = - var s0, s1, s2 = 0.0 - for i <- v.indices do - if i < k then s0 += v(i) - else if i < dim-k then s1 += v(i) - else s2 += v(i) - end for - (s0, s1, s2) - end sums - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute three squared norms for the k-prefix, middle and k-suffix of this vector. - */ - def normSqs (k: Int): (Double, Double, Double) = - var s0, s1, s2 = 0.0 - for i <- v.indices do - if i < k then s0 += v(i) * v(i) - else if i < dim-k then s1 += v(i) * v(i) - else s2 += v(i) * v(i) - end for - (s0, s1, s2) - end normSqs - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the Euclidean norm (2-norm) (or its square) of this vector. - */ - def normSq: Double = v.fold (0.0)((s, e) => s + e*e) - def norm: Double = math.sqrt (normSq) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the Manhattan norm (1-norm) of this vector. - */ - def norm1: Double = v.fold (0.0)((s, e) => s + math.abs (e)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the vector that is the element-wise absolute value of this vector. - */ - def abs: VectorD = VectorD (for i <- v.indices yield math.abs (v(i))) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the vector containing the mid-points between adjacent elements. - */ - def mids: VectorD = VectorD (for i <- 1 until dim yield 0.5 * (v(i) + v(i-1))) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Cumulate the values of 'this' vector from left to right (e.g., create a - * CDF from a pmf). 
Example: (4, 2, 3, 1) --> (4, 6, 9, 10) - */ - def cumulate: VectorD = { var s = 0.0; VectorD (for i <- v.indices yield { s += v(i); s })} - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert this `Double` vector to an `Int` vector. - */ - def toInt: VectorI = new VectorI (dim, v.map (_.toInt)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert 'this' `VectorD` into a `VectorD`. - */ - def toDouble: VectorD = this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert to a probability vector, by normalizing so that it sums to one. - */ - def toProbability: VectorD = this * (1.0 / v.sum) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Normalize this vector so its length is one (unit vector). - */ - def normalize: VectorD = this * (1.0 / norm) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Normalize this vector to have a maximum of one. - */ - def normalize1: VectorD = this * (1.0 / max) - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Count the number of zero elements in the this vector. - */ - def countZero: Int = - var count = 0 - for e <- v if e == 0.0 do count += 1 - count - end countZero - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert vector to a string. - */ - override def toString: String = - val sb = new StringBuilder ("VectorD(") - if dim == 0 then return sb.append (")").mkString - for i <- indices do sb.append (fString.format (v(i))) - sb.replace (sb.length-2, sb.length, ")").mkString - end toString - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert vector to vector of strings. 
- */ - def toString2: VectorS = new VectorS (dim, v.map (_.toString)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Swap (in-place) elements i and k in this vector. - * @param i the first element in the swap - * @param k the second element in the swap - */ - def swap (i: Int, k: Int): Unit = - val tmp = v(i); v(i) = v(k); v(k) = tmp - end swap - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Indirectly find the k-median of the p to r partition of array v - * using the QuickSelect algorithm. - * @see http://en.wikipedia.org/wiki/Quickselect - * @param rk the rank order - * @param p the left cursor - * @param r the right cursor - * @param k the type of median (k-th smallest element) - */ - private def median (rk: Array [Int], p: Int, r: Int, k: Int): Double = - if p == r then return v(rk(p)) - iswap (rk, r, med3 (p, (p+r)/2, r)) // use median-of-3, comment out for simple pivot - val q = ipartition (rk, p, r) // partition into left (<=) and right (>=) - if q == k-1 then return v(rk(q)) // found k-median - else if q > k-1 then median (rk, p, q - 1, k) // recursively find median in left partition - else median (rk, q + 1, r, k) // recursively find median in right partition - end median - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Indirectly find the k-median (k-th smallest element) of array v. - * @param k the type of median (e.g., k = (dim+1)/2 is the median) - */ - def median (k: Int = (dim+1)/2): Double = - if dim <= 0 then flaw ("median", s"no vector to take the median of k = $k, dim = $dim") - median (Array.range (0, dim), 0, dim-1, k) - end median - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the fraction quantile. 
- * @param fraction the fraction/percentile to take - */ - def quantile (fraction: Double): Double = - var k = (fraction * dim).toInt - if k >= dim then k = dim - 1 - if k <= 0 then k = 1 - median (k) - end quantile - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the averaged median, which is the median when dim is odd and - * the average of the median and the next k-median when dim is even. - */ - def median_ : Double = if dim % 2 == 0 then (median () + median ((dim+2)/2)) / 2.0 - else median () - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given the rank order for this vector, return its elements in that order. - * The rank order may be established using indirect sorting (e.g., iqsort). - * @param rank the rank order of elements in this vector - */ - def reorder (rank: Array [Int]): VectorD = VectorD (for i <- indices yield v(rank(i))) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Recursively and indirectly sort the p to r partition of array v - * using QuickSort. - * @param rk the rank order - * @param p the left cursor - * @param r the right cursor - */ - private def iqsort (rk: Array [Int], p: Int, r: Int): Array [Int] = - if r - p > 5 then - iswap (rk, r, med3 (p, (p+r)/2, r)) // use median-of-3, comment out for simple pivot - val q = ipartition (rk, p, r) // partition into left (<=) and right (>=) - iqsort (rk, p, q - 1) // recursively sort left partition - iqsort (rk, q + 1, r) // recursively sort right partition - else - iselsort (rk, p, r) // use simple sort when small - end if - rk - end iqsort - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Indirectly sort this vector using QuickSort, returning the rank order. 
- */ - def iqsort: Array [Int] = iqsort (Array.range (0, dim), 0, dim-1) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Indirectly sort the p to r partition of array v using SelectionSort. - * @param rk the rank order - * @param p the left cursor - * @param r the right cursor - */ - private def iselsort (rk: Array [Int], p: Int, r: Int): Array [Int] = - for i <- p to r do - var k = i - for j <- i+1 to r if v(rk(j)) < v(rk(k)) do k = j - if i != k then iswap (rk, i, k) - end for - rk - end iselsort - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Indirectly sort this vector using SelectionSort, returning the rank order. - */ - def iselsort: Array [Int] = iselsort (Array.range (0, dim), 0, dim-1) - - def iselsort (end: Int = dim): Array [Int] = iselsort (Array.range (0, end), 0, end-1) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Indirectly partition the array from 'p' to 'r' into a left partition - * (<= 'x') and a right partition (>= 'x'). - * @param rk the rank order - * @param p the left cursor - * @param r the right cursor - */ - private def ipartition (rk: Array [Int], p: Int, r: Int): Int = - val x = v(rk(r)) // pivot - var i = p - 1 - for j <- p until r if v(rk(j)) <= x do - i += 1; iswap (rk, i, j) - end for - iswap (rk, i + 1, r) - i + 1 - end ipartition - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the index of the median of three elements. - * @param i element index 1 - * @param j element index 2 - * @param k element index 3 - */ - inline private def med3 (i: Int, j: Int, k: Int): Int = - if v(i) < v(j) then - if v(j) < v(k) then j else if v(i) < v(k) then k else i - else - if v(j) > v(k) then j else if v(i) > v(k) then k else i - end if - end med3 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Indirectly swap the elements at i and j, i.e., rk(i) <-> rk(j). 
- * @param rk the rank order - * @param i the first index position - * @param j the second index position - */ - inline private def iswap (rk: Array [Int], i: Int, j: Int): Unit = - val t = rk(i); rk(i) = rk(j); rk(j) = t - end iswap - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the sample mean (also the population mean, they are the same). - * >> E(X) - */ - def mean: Double = sum / nd - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the centered norm-squared of this vector. - */ - def cnormSq: Double = - var e, s, ss = 0.0 - for i <- indices do { e = v(i); s += e; ss += e * e } - ss - s * s / nd - end cnormSq - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the sample variance (and population population). - * >> E(X - μ)^2 - */ - def variance: Double = { val s = sum; (normSq - s * s / nd) / (nd-1) } - def variance_ : Double = { val s = sum; (normSq - s * s / nd) / nd } - def variance (mu: Double): Double = (normSq - mu * mu * nd) / (nd-1) - def variance_ (mu: Double): Double = (normSq - mu * mu * nd) / nd - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the sample covariance (or population covariance) of this vector with vector y. - * @param y the other vector - */ - def cov (y: IndexedSeq [Double]): Double = ((this dot y) - sum * y.sum / nd) / (nd-1) - def cov_ (y: IndexedSeq [Double]): Double = ((this dot y) - sum * y.sum / nd) / nd - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the mean square (ms) (or root mean square (rms)) of this vector. - */ - def ms: Double = normSq / nd - def rms: Double = math.sqrt (ms) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the sample standard deviation (and population standard deviation). 
- */ - def stdev: Double = math.sqrt (variance) - def stdev_ : Double = math.sqrt (variance_) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the 'k'-lag auto-covariance of this vector for stationary and - * non-stationary series (acov_). - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the sample standard deviation (and population standard deviation). - */ - def stdev: Double = math.sqrt (variance) - def stdev_ : Double = math.sqrt (variance_) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the 'k'-lag auto-covariance of this vector for stationary and - * non-stationary series (acov_). - * @param k the lag parameter - */ - def acov (k: Int = 1): Double = - val n = dim - k - val mu = mean - var sum = 0.0 - for i <- 0 until n do sum += (v(i) - mu) * (v(i+k) - mu) - sum / n - end acov - - def acov_ (k: Int = 1): Double = - val n = dim - k - val ss = sums (k) - val mu = ((ss._1 + ss._2) / n, (ss._2 + ss._3) / n) - var sum = 0.0 - for i <- 0 until n do sum += (v(i) - mu._1) * (v(i+k) - mu._2) - sum / n - end acov_ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute Pearson's correlation of this vector with vector y. - * If either variance is zero, will result in Not-a-Number (NaN), return - * one if the vectors are the same, or -0 (indicating undefined). - * @param y the other vector - */ - def corr (y: VectorD): Double = - val c = cov (y) / math.sqrt (variance * y.variance) - if c.isNaN then if this == y then 1.0 else -0.0 else c - end corr - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the 'k'-lag auto-correlation of this vector. - * Assumes a stationary process vector, if not its an approximation. 
- * @param k the lag parameter - */ - def acorr (k: Int = 1): Double = acov (k) / variance - - def acorr_ (k: Int = 1): Double = - val n = dim - k - val ss = sums (k) - val sq = normSqs (k) - val mu = ((ss._1 + ss._2) / n, (ss._2 + ss._3) / n) - val vr = ((sq._1 + sq._2 - (mu._1 * mu._1) * n) / n, - (sq._2 + sq._3 - (mu._2 * mu._2) * n) / n) - var sum = 0.0 - for i <- 0 until n do sum += (v(i) - mu._1) * (v(i+k) - mu._2) - (sum / n) / (math.sqrt (vr._1) * math.sqrt (vr._2)) - end acorr_ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute Spearman's rank correlation of this vector with vector y. - * The `iqsort` method gives the rank order of a vector. - * @see en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient - * @param y the other vector - */ - def scorr (y: VectorD): Double = - val rk1 = iqsort // rank order for this vector - val rk2 = y.iqsort // rank order for vector y - var sum = 0.0 - for i <- v.indices do sum += (rk1(i) - rk2(i))~^2 - 1 - 6 * sum / (nd * (nd*nd - 1)) - end scorr - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the skewness of this vector. Negative skewness indicates the - * distribution is elongated on the left, zero skewness indicates it is - * symmetric, and positive skewness indicates it is elongated on the right. - * @see www.mathworks.com/help/stats/skewness.html - * >> E(X - μ)^3 / σ^3 - */ - def skew: Double = ((this - mean)~^3).sum / (nd * stdev_ ~^3) - def skew_ : Double = skew * math.sqrt (nd * (nd-1)) / (nd-2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the kurtosis of 'x' vector. High kurtosis (> 3) indicates a - * distribution with heavier tails than a Normal distribution. 
- * @see www.mathworks.com/help/stats/kurtosis.html - * >> E(X - μ)^4 / σ^4 - */ - def kurtosis: Double = ((this - mean)~^4).sum / (nd * variance_ ~^2) - def kurtosis_ : Double = (nd-1) * ((nd+1) * kurtosis - 3 * (nd-1)) / ((nd-2) * (nd-3)) + 3 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a standardized version of the vector by subtracting the mean and - * dividing by the standard deviation (e.g., Normal -> Standard Normal). - */ - def standardize: VectorD = (this - mean) / stdev - -end VectorD - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `VectorD` object is the companion object for the `VectorD` class. - */ -object VectorD: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `VectorD` from an immutable indexed sequence of `Double`s. - * @param xs the sequence/array of the `Double` numbers - */ - def apply (xs: collection.immutable.IndexedSeq [Double]): VectorD = new VectorD (xs.size, xs.toArray) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `VectorD` from a mutable indexed sequence of `Double`s. - * @param xs the sequence/array of the `Double` numbers - */ - def apply (xs: IndexedSeq [Double]): VectorD = new VectorD (xs.size, xs.toArray) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `VectorD` from one or more values (repeated values `Double`*). - * @param x the first `Double` number - * @param xs the varargs of `Double` numbers - */ - def apply (x: Double, xs: Double*): VectorD = new VectorD (xs.size + 1, x +: xs.toArray) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `VectorD` from one or more values (repeated values String*). - * For numeric types, assign missing value indicator upon format failure. 
- * @param x the first string - * @param xs the varargs of strings - */ - def apply (x: String, xs: String*): VectorD = - val y = new VectorD (1 + xs.length) - for i <- y.indices do - y(i) = if i == 0 then x.mkDouble else xs(i-1).mkDouble - end for - y - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `VectorD` from a mutable indexed sequence of `String`. - * @param xs the sequence/array of the `String` numbers - */ - def fromStrings (xs: IndexedSeq [String]): VectorD = VectorD (xs.map (_.mkDouble)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `VectorD` from a mutable indexed sequence of `TimeNum`. - * FIX - for numeric types, assign missing value indicator upon format failure. - * @param xs the sequence/array of the `TimeNum` numbers - */ - def fromTimeNums (xs: IndexedSeq [TimeNum]): VectorD = VectorD (xs.map (_.toDouble)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return a `VectorD` consisting of a sequence of integers in a range. - * @param r the range of values - */ - def range (r: Range): VectorD = VectorD (for i <- r yield i.toDouble) - def range (i1: Int, i2: Int): VectorD = VectorD (for i <- i1 until i2 yield i.toDouble) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `VectorD` with n elements and fill it with the value x. - * @param n the number of elements - * @param x the value to assign to all elements - */ - def fill (n: Int)(x: Double): VectorD = new VectorD (n, Array.fill (n)(x)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a one vector (all elements are one) of length n. 
- * @param size the size of the new vector - */ - def one (n: Int): VectorD = new VectorD (n, Array.fill (n)(1.0)) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a vector of the form (0, ... 1, ... 0) where the 1 is at position j. - * @param j the position to place the 1 - * @param size the size of the vector (upper bound = size - 1) - */ - def oneAt (j: Int, size: Int): VectorD = - val x = new VectorD (size) - x.v(j) = 1.0 - x - end oneAt - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** A null vector of type `VectorD`. - */ - val nullv: VectorD = null.asInstanceOf [VectorD] - -end VectorD - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `vectorDTest` main function tests the operations provided by the `VectorD` class. - * Only the most commonly used inherited operations are shown. - * @see mutable.IndexedSeq for a complete list - * > runMain scalation.mathstat.vectorDTest - */ -@main def vectorDTest (): Unit = - - val x = VectorD (1, 2, 3) - val y = VectorD (4, 6, 5) - val z = VectorD (4, 6, 5) - val w = VectorD (3, 4, 5, 5) - val u = VectorD ("1", "2", "3", "4") - val a = 2 - - banner ("Given Vectors:") - println (s"x = $x") - println (s"y = $y") - println (s"z = $z") - println (s"w = $w") - println (s"u = $u") - - banner ("Inherited Operations:") - - println (s"x == y = ${x == y}") // vector equality - println (s"y == z = ${y == z}") - println (s"x != y = ${x != y}") // vector inequality - println (s"x < y = ${x < y}") // less than - println (s"x <= y = ${x <= y}") // less than or equal - println (s"x > y = ${x > y}") // greater then - println (s"x >= y = ${x >= y}") // greater then or equal - - println (s"x contains 2 = ${x contains 2}") // element contained in vector - println (s"x contains 4 = ${x contains 4}") - println (s"x.exists (_ > 2) = ${x.exists (_ > 2)}") // existence of element satisfying predicate - println (s"x.groupBy 
(_ > 2) = ${x.groupBy (_ > 2)}") // group according function values - println (s"x.indexOf (2) = ${x.indexOf (2)}") // index of first element equaling - println (s"x.indexWhere (_ > 2) = ${x.indexWhere (_ > 2)}") // index of first element satisfying predicate - println (s"x.indices = ${x.indices}") // indices of the vector - println (s"u.map (_ * 2) = ${u.map (_ * 2)}") // map vector elements using the function - println (s"x.max = ${x.max}") // maximum element - println (s"x.min = ${x.min}") // minimum element - println (s"x.product = ${x.product}") // product of all elements - println (s"x.sum = ${x.sum}") // sum of all elements - println (s"w.toArray = ${stringOf (w.toArray)}") // convert to array - use stringOf to print arrays - println (s"w.toSet = ${w.toSet}") // convert to set - - banner ("Implemented Operations:") - - println (s"x(2) = ${x(2)}") // value at index - println (s"x(0 to 2) = ${x(0 to 2)}") // values in exclusive range - - println (s"-x = ${-x}") // unary minus - println (s"x + y = ${x + y}") // element-wise vector addition - println (s"x - y = ${x - y}") // element-wise vector subtraction - println (s"x * y = ${x * y}") // element-wise vector multiplication - println (s"x / y = ${x / y}") // element-wise vector division - println (s"x ++ y = ${x ++ y}") // concatenate vectors - - println (s"x + a = ${x + a}") // add scalar a - println (s"x - a = ${x - a}") // subtract scalar a - println (s"x * a = ${x * a}") // multiply by scalar a - println (s"x / a = ${x / a}") // divide by scalar a - println (s"x ~^ a = ${x ~^ a}") // raise to power of scalar a - println (s"a +: y = ${a +: x}") // prepend scalar a - println (s"x :+ a = ${x :+ a}") // append scalar a - - println (s"(x-y).abs = ${(x-y).abs}") // absolute value - println (s"x.cnormSq = ${x.cnormSq}") // center norm squared - println (s"x.cumulate = ${x.cumulate}") // cumulate all prior values - println (s"x diff w = ${x diff w}") // multi-set difference - println (s"w.distinct = ${w.distinct}") 
// extract distinct values - println (s"x dot y = ${x dot y}") // dot product - println (s"u.filter (_ > 2) = ${u.filter (_ > 2)}") // filter on predicate - println (s"u.filterNot (_ > 2) = ${u.filterNot (_ > 2)}") // filter on not predicate - println (s"u.filterPos (_ > 2) = ${u.filterPos (_ > 2)}") // filter return indices - println (s"x intersect w = ${x intersect w}") // multi-set intersection - println (s"y.iselsort = ${stringOf (y.iselsort)}") // indirect QuickSort in ascending order - println (s"y.iqsort = ${stringOf (y.iqsort)}") // indirect SelectionSort in ascending order - println (s"x.norm = ${x.norm}") // Euclidean norm - println (s"x.normalize = ${x.normalize}") // normalize the vector to a unit vector - println (s"x.normalize1 = ${x.normalize1}") // normalize the vector to max 1 vector - println (s"x.normSq = ${x.normSq}") // Euclidean norm squared - println (s"x.norm1 = ${x.norm1}") // Manhattan norm - println (s"x.recip = ${x.recip}") // reciprocal - println (s"x.reverse = ${x.reverse}") // reverse elements in vector - println (s"y.sorted = ${y.sorted}") // sort in ascending order - println (s"y.sortWith (_ > _) = ${y.sortWith (_ > _)}") // sort in descending order - println (s"y.toProbabilty = ${y.toProbability}") // convert to a probability vector - - banner ("Implemented Statistical Operations: (_ => population") - - println (s"x.acorr () = ${x.acorr ()}") // auto-correlation - println (s"x.acorr_ () = ${x.acorr_ ()}") // auto-correlation - non-stationary case - println (s"x.acov () = ${x.acov ()}") // auto-covariance - println (s"x.acov_ () = ${x.acov_ ()}") // auto-covariance - non-stationary case - println (s"x corr y = ${x corr y}") // correlation - println (s"x cov y = ${x cov y}") // covariance - println (s"x cov_ y = ${x cov_ y}") - println (s"x.kurtosis = ${x.kurtosis}") // kurtosis - println (s"x.mean = ${x.mean}") // mean - println (s"u.median () = ${u.median ()}") // median - println (s"u.median_ = ${u.median_}") // averaged median - 
println (s"x ms y = ${x.ms}") // mean square - println (s"x rms y = ${x.rms}") // root mean square - println (s"x scorr y = ${x scorr y}") // Spearman's rank correlation diff --git a/target/scala-3.6.4/classes/scalation/mathstat/plotCTest.class b/target/scala-3.6.4/classes/scalation/mathstat/plotCTest.class deleted file mode 100644 index 84e034277..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/plotCTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/plotCTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/plotCTest.tasty deleted file mode 100644 index e38c2eb11..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/plotCTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/plotMTest.class b/target/scala-3.6.4/classes/scalation/mathstat/plotMTest.class deleted file mode 100644 index 28b78524d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/plotMTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/plotMTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/plotMTest.tasty deleted file mode 100644 index af2d63d45..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/plotMTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/plotMTest2.class b/target/scala-3.6.4/classes/scalation/mathstat/plotMTest2.class deleted file mode 100644 index 536b9dd57..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/plotMTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/plotMTest2.tasty b/target/scala-3.6.4/classes/scalation/mathstat/plotMTest2.tasty deleted file mode 100644 index 2528cbe66..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/plotMTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/plotTest.class 
b/target/scala-3.6.4/classes/scalation/mathstat/plotTest.class deleted file mode 100644 index b111f6808..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/plotTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/plotTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/plotTest.tasty deleted file mode 100644 index 331533689..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/plotTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/powForm.class b/target/scala-3.6.4/classes/scalation/mathstat/powForm.class deleted file mode 100644 index b94a8d84f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/powForm.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/powForm.tasty b/target/scala-3.6.4/classes/scalation/mathstat/powForm.tasty deleted file mode 100644 index 8060d3121..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/powForm.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest.class b/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest.class deleted file mode 100644 index 8878d1059..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest.tasty deleted file mode 100644 index 334310c2c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest2.class b/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest2.class deleted file mode 100644 index 6247b3524..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest2.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest2.tasty b/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest2.tasty deleted file mode 100644 index dee4ed27f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest3.class b/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest3.class deleted file mode 100644 index dfcbeddf8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest3.tasty b/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest3.tasty deleted file mode 100644 index 54e19a026..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest4.class b/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest4.class deleted file mode 100644 index 661aa6465..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest4.tasty b/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest4.tasty deleted file mode 100644 index e8f23dab5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest5.class b/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest5.class deleted file mode 100644 index ff4637e07..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest5.tasty 
b/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest5.tasty deleted file mode 100644 index 017490cd9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest6.class b/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest6.class deleted file mode 100644 index 47b269945..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest6.tasty b/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest6.tasty deleted file mode 100644 index 6db97ff4f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/probabilityTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/rTensor4DTest.class b/target/scala-3.6.4/classes/scalation/mathstat/rTensor4DTest.class deleted file mode 100644 index 3ce82d3ff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/rTensor4DTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/rTensor4DTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/rTensor4DTest.tasty deleted file mode 100644 index 7b174adae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/rTensor4DTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/rTensorDTest.class b/target/scala-3.6.4/classes/scalation/mathstat/rTensorDTest.class deleted file mode 100644 index 174472977..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/rTensorDTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/rTensorDTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/rTensorDTest.tasty deleted file mode 100644 index d90634c1e..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/rTensorDTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/rangeForm.class b/target/scala-3.6.4/classes/scalation/mathstat/rangeForm.class deleted file mode 100644 index 165780f4b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/rangeForm.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/rangeForm.tasty b/target/scala-3.6.4/classes/scalation/mathstat/rangeForm.tasty deleted file mode 100644 index 501cbee51..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/rangeForm.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/sinForm.class b/target/scala-3.6.4/classes/scalation/mathstat/sinForm.class deleted file mode 100644 index c47b7e27f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/sinForm.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/sinForm.tasty b/target/scala-3.6.4/classes/scalation/mathstat/sinForm.tasty deleted file mode 100644 index 14b475ca9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/sinForm.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/statTableTest.class b/target/scala-3.6.4/classes/scalation/mathstat/statTableTest.class deleted file mode 100644 index 9a1310152..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/statTableTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/statTableTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/statTableTest.tasty deleted file mode 100644 index 692a39de0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/statTableTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/statisticTest.class b/target/scala-3.6.4/classes/scalation/mathstat/statisticTest.class deleted file mode 
100644 index 76bd39767..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/statisticTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/statisticTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/statisticTest.tasty deleted file mode 100644 index 163ac5cd5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/statisticTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/stats4TSTest.class b/target/scala-3.6.4/classes/scalation/mathstat/stats4TSTest.class deleted file mode 100644 index 3fe6b3ac3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/stats4TSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/stats4TSTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/stats4TSTest.tasty deleted file mode 100644 index 0a0848523..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/stats4TSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest.class b/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest.class deleted file mode 100644 index 76779e7bf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest.tasty deleted file mode 100644 index 30d409689..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest2.class b/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest2.class deleted file mode 100644 index 87f9f8aed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest2.tasty 
b/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest2.tasty deleted file mode 100644 index 3d0cf90b8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest3.class b/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest3.class deleted file mode 100644 index e7bfa5d91..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest3.tasty b/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest3.tasty deleted file mode 100644 index 45f39e3e7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/tensorDTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/timeStatisticTest.class b/target/scala-3.6.4/classes/scalation/mathstat/timeStatisticTest.class deleted file mode 100644 index 80458b103..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/timeStatisticTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/timeStatisticTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/timeStatisticTest.tasty deleted file mode 100644 index 27dbb5044..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/timeStatisticTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/tnT_SplitTest.class b/target/scala-3.6.4/classes/scalation/mathstat/tnT_SplitTest.class deleted file mode 100644 index 3d418731b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/tnT_SplitTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/tnT_SplitTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/tnT_SplitTest.tasty deleted file mode 100644 index 30c86663c..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/tnT_SplitTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/transformTest.class b/target/scala-3.6.4/classes/scalation/mathstat/transformTest.class deleted file mode 100644 index dc7c71ea9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/transformTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/transformTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/transformTest.tasty deleted file mode 100644 index dbdc2f182..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/transformTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/transformTest2.class b/target/scala-3.6.4/classes/scalation/mathstat/transformTest2.class deleted file mode 100644 index c20a4a78b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/transformTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/transformTest2.tasty b/target/scala-3.6.4/classes/scalation/mathstat/transformTest2.tasty deleted file mode 100644 index f09b58a66..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/transformTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/transformTest3.class b/target/scala-3.6.4/classes/scalation/mathstat/transformTest3.class deleted file mode 100644 index bbb175a64..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/transformTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/transformTest3.tasty b/target/scala-3.6.4/classes/scalation/mathstat/transformTest3.tasty deleted file mode 100644 index e0cd0fa92..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/transformTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vMatrixDTest.class 
b/target/scala-3.6.4/classes/scalation/mathstat/vMatrixDTest.class deleted file mode 100644 index 9d9612f8b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vMatrixDTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vMatrixDTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/vMatrixDTest.tasty deleted file mode 100644 index 438bab959..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vMatrixDTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest.class b/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest.class deleted file mode 100644 index 508e40fc8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest.tasty deleted file mode 100644 index f46f70cc3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest2.class b/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest2.class deleted file mode 100644 index 3de994bd5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest2.tasty b/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest2.tasty deleted file mode 100644 index 2fff1c03e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest3.class b/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest3.class deleted file mode 100644 index 9db4a79f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest3.class and /dev/null 
differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest3.tasty b/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest3.tasty deleted file mode 100644 index c406db887..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorCTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest.class b/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest.class deleted file mode 100644 index 893adbecd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest.tasty deleted file mode 100644 index 3199bdf16..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest2.class b/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest2.class deleted file mode 100644 index 9ef2db106..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest2.tasty b/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest2.tasty deleted file mode 100644 index 140cf74dd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest3.class b/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest3.class deleted file mode 100644 index 6ac223dc7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest3.tasty b/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest3.tasty deleted file mode 100644 index c524b018c..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest4.class b/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest4.class deleted file mode 100644 index ab5e0b426..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest4.tasty b/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest4.tasty deleted file mode 100644 index 612e63d73..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest5.class b/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest5.class deleted file mode 100644 index 6038df5a0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest5.tasty b/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest5.tasty deleted file mode 100644 index a09f670f2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest6.class b/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest6.class deleted file mode 100644 index 69067ce09..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest6.tasty b/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest6.tasty deleted file mode 100644 index 365ecb349..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorDTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorITest.class 
b/target/scala-3.6.4/classes/scalation/mathstat/vectorITest.class deleted file mode 100644 index 2de49e7b7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorITest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorITest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/vectorITest.tasty deleted file mode 100644 index 8fb69205f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorITest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorLTest.class b/target/scala-3.6.4/classes/scalation/mathstat/vectorLTest.class deleted file mode 100644 index 17af85201..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorLTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorLTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/vectorLTest.tasty deleted file mode 100644 index 98e3c1cf3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorLTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorSTest.class b/target/scala-3.6.4/classes/scalation/mathstat/vectorSTest.class deleted file mode 100644 index 2dd4ed9d3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorSTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/vectorSTest.tasty deleted file mode 100644 index 6d8773b53..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorSTest2.class b/target/scala-3.6.4/classes/scalation/mathstat/vectorSTest2.class deleted file mode 100644 index ad5311624..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorSTest2.class and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/mathstat/vectorSTest2.tasty b/target/scala-3.6.4/classes/scalation/mathstat/vectorSTest2.tasty deleted file mode 100644 index f3e7bcc1d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorSTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorTTest.class b/target/scala-3.6.4/classes/scalation/mathstat/vectorTTest.class deleted file mode 100644 index dc38a2afc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorTTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorTTest.tasty b/target/scala-3.6.4/classes/scalation/mathstat/vectorTTest.tasty deleted file mode 100644 index 225eec4ec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorTTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorTTest2.class b/target/scala-3.6.4/classes/scalation/mathstat/vectorTTest2.class deleted file mode 100644 index bcf48d62f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorTTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/vectorTTest2.tasty b/target/scala-3.6.4/classes/scalation/mathstat/vectorTTest2.tasty deleted file mode 100644 index eb8a11a45..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/vectorTTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/zForm.class b/target/scala-3.6.4/classes/scalation/mathstat/zForm.class deleted file mode 100644 index 7c5198fbd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mathstat/zForm.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mathstat/zForm.tasty b/target/scala-3.6.4/classes/scalation/mathstat/zForm.tasty deleted file mode 100644 index d7f270c80..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/mathstat/zForm.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mergeSortIndirectTest.class b/target/scala-3.6.4/classes/scalation/mergeSortIndirectTest.class deleted file mode 100644 index 3c03a8d94..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mergeSortIndirectTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mergeSortIndirectTest.tasty b/target/scala-3.6.4/classes/scalation/mergeSortIndirectTest.tasty deleted file mode 100644 index 8be44f499..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mergeSortIndirectTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mergeSortIndirectTest2.class b/target/scala-3.6.4/classes/scalation/mergeSortIndirectTest2.class deleted file mode 100644 index e56be53dc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mergeSortIndirectTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/mergeSortIndirectTest2.tasty b/target/scala-3.6.4/classes/scalation/mergeSortIndirectTest2.tasty deleted file mode 100644 index dcbc119ad..000000000 Binary files a/target/scala-3.6.4/classes/scalation/mergeSortIndirectTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/AFF$.class b/target/scala-3.6.4/classes/scalation/modeling/AFF$.class deleted file mode 100644 index 4585ae68d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/AFF$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/AFF.class b/target/scala-3.6.4/classes/scalation/modeling/AFF.class deleted file mode 100644 index 43816d766..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/AFF.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/AFF.tasty b/target/scala-3.6.4/classes/scalation/modeling/AFF.tasty deleted file mode 100644 index 
574ba4fea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/AFF.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ActivationFun$.class b/target/scala-3.6.4/classes/scalation/modeling/ActivationFun$.class deleted file mode 100644 index 501b034e3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ActivationFun$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ActivationFun$package$.class b/target/scala-3.6.4/classes/scalation/modeling/ActivationFun$package$.class deleted file mode 100644 index fb13f263d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ActivationFun$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ActivationFun$package.class b/target/scala-3.6.4/classes/scalation/modeling/ActivationFun$package.class deleted file mode 100644 index ecaf114a8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ActivationFun$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ActivationFun$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/ActivationFun$package.tasty deleted file mode 100644 index 0474979bb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ActivationFun$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ActivationFun.class b/target/scala-3.6.4/classes/scalation/modeling/ActivationFun.class deleted file mode 100644 index 72f9338e0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ActivationFun.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ActivationFun.tasty b/target/scala-3.6.4/classes/scalation/modeling/ActivationFun.tasty deleted file mode 100644 index 84c9ea210..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ActivationFun.tasty and /dev/null differ 
diff --git a/target/scala-3.6.4/classes/scalation/modeling/BestStep$.class b/target/scala-3.6.4/classes/scalation/modeling/BestStep$.class deleted file mode 100644 index 28986ce10..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/BestStep$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/BestStep.class b/target/scala-3.6.4/classes/scalation/modeling/BestStep.class deleted file mode 100644 index 65220a133..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/BestStep.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/BestStep.tasty b/target/scala-3.6.4/classes/scalation/modeling/BestStep.tasty deleted file mode 100644 index 8bed0244c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/BestStep.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/DistanceOutlier$.class b/target/scala-3.6.4/classes/scalation/modeling/DistanceOutlier$.class deleted file mode 100644 index 1a572e62c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/DistanceOutlier$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/DistanceOutlier.class b/target/scala-3.6.4/classes/scalation/modeling/DistanceOutlier.class deleted file mode 100644 index 81523b8ad..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/DistanceOutlier.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/DistanceOutlier.tasty b/target/scala-3.6.4/classes/scalation/modeling/DistanceOutlier.tasty deleted file mode 100644 index f2969585d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/DistanceOutlier.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG$.class b/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG$.class deleted file mode 100644 index a8b8c0fb1..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG$package$.class b/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG$package$.class deleted file mode 100644 index aec60f99d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG$package.class b/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG$package.class deleted file mode 100644 index d04755a2e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG$package.tasty deleted file mode 100644 index c82f533af..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG.class b/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG.class deleted file mode 100644 index 6fa3f51a4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG.tasty b/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG.tasty deleted file mode 100644 index cb388895f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Example_AutoMPG.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure$.class b/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure$.class deleted file mode 100644 index a73a6b0f8..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure$package$.class b/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure$package$.class deleted file mode 100644 index 0bce02c57..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure$package.class b/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure$package.class deleted file mode 100644 index a8de044bc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure$package.tasty deleted file mode 100644 index 1a2478b42..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure.class b/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure.class deleted file mode 100644 index d398bad5a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure.tasty b/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure.tasty deleted file mode 100644 index 80a994357..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Example_BPressure.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall$.class b/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall$.class deleted file mode 100644 index 6474d86e1..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall$package$.class b/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall$package$.class deleted file mode 100644 index 9d2e281f3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall$package.class b/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall$package.class deleted file mode 100644 index 556cbf49e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall$package.tasty deleted file mode 100644 index 885258f85..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall.class b/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall.class deleted file mode 100644 index 3cca31936..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall.tasty b/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall.tasty deleted file mode 100644 index 3afda9a8a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Example_BasketBall.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ExpRegression$.class b/target/scala-3.6.4/classes/scalation/modeling/ExpRegression$.class deleted file mode 100644 index 3c481f997..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/ExpRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ExpRegression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/ExpRegression$package$.class deleted file mode 100644 index 747c40134..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ExpRegression$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ExpRegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/ExpRegression$package.class deleted file mode 100644 index d9e03bc5d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ExpRegression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ExpRegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/ExpRegression$package.tasty deleted file mode 100644 index 08bc85580..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ExpRegression$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ExpRegression.class b/target/scala-3.6.4/classes/scalation/modeling/ExpRegression.class deleted file mode 100644 index 9f444b92a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ExpRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ExpRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/ExpRegression.tasty deleted file mode 100644 index 7734e5543..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ExpRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ExpandableVariable.class b/target/scala-3.6.4/classes/scalation/modeling/ExpandableVariable.class deleted file mode 100644 index b27a21330..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ExpandableVariable.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/ExpandableVariable.tasty b/target/scala-3.6.4/classes/scalation/modeling/ExpandableVariable.tasty deleted file mode 100644 index a2a83dfb5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ExpandableVariable.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection$package$.class b/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection$package$.class deleted file mode 100644 index 652e1c699..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection$package.class b/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection$package.class deleted file mode 100644 index 0cb44bad7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection$package.tasty deleted file mode 100644 index dee5403b5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection.class b/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection.class deleted file mode 100644 index 92daf95ac..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection.scala.bak deleted file mode 100644 index e1376254d..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection.scala.bak +++ /dev/null @@ -1,154 +0,0 @@ - 
-//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Fri Sep 27 20:58:20 EDT 2024 - * @see LICENSE (MIT style license file). - * - * @note Model Framework: Support for Feature Selection - * - * @see bookdown.org/max/FES/selection.html - */ - -package scalation -package modeling - -import scala.collection.mutable.LinkedHashSet - -import scalation.mathstat._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `SelectionTech` enumeration indicates the available feature selection - * techniques. - */ -enum SelectionTech: - - case Forward, Backward, Stepwise - -end SelectionTech - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `FeatureSelection` trait establishes a framework for feature selection, - * i.e., selecting the features (e.g., variable x_j, cross term x_j x_k, or - * functional form x_j^2) to include in the model. - */ -trait FeatureSelection: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform feature selection to find the most predictive features/variables - * to have in the model, returning the features/variables added and the new - * Quality of Fit (QoF) measures/metrics for all steps. - * @see `Fit` for index of QoF measures/metrics. 
- * @param tech the feature selection technique to apply - * @param qk index of Quality of Fit (QoF) to use for comparing quality - * @param cross whether to include the cross-validation QoF measure - */ - def selectFeatures (tech: SelectionTech, qk: Int = QoF.rSqBar.ordinal, cross: Boolean = true): - (LinkedHashSet [Int], MatrixD) = - tech match - case SelectionTech.Forward => forwardSelAll (qk, cross) - case SelectionTech.Backward => backwardElimAll (qk, 1, cross) - case SelectionTech.Stepwise => stepwiseSelAll (qk, cross) - end selectFeatures - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform FORWARD SELECTION to find the MOST predictive features/variables - * to ADD into the model, returning the features/variables added and the new - * Quality of Fit (QoF) measures/metrics for all steps. - * @see `Fit` for index of QoF measures/metrics. - * @param qk index of Quality of Fit (QoF) to use for comparing quality - * @param cross whether to include the cross-validation QoF measure - */ - def forwardSelAll (qk: Int = QoF.rSqBar.ordinal, cross: Boolean = true): - (LinkedHashSet [Int], MatrixD) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform BACKWARD ELIMINATION to find the LEAST predictive features/variables - * to REMOVE from the full model, returning the features/variables left and the - * new Quality of Fit (QoF) measures/metrics for all steps. - * @see `Fit` for index of QoF measures/metrics. 
- * @param qk index of Quality of Fit (QoF) to use for comparing quality - * @param first first variable to consider for elimination - * @param cross whether to include the cross-validation QoF measure - */ - def backwardElimAll (qk: Int = QoF.rSqBar.ordinal, first: Int = 1, cross: Boolean = true): - (LinkedHashSet [Int], MatrixD) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform STEPWISE SELECTION to find a GOOD COMBINATION of predictive features/variables - * to have in the model, returning the features/variables left and the new Quality of Fit - * (QoF) measures/metrics for all steps. At each step, it calls forward and backward - * and takes the best of the two actions. Stops when neither action yields improvement. - * @see `Fit` for index of QoF measures/metrics. - * @param qk index of Quality of Fit (QoF) to use for comparing quality - * @param cross whether to include the cross-validation QoF measure - * @param swap whether to allow a swap step (swap out a feature for a new feature in one step) - */ - def stepwiseSelAll (qk: Int = QoF.rSqBar.ordinal, cross: Boolean = true, swap: Boolean = true): - (LinkedHashSet [Int], MatrixD) - -end FeatureSelection - - -type Model_FS = (Predictor | neuralnet.PredictorMV) & Fit - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BestStep` is used to record the best improvement step found so far during - * feature selection. 
Note, best depends on whether maximizing or minimizing - * @param col the column/variable to ADD/REMOVE for this step - * @param qk the index for the Quality of Fit (QoF) measure/metric used for comparison - * @param qof the Quality of Fit (QoF) for this step - * @param mod the model including selected features/variables for this step - * @param bestq the best QoF for metric qk so far - */ -case class BestStep (col: Int = -1, qk: Int = QoF.rSqBar.ordinal, qof: VectorD = null, - mod: Model_FS = null) - (bestq: Double = if Fit.maxi.contains (qk) then -MAX_VALUE else MAX_VALUE): - - private val debug = debugf ("BestStep", true) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the better between this and candidate step. - * @param cand the new candidate - */ - def better (cand: BestStep): BestStep = - debug ("better", s"cand = $cand vs. this = $this") - if qof == null then cand - else if Fit.maxi.contains (qk) then - if cand.qof(qk) > qof(qk) then cand else this // maximize, e.g., R^2 - else - if cand.qof(qk) < qof(qk) then cand else this // minimize, e.g., mse, smape - end better - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the better between this and the to be formed candidate step. - * @param j the index of the feature/variable - * @param qof_j the QoF for mod_j - * @param mod_j the model with j - */ - def better (j: Int, qof_j: VectorD, mod_j: Model_FS): BestStep = - better (BestStep (j, qk, qof_j, mod_j)(qof_j(qk))) - end better - -end BestStep - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Update the rSq-based and smape QoF results for the l-th iteration of feature - * selection. 
- * @see `Predictor` - * @param rSq the matrix contain information about r-Sq-based QoF measures - * @param l the l-th iteration - * @param cross indicator of whether cross-validation are to be included - * @param best the best step so far - */ -def updateQoF (rSq: MatrixD, l: Int, cross: Boolean, best: BestStep): Unit = - rSq(l) = - if cross then - Fit.qofVector (best.qof, best.mod.crossValidate ()) // results for model mod_l, with cross-validation - else - Fit.qofVector (best.qof, null) // results for model mod_l, no cross-validation -end updateQoF - diff --git a/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection.tasty b/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection.tasty deleted file mode 100644 index 894860476..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/FeatureSelection.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Fit$.class b/target/scala-3.6.4/classes/scalation/modeling/Fit$.class deleted file mode 100644 index 8acfeced8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Fit$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Fit$package$.class b/target/scala-3.6.4/classes/scalation/modeling/Fit$package$.class deleted file mode 100644 index f9973689b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Fit$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Fit$package$ft$2$.class b/target/scala-3.6.4/classes/scalation/modeling/Fit$package$ft$2$.class deleted file mode 100644 index 6eaf9f8b1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Fit$package$ft$2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Fit$package.class b/target/scala-3.6.4/classes/scalation/modeling/Fit$package.class deleted file mode 100644 index 9d7478d26..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/Fit$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Fit$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/Fit$package.tasty deleted file mode 100644 index 6d10aed1e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Fit$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Fit.class b/target/scala-3.6.4/classes/scalation/modeling/Fit.class deleted file mode 100644 index 6e13483de..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Fit.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Fit.tasty b/target/scala-3.6.4/classes/scalation/modeling/Fit.tasty deleted file mode 100644 index db1dfe9b8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Fit.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/FitI.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/FitI.scala.bak deleted file mode 100644 index 27d9281d7..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/FitI.scala.bak +++ /dev/null @@ -1,275 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Thu Apr 4 13:40:25 EDT 2024 - * @see LICENSE (MIT style license file). - * - * @note Model Support: Interval-based Quality of Fit (QoFI) - * - * @see github.com/scikit-learn/scikit-learn/issues/20162 // used in scikit-learn - * www.mdpi.com/1999-4893/13/6/132 // defines several metrics - * arxiv.org/pdf/2005.12881.pdf // for IS and WIS - */ - -package scalation -package modeling - -import scalation.mathstat._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QoFI` enum defines the Interval-based Quality of Fit (QoFI) measures/metrics. 
- * @param name the name of the parameter - */ -enum QoFI (val name: String): - - case picp extends QoFI ("picp") // index 0 - case pinc extends QoFI ("pinc") // index 1 - case ace extends QoFI ("ace") // index 2 - case pinaw extends QoFI ("pinaw") // index 3 - case pinad extends QoFI ("pinad") // index 4 - case iscore extends QoFI ("iscore") // index 5 - case wis extends QoFI ("wis") // index 7 - -end QoFI - -val qoFI_names = QoFI.values.map (_.toString) // The QoF names from the QoFI enum -val qoFI_all_names = qoF_names ++ qoFI_names // All the QoF names from QoF and QoFI - -import QoFI._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `FitI` companion object provides factory methods for assessing quality of - * fit for standard types of modeling techniques. - */ -object FitI: - - val N_QoFI = QoFI.values.size // the number of QoFI measures - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the help string that describes the Quality of Fit (QoFI) measures - * provided by the `FitI` class. - * @see www.ncbi.nlm.nih.gov/pmc/articles/PMC5570302/ - * @see https://en.wikipedia.org/wiki/Coefficient_of_determination - */ - def help: String = - """ -help: Interval-based Quality of Fit (QoFI) metrics/measures: - picp = prediction interval coverage probability - pinc = prediction interval nominal coverage - ace = average coverage error - pinaw = prediction interval normalized average width - pinad = prediction interval normalized average deviation - iscore = interval score - wis = weighted interval score - """ - end help - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a table to store statistics for QoFI measures, where each row corresponds - * to the statistics on a particular QoFI measure. 
- */ - def qofStatTable: Array [Statistic] = - val stats = Array.ofDim [Statistic] (N_QoFI) // for collecting stats on QoFI measures - for i <- stats.indices do stats(i) = new Statistic (values(i).toString, unbiased = true) - stats - end qofStatTable - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Tally the current QoFI measures into the statistical accumulators. - * @param stats the statistics table being updated - * @param qof the current QoFI measure vector - */ - def tallyQof (stats: Array [Statistic], qof: VectorD): Unit = - for q <- qof.indices do stats(q).tally (qof(q)) // tally these QoFI measures - end tallyQof - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the Prediction Interval Coverage Probability (PICP) metric, i.e., - * the fraction is actual values inside the prediction interval. - * @param y the given time-series (must be aligned with the interval forecast) - * @param low the lower bound - * @param up the upper bound - */ - def picp_ (y: VectorD, low: VectorD, up: VectorD): Double = - var count = 0 - for i<- y.indices if y(i) in (low(i), up(i)) do count += 1 - count / y.dim.toDouble - end picp_ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the Prediction Interval Normalised Average Deviation (PINAD) metric, i.e., - * the normalized (by range) average deviation outside the prediction interval. - * @param y the given time-series (must be aligned with the interval forecast) - * @param low the lower bound - * @param up the upper bound - */ - def pinad_ (y: VectorD, low: VectorD, up: VectorD): Double = - var sum = 0.0 - for i <- y.indices do - sum += (if y(i) < low(i) then low(i) - y(i) - else if y(i) > up(i) then y(i) - up(i) - else 0.0) - sum / (y.dim * (y.max - y.min)) - end pinad_ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the Interval Score (IS) metric, i.e., the ... 
- * @see https://arxiv.org/pdf/2005.12881.pdf - * @param y the given time-series (must be aligned with the interval forecast) - * @param low the lower bound - * @param up the upper bound - & @param alpha the prediction level - */ - def iscore_ (y: VectorD, low: VectorD, up: VectorD, alpha: Double = 0.1): Double = - val fac = 2.0 / alpha - var sum = 0.0 - for i <- y.indices do - sum += up(i) - low(i) // interval width - if y(i) < low(i) then sum += fac * (low(i) - y(i)) // y_i below interval penalty - if y(i) > up(i) then sum += fac * (y(i) - up(i)) // y_i above interval penalty - sum / y.dim - end iscore_ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the Weighted Interval Score (WIS) metric, i.e., the ... - * @see https://arxiv.org/pdf/2005.12881.pdf - * @param y the given time-series (must be aligned with the interval forecast) - * @param yp the point prediction mean/median - * @param low the lower bounds for various alpha levels - * @param up the upper bounds for various alpha levels - * @param alphas the array of prediction levels - */ - def wis_ (y: VectorD, yp: VectorD, low: MatrixD, up: MatrixD, - alphas: Array [Double] = - Array (0.02, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)): Double = - val k = alphas.size - var sum = alphas(0) * (y - yp).abs.mean - for j <- 1 until k do sum += alphas(j) * iscore_ (y, low(j), up(j), alphas(j)) - sum / (2 * k + 1) - end wis_ - -end FitI - -import FitI._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `FitI` class provides methods to determine Interval-based Quality of Fit QoFI - * metrics/measures. 
- * @see reset to reset the degrees of freedom - * @param dfm the degrees of freedom for model/regression - * @param df the degrees of freedom for error - */ -class FitI (dfm_ : Double, df_ : Double) - extends Fit (dfm_, df_): // if unknown, may use 1 and y.dim-1 - - private var picp = -1.0 // prediction interval coverage probability - private var pinc = -1.0 // prediction interval nominal coverage - private var ace = -1.0 // average coverage error - private var pinaw = -1.0 // prediction interval normalized average width - private var pinad = -1.0 // prediction interval normalized average deviation - private var iscore = -1.0 // interval score - private var wis = -1.0 // weighted interval score - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Diagnose the health of the model by computing the Quality of Fit (QoFI) metrics/measures, - * from the error/residual vector and the predicted & actual responses. - * For some models the instances may be weighted. - * Note: `wis` should be computed separately. 
- * @see `Regression_WLS` - * @param y the actual response/output vector to use (test/full) - * @param yp the point prediction mean/median - * @param low the predicted lower bound - * @param up the predicted upper bound - * @param alpha the nominal level of uncertainty (alpha) (defaults to 0.9, 90%) - * @param w the weights on the instances (defaults to null) - */ - def diagnose_ (y: VectorD, yp: VectorD, low: VectorD, up: VectorD, alpha: Double = 0.1, - w: VectorD = null): VectorD = - super.diagnose (y, yp, w) - - picp = picp_ (y, low, up) // prediction interval coverage probability - pinc = 1 - alpha // prediction interval nominal coverage - ace = picp - pinc // average coverage error - pinaw = (up - low).mean / (y.max - y.min) // prediction interval normalized average width - pinad = pinad_ (y, low, up) // prediction interval normalized average deviation - iscore = iscore_ (y, low, up) // interval score - fit - end diagnose_ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Diagnose the health of the model by computing the Quality of FitI (QoFI) measures, - * @param y the given time-series (must be aligned with the interval forecast) - * @param yp the point prediction mean/median - * @param low the lower bounds for various alpha levels - * @param up the upper bounds for various alpha levels - * @param alphas the array of prediction levels - */ - def diagnose_wis (y: VectorD, yp: VectorD, low: MatrixD, up: MatrixD, - alphas: Array [Double] = - Array (0.02, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)): Double = - wis = wis_ (y, yp, low, up, alphas) - wis - end diagnose_wis - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the Quality of FitI (QoFI) measures corresponding to the labels given. - * Override to add more quality of fit measures. 
- */ - override def fit: VectorD = super.fit ++ VectorD (picp, pinc, ace, pinaw, pinad, iscore, wis) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show the prediction interval forecasts and relevant QoF metrics/measures. - * @param yy the aligned actual response/output vector to use (test/full) - * @param yfh the forecasts for horizon h - * @param low the predicted lower bound - * @param up the predicted upper bound - * @param qof_all all the QoF metrics (for point and interval forecasts) - * @param h the forecasting horizon - */ - def show_interval_forecasts (yy: VectorD, yfh: VectorD, - low: VectorD, up: VectorD, - qof_all: VectorD, h: Int): Unit = - println (FitM.fitMap (qof_all, qoFI_all_names)) // fully evaluate h-steps ahead forecasts - new PlotM (null, MatrixD (yy, yfh, low, up), // aligned actual, forecasted, lower, upper - Array ("yy", "yfh", "low", "up"), - "Plot Prediction Intervals for horizon $h", lines = true) - end show_interval_forecasts - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the help string that describes the Quality of FitI (QoFI) measures - * provided by the `FitI` class. Override to correspond to fitLabel. - */ - override def help: String = Fit.help ++ FitI.help - -end FitI - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `fitITest` main function is used to test the `FitI` class on a simulated - * time series. 
- * @see `scalation.modeling.forecasting.randomWalkTest3` for another test case - * > runMain scalation.modeling.fitITest - */ -@main def fitITest (): Unit = - - import scalation.random.Normal - - for sig2 <- 10 to 50 by 10 do - val rv = Normal (0, sig2) - val w = math.sqrt (sig2) * 1.96 - val yp = VectorD.range (1, 101) + 10.0 - val y = yp.map (_ + rv.gen) // simulated time series - val low = yp.map (_ - w) - val up = yp.map (_ + w) - new PlotM (null, MatrixD (y, yp, low, up), Array ("y", "yp", "low", "up"), "plot y, low and up") - - val ft = new FitI (1, y.dim) - ft.diagnose_ (y, yp, low, up) - ft.diagnose_wis (y, yp, MatrixD (low), MatrixD (up), Array (0.1)) - val qof = ft.fit - println (FitM.fitMap (qof, qoFI_all_names)) - end for - -end fitITest - diff --git a/target/scala-3.6.4/classes/scalation/modeling/FitM$.class b/target/scala-3.6.4/classes/scalation/modeling/FitM$.class deleted file mode 100644 index 565933348..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/FitM$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/FitM.class b/target/scala-3.6.4/classes/scalation/modeling/FitM.class deleted file mode 100644 index f0ed7a589..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/FitM.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/FitM.tasty b/target/scala-3.6.4/classes/scalation/modeling/FitM.tasty deleted file mode 100644 index e85feab59..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/FitM.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Imputation$package$.class b/target/scala-3.6.4/classes/scalation/modeling/Imputation$package$.class deleted file mode 100644 index 4b1c5e498..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Imputation$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Imputation$package.class 
b/target/scala-3.6.4/classes/scalation/modeling/Imputation$package.class deleted file mode 100644 index ec9b91f36..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Imputation$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Imputation$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/Imputation$package.tasty deleted file mode 100644 index 92acc7b02..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Imputation$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Imputation.class b/target/scala-3.6.4/classes/scalation/modeling/Imputation.class deleted file mode 100644 index a00abcebe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Imputation.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Imputation.tasty b/target/scala-3.6.4/classes/scalation/modeling/Imputation.tasty deleted file mode 100644 index c226b7ea5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Imputation.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeBackward$.class b/target/scala-3.6.4/classes/scalation/modeling/ImputeBackward$.class deleted file mode 100644 index 5066a3810..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeBackward$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeBackward.class b/target/scala-3.6.4/classes/scalation/modeling/ImputeBackward.class deleted file mode 100644 index 5a5930bfa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeBackward.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeBackward.tasty b/target/scala-3.6.4/classes/scalation/modeling/ImputeBackward.tasty deleted file mode 100644 index 5a5600e05..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/ImputeBackward.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeForward$.class b/target/scala-3.6.4/classes/scalation/modeling/ImputeForward$.class deleted file mode 100644 index 3c4ef1333..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeForward$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeForward.class b/target/scala-3.6.4/classes/scalation/modeling/ImputeForward.class deleted file mode 100644 index 42f131a91..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeForward.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeForward.tasty b/target/scala-3.6.4/classes/scalation/modeling/ImputeForward.tasty deleted file mode 100644 index ca3dc5389..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeForward.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeMean$.class b/target/scala-3.6.4/classes/scalation/modeling/ImputeMean$.class deleted file mode 100644 index c5319a62f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeMean$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeMean.class b/target/scala-3.6.4/classes/scalation/modeling/ImputeMean.class deleted file mode 100644 index 907e7632a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeMean.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeMean.tasty b/target/scala-3.6.4/classes/scalation/modeling/ImputeMean.tasty deleted file mode 100644 index 668a8e55f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeMean.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeMovingAvg$.class 
b/target/scala-3.6.4/classes/scalation/modeling/ImputeMovingAvg$.class deleted file mode 100644 index da5259bf2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeMovingAvg$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeMovingAvg.class b/target/scala-3.6.4/classes/scalation/modeling/ImputeMovingAvg.class deleted file mode 100644 index 2d508aecd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeMovingAvg.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeMovingAvg.tasty b/target/scala-3.6.4/classes/scalation/modeling/ImputeMovingAvg.tasty deleted file mode 100644 index 3f7e2cb97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeMovingAvg.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeNormal$.class b/target/scala-3.6.4/classes/scalation/modeling/ImputeNormal$.class deleted file mode 100644 index 82f4344dd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeNormal$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeNormal.class b/target/scala-3.6.4/classes/scalation/modeling/ImputeNormal.class deleted file mode 100644 index 6897d2f12..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeNormal.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeNormal.tasty b/target/scala-3.6.4/classes/scalation/modeling/ImputeNormal.tasty deleted file mode 100644 index db9d0c066..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeNormal.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeNormalWin$.class b/target/scala-3.6.4/classes/scalation/modeling/ImputeNormalWin$.class deleted file mode 100644 index 6ed7be9f6..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/ImputeNormalWin$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeNormalWin.class b/target/scala-3.6.4/classes/scalation/modeling/ImputeNormalWin.class deleted file mode 100644 index 65850ef60..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeNormalWin.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeNormalWin.tasty b/target/scala-3.6.4/classes/scalation/modeling/ImputeNormalWin.tasty deleted file mode 100644 index 1f2f63d71..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeNormalWin.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeRegression$.class b/target/scala-3.6.4/classes/scalation/modeling/ImputeRegression$.class deleted file mode 100644 index b527386c7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeRegression.class b/target/scala-3.6.4/classes/scalation/modeling/ImputeRegression.class deleted file mode 100644 index 09d6761d1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ImputeRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/ImputeRegression.tasty deleted file mode 100644 index 4a3013467..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ImputeRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Initializer$.class b/target/scala-3.6.4/classes/scalation/modeling/Initializer$.class deleted file mode 100644 index 1a57538b9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Initializer$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/Initializer.class b/target/scala-3.6.4/classes/scalation/modeling/Initializer.class deleted file mode 100644 index ab154a9d0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Initializer.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Initializer.tasty b/target/scala-3.6.4/classes/scalation/modeling/Initializer.tasty deleted file mode 100644 index 0505e4d29..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Initializer.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression$.class b/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression$.class deleted file mode 100644 index d06d96a65..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression$package$.class deleted file mode 100644 index dc7155a65..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression$package.class b/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression$package.class deleted file mode 100644 index 076fbc566..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression$package.tasty deleted file mode 100644 index 50e99055b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression.class 
b/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression.class deleted file mode 100644 index 4d23586d9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression.tasty b/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression.tasty deleted file mode 100644 index 75b0ebff0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/KNN_Regression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/LassoRegression$.class b/target/scala-3.6.4/classes/scalation/modeling/LassoRegression$.class deleted file mode 100644 index 5ee980a3b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/LassoRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/LassoRegression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/LassoRegression$package$.class deleted file mode 100644 index de8e46cf6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/LassoRegression$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/LassoRegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/LassoRegression$package.class deleted file mode 100644 index 4a84811fa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/LassoRegression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/LassoRegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/LassoRegression$package.tasty deleted file mode 100644 index 0e7b2a581..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/LassoRegression$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/LassoRegression.class b/target/scala-3.6.4/classes/scalation/modeling/LassoRegression.class deleted file 
mode 100644 index bafa0202d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/LassoRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/LassoRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/LassoRegression.tasty deleted file mode 100644 index eeadfde8f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/LassoRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/MatrixTransform$package$.class b/target/scala-3.6.4/classes/scalation/modeling/MatrixTransform$package$.class deleted file mode 100644 index 9aecaf19c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/MatrixTransform$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/MatrixTransform$package.class b/target/scala-3.6.4/classes/scalation/modeling/MatrixTransform$package.class deleted file mode 100644 index 744097795..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/MatrixTransform$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/MatrixTransform$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/MatrixTransform$package.tasty deleted file mode 100644 index a42732390..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/MatrixTransform$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Model$package$.class b/target/scala-3.6.4/classes/scalation/modeling/Model$package$.class deleted file mode 100644 index 18e8466b7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Model$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Model$package.class b/target/scala-3.6.4/classes/scalation/modeling/Model$package.class deleted file mode 100644 index 28cd61fc8..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/Model$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Model$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/Model$package.tasty deleted file mode 100644 index df99fc857..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Model$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Model.class b/target/scala-3.6.4/classes/scalation/modeling/Model.class deleted file mode 100644 index 841ad110c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Model.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Model.tasty b/target/scala-3.6.4/classes/scalation/modeling/Model.tasty deleted file mode 100644 index 45160f0d1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Model.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/MonitorLoss.class b/target/scala-3.6.4/classes/scalation/modeling/MonitorLoss.class deleted file mode 100644 index 4703ebe3a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/MonitorLoss.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/MonitorLoss.tasty b/target/scala-3.6.4/classes/scalation/modeling/MonitorLoss.tasty deleted file mode 100644 index 482f530c9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/MonitorLoss.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/NoSubModels.class b/target/scala-3.6.4/classes/scalation/modeling/NoSubModels.class deleted file mode 100644 index 9d6575191..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/NoSubModels.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/NoSubModels.tasty b/target/scala-3.6.4/classes/scalation/modeling/NoSubModels.tasty deleted file mode 100644 
index f42d4d3e5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/NoSubModels.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Node$.class b/target/scala-3.6.4/classes/scalation/modeling/Node$.class deleted file mode 100644 index 2adeab14c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Node$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Node.class b/target/scala-3.6.4/classes/scalation/modeling/Node.class deleted file mode 100644 index 3b2e65bcd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Node.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Node.tasty b/target/scala-3.6.4/classes/scalation/modeling/Node.tasty deleted file mode 100644 index 06950f90b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Node.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression$.class b/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression$.class deleted file mode 100644 index e822b6185..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression$package$.class deleted file mode 100644 index 1787614de..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression$package.class deleted file mode 100644 index 09bee0763..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression$package.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression$package.tasty deleted file mode 100644 index 7d3adc4b3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression.class b/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression.class deleted file mode 100644 index ad10a9ab4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression.tasty deleted file mode 100644 index 0a6453093..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/NonlinearRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/NullModel$.class b/target/scala-3.6.4/classes/scalation/modeling/NullModel$.class deleted file mode 100644 index c41b5bf94..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/NullModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/NullModel$package$.class b/target/scala-3.6.4/classes/scalation/modeling/NullModel$package$.class deleted file mode 100644 index b527fa680..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/NullModel$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/NullModel$package.class b/target/scala-3.6.4/classes/scalation/modeling/NullModel$package.class deleted file mode 100644 index ec922e921..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/NullModel$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/NullModel$package.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/NullModel$package.tasty deleted file mode 100644 index e5a914618..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/NullModel$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/NullModel.class b/target/scala-3.6.4/classes/scalation/modeling/NullModel.class deleted file mode 100644 index e8f0b0622..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/NullModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/NullModel.tasty b/target/scala-3.6.4/classes/scalation/modeling/NullModel.tasty deleted file mode 100644 index 3ede84b99..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/NullModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Outlier$package$.class b/target/scala-3.6.4/classes/scalation/modeling/Outlier$package$.class deleted file mode 100644 index e6f3952fd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Outlier$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Outlier$package.class b/target/scala-3.6.4/classes/scalation/modeling/Outlier$package.class deleted file mode 100644 index bce6e264a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Outlier$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Outlier$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/Outlier$package.tasty deleted file mode 100644 index c2bcb38bc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Outlier$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Outlier.class b/target/scala-3.6.4/classes/scalation/modeling/Outlier.class deleted file mode 100644 index 6f79ada2e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Outlier.class and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Outlier.tasty b/target/scala-3.6.4/classes/scalation/modeling/Outlier.tasty deleted file mode 100644 index a08829142..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Outlier.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Perceptron$.class b/target/scala-3.6.4/classes/scalation/modeling/Perceptron$.class deleted file mode 100644 index 6128d856e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Perceptron$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Perceptron$package$.class b/target/scala-3.6.4/classes/scalation/modeling/Perceptron$package$.class deleted file mode 100644 index ea8e3afc8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Perceptron$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Perceptron$package.class b/target/scala-3.6.4/classes/scalation/modeling/Perceptron$package.class deleted file mode 100644 index 9642922b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Perceptron$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Perceptron$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/Perceptron$package.tasty deleted file mode 100644 index 7c6c493ca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Perceptron$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Perceptron.class b/target/scala-3.6.4/classes/scalation/modeling/Perceptron.class deleted file mode 100644 index 5c15428af..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Perceptron.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Perceptron.tasty b/target/scala-3.6.4/classes/scalation/modeling/Perceptron.tasty deleted file mode 
100644 index 7f49e507c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Perceptron.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression$.class b/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression$.class deleted file mode 100644 index ae357781a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression$package$.class deleted file mode 100644 index 9e0567fe7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression$package.class deleted file mode 100644 index 264e52c15..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression$package.tasty deleted file mode 100644 index 7ed0bc894..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression.class b/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression.class deleted file mode 100644 index 9c4a6e101..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression.tasty deleted file mode 100644 index 309341361..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/PoissonRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PolyORegression$.class b/target/scala-3.6.4/classes/scalation/modeling/PolyORegression$.class deleted file mode 100644 index c7e9f6ab3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/PolyORegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PolyORegression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/PolyORegression$package$.class deleted file mode 100644 index 2ba6c748f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/PolyORegression$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PolyORegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/PolyORegression$package.class deleted file mode 100644 index 879c00670..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/PolyORegression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PolyORegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/PolyORegression$package.tasty deleted file mode 100644 index 91fb0f744..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/PolyORegression$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PolyORegression.class b/target/scala-3.6.4/classes/scalation/modeling/PolyORegression.class deleted file mode 100644 index 8e4aa5221..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/PolyORegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PolyORegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/PolyORegression.tasty deleted file mode 100644 index bf5b8917c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/PolyORegression.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PolyRegression$.class b/target/scala-3.6.4/classes/scalation/modeling/PolyRegression$.class deleted file mode 100644 index 73265d764..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/PolyRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PolyRegression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/PolyRegression$package$.class deleted file mode 100644 index 1f02ddd82..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/PolyRegression$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PolyRegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/PolyRegression$package.class deleted file mode 100644 index 9be22dcef..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/PolyRegression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PolyRegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/PolyRegression$package.tasty deleted file mode 100644 index 02e78a1e0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/PolyRegression$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PolyRegression.class b/target/scala-3.6.4/classes/scalation/modeling/PolyRegression.class deleted file mode 100644 index 494acef6c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/PolyRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/PolyRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/PolyRegression.tasty deleted file mode 100644 index facd99d30..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/PolyRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Predictor$.class 
b/target/scala-3.6.4/classes/scalation/modeling/Predictor$.class deleted file mode 100644 index d0a8a87df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Predictor$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Predictor$package$.class b/target/scala-3.6.4/classes/scalation/modeling/Predictor$package$.class deleted file mode 100644 index d6ce1a031..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Predictor$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Predictor$package.class b/target/scala-3.6.4/classes/scalation/modeling/Predictor$package.class deleted file mode 100644 index 69c29b5f1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Predictor$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Predictor$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/Predictor$package.tasty deleted file mode 100644 index 2bccf2a42..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Predictor$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Predictor.class b/target/scala-3.6.4/classes/scalation/modeling/Predictor.class deleted file mode 100644 index 804efdf34..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Predictor.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Predictor.tasty b/target/scala-3.6.4/classes/scalation/modeling/Predictor.tasty deleted file mode 100644 index 5df322dc0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Predictor.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$1.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$1.class deleted file mode 100644 index cfad4245c..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$10.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$10.class deleted file mode 100644 index dc172b960..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$10.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$11.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$11.class deleted file mode 100644 index 4738d9e18..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$11.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$12.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$12.class deleted file mode 100644 index 120b2a547..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$12.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$13.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$13.class deleted file mode 100644 index 6774a6faa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$13.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$14.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$14.class deleted file mode 100644 index 6be328769..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$14.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$15.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$15.class deleted file mode 100644 index 44be1d00f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$15.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$16.class 
b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$16.class deleted file mode 100644 index 2e7418d22..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$16.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$17.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$17.class deleted file mode 100644 index f9ef39bf5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$17.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$18.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$18.class deleted file mode 100644 index 02e4bd0aa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$18.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$19.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$19.class deleted file mode 100644 index dd7986aaf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$19.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$2.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$2.class deleted file mode 100644 index 800825e6f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$20.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$20.class deleted file mode 100644 index c9eb0d92f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$20.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$21.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$21.class deleted file mode 100644 index d3cc6f757..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$21.class and /dev/null 
differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$22.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$22.class deleted file mode 100644 index 700b82ed9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$22.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$23.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$23.class deleted file mode 100644 index f68c3a868..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$23.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$24.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$24.class deleted file mode 100644 index 4708d3b7c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$24.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$25.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$25.class deleted file mode 100644 index 62a512b88..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$25.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$3.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$3.class deleted file mode 100644 index 9f8badb51..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$4.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$4.class deleted file mode 100644 index 02b1480c6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$5.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$5.class deleted file mode 100644 index fbf0c1762..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$6.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$6.class deleted file mode 100644 index 3a07b1769..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$7.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$7.class deleted file mode 100644 index 4011f0af6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$8.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$8.class deleted file mode 100644 index ecc55db51..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$8.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$9.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$9.class deleted file mode 100644 index ed1b4586c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$$anon$9.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF$.class b/target/scala-3.6.4/classes/scalation/modeling/QoF$.class deleted file mode 100644 index ffc596b48..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF.class b/target/scala-3.6.4/classes/scalation/modeling/QoF.class deleted file mode 100644 index cd9c10720..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QoF.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QoF.tasty b/target/scala-3.6.4/classes/scalation/modeling/QoF.tasty deleted file mode 100644 index 413f30cd6..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/modeling/QoF.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QuantileOutlier$.class b/target/scala-3.6.4/classes/scalation/modeling/QuantileOutlier$.class deleted file mode 100644 index 9a5977510..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QuantileOutlier$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QuantileOutlier.class b/target/scala-3.6.4/classes/scalation/modeling/QuantileOutlier.class deleted file mode 100644 index c009e7b57..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QuantileOutlier.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QuantileOutlier.tasty b/target/scala-3.6.4/classes/scalation/modeling/QuantileOutlier.tasty deleted file mode 100644 index 2874cb16b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QuantileOutlier.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QuartileXOutlier$.class b/target/scala-3.6.4/classes/scalation/modeling/QuartileXOutlier$.class deleted file mode 100644 index 027986673..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QuartileXOutlier$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QuartileXOutlier.class b/target/scala-3.6.4/classes/scalation/modeling/QuartileXOutlier.class deleted file mode 100644 index e7252f913..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QuartileXOutlier.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/QuartileXOutlier.tasty b/target/scala-3.6.4/classes/scalation/modeling/QuartileXOutlier.tasty deleted file mode 100644 index 0ae6170e5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/QuartileXOutlier.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/Regression$.class b/target/scala-3.6.4/classes/scalation/modeling/Regression$.class deleted file mode 100644 index 0c565bf46..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Regression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Regression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/Regression$package$.class deleted file mode 100644 index 50dd71dc0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Regression$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Regression$package.class b/target/scala-3.6.4/classes/scalation/modeling/Regression$package.class deleted file mode 100644 index 140fde8cd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Regression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Regression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/Regression$package.tasty deleted file mode 100644 index b06e0d318..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Regression$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Regression.class b/target/scala-3.6.4/classes/scalation/modeling/Regression.class deleted file mode 100644 index 056a28e69..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Regression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Regression.tasty b/target/scala-3.6.4/classes/scalation/modeling/Regression.tasty deleted file mode 100644 index 79239ee8b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Regression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionCat$.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionCat$.class deleted file mode 100644 index 
40a265862..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionCat$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionCat$package$.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionCat$package$.class deleted file mode 100644 index 7ed2b1145..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionCat$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionCat$package.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionCat$package.class deleted file mode 100644 index ee2c10f0d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionCat$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionCat$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/RegressionCat$package.tasty deleted file mode 100644 index 72af881c8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionCat$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionCat.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionCat.class deleted file mode 100644 index 741227a7f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionCat.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionCat.tasty b/target/scala-3.6.4/classes/scalation/modeling/RegressionCat.tasty deleted file mode 100644 index b925aa071..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionCat.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTree$.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTree$.class deleted file mode 100644 index 37505cb9a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTree$.class and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTree$package$.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTree$package$.class deleted file mode 100644 index dfdd5921a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTree$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTree$package.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTree$package.class deleted file mode 100644 index 4b4b331f1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTree$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTree$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/RegressionTree$package.tasty deleted file mode 100644 index dda2eb5d1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTree$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTree.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTree.class deleted file mode 100644 index 3f32af269..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTree.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTree.tasty b/target/scala-3.6.4/classes/scalation/modeling/RegressionTree.tasty deleted file mode 100644 index c5ec95608..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTree.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB$.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB$.class deleted file mode 100644 index bbd9f0646..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB$package$.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB$package$.class deleted file mode 100644 index c34c9ac6e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB$package.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB$package.class deleted file mode 100644 index 002543020..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB$package.tasty deleted file mode 100644 index 313cf775b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB.class deleted file mode 100644 index 736b08bb2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB.scala.bak deleted file mode 100644 index 745477e1c..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB.scala.bak +++ /dev/null @@ -1,288 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Dong Yu Yu, John Miller - * @version 2.0 - * @date Sun Dec 16 16:09:16 EST 2018 - * @see LICENSE (MIT style license file). 
- * - * @note Model: Regression Tree with Gradient Boosting - */ - -package scalation -package modeling - -import scala.collection.mutable.ArrayBuffer - -import scalation.mathstat._ - -import modeling.{RegressionTree => REG_TREE} // swap Regression Tree -//import modeling.{RegressionTreeMT => REG_TREE} // swap Model Tree - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RegressionTreeGB` class uses Gradient Boosting on `RegressionTree`s. - * @param x the input/data matrix - * @param y the output/response vector - * @param fname_ the feature/variable names (defaults to null) - * @param hparam the hyper-parameters for the model (defaults to RegressionTree.hp) - */ -class RegressionTreeGB (x: MatrixD, y: VectorD, fname_ : Array [String] = null, - hparam: HyperParameter = RegressionTree.hp) - extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): // call resetDF once tree is built - - private val depth = hparam("maxDepth").toInt // the max depth for the base regression trees - private val iter = hparam("iterations").toInt // the iterations for training - private val forest = new ArrayBuffer [REG_TREE] () // forest is a list of regression trees - - modelName = s"RegressionTreeGB ($depth, $iter)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Using Gradient Boosting on Training, for every iteration, we evaluate the residual - * and form a Regression Tree where the residual is the depedent value (equal to the - * gradient if using SSE as loss function). 
- * @param x_ the training/full data/input matrix - * @param y_ the training/full response/output vector - */ - def train (x_ : MatrixD, y_ : VectorD): Unit = - val yp = VectorD.fill (y_.dim)(y_.mean) // initial value for y-predicted - - for i <- 0 until iter do - val yres = y_ - yp // y-residual - val tree = new REG_TREE (x_, yres, fname, hparam) // i-th tree in forest (mean/regression) - forest += tree // add to forest - tree.train (x_, yres) // train the i-th tree - yp += tree.predict (x_) // add to cumulative prediction - end for - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test a predictive model y_ = f(x_) + e and return its QoF vector. - * Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param x_ the testing/full data/input matrix (defaults to full x) - * @param y_ the testing/full response/output vector (defaults to full y) - */ - def test (x_ : MatrixD = x, y_ : VectorD = y): (VectorD, VectorD) = - val yp = predict (x_) // make predictions - val df1 = forest.foldLeft(0)(_ + _.numLeaves) // degrees of freedom model = number of leaves - val df2 = y_.dim - df1 // degrees of freedom error - resetDF ((df1, df2)) - (yp, diagnose (y_, yp)) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given a data vector z, predict the value by summing the predict for each tree. - * @param z the data vector to predict - */ - override def predict (z: VectorD): Double = - var yp = y.mean - for i <- forest.indices do yp += forest(i).predict (z) - yp - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given a data matrix z, predict the value by summing the predict for each tree, - * for each row of the matrix. 
- * @param z the data matrix to predict - */ - override def predict (z: MatrixD = x): VectorD = - val yp = new VectorD (z.dim) - for i <- z.indices do yp(i) = predict (z(i)) - yp - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build a sub-model that is restricted to the given columns of the data matrix. - * @param x_cols the columns that the new model is restricted to - */ - override def buildModel (x_cols: MatrixD): RegressionTreeGB = - new RegressionTreeGB (x_cols, y, null, hparam) - end buildModel - -end RegressionTreeGB - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RegressionTreeGB` companion object defines hyper-parameters and provides - * a factory methods for creating gradient boosted regression trees. - */ -object RegressionTreeGB: - - private val flaw = flawf ("RegressionTreeGB") // flaw function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionTreeGB` object that uses Gradient Boosting on `RegressionTree`. - * One Tree is included in the model at a time wisely chosen for reducing gradient. 
- * @param xy the combined data-response matrix - * @param fname the feature/variable names (defaults to null) - * @param hparam the hyper-parameters for the model (defaults to RegressionTree.hp) - * @param col the designated response column (defaults to the last column) - */ - def apply (xy: MatrixD, fname: Array [String] = null, - hparam: HyperParameter = RegressionTree.hp) - (col: Int = xy.dim2 - 1): RegressionTreeGB = - val n = xy.dim2 - if n < 2 then - flaw ("apply", s"dim2 = $n of the 'xy' matrix must be at least 2") - null - else - val (x, y) = (xy.not (?, col), xy(?, col)) - new RegressionTreeGB (x, y, fname, hparam) - end if - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionTreeGB` object that uses Gradient Boosting on `RegressionTree`. - * One Tree is included in the model at a time wisely chosen for reducing gradient. - * @param x the input/data matrix - * @param y the output/response vector - * @param fname the feature/variable names (defaults to null) - * @param hparam the hyper-parameters for the model (defaults to RegressionTree.hp) - */ - def rescale (x: MatrixD, y: VectorD, fname: Array [String] = null, - hparam: HyperParameter = RegressionTree.hp): RegressionTreeGB = - val n = x.dim2 - if n < 1 then - flaw ("rescale", s"dim2 = $n of the 'x' matrix must be at least 1") - null - else -// FIX - add rescale - new RegressionTreeGB (x, y, fname, hparam) - end if - end rescale - -end RegressionTreeGB - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regressionTreeGBTest` main function is used to test the `RegressionTreeGB` class. 
- * @see translate.google.com/translate?hl=en&sl=zh-CN&u=https: - * //www.hrwhisper.me/machine-learning-decision-tree/&prev=search - * > runMain scalation.modeling.regressionTreeGBTest - */ -@main def regressionTreeGBTest (): Unit = - - val x = MatrixD ((10, 1), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) - val y = VectorD (5.56, 5.70, 5.91, 6.40, 6.80, 7.05, 8.90, 8.70, 9.00, 9.05) - val ox = VectorD.one (x.dim) +^: x - val fname = Array ("x") - - banner (s"Regression no intercept") - val reg = new Regression (x, y) - reg.trainNtest ()() // train and test the model - - banner (s"Regression with intercept") - val reg2 = new Regression (ox, y) - reg2.trainNtest ()() // train and test the model - - banner (s"Quadratic Regression") - val reg3 = SymbolicRegression.quadratic (x, y, fname) - reg3.trainNtest ()() // train and test the model - - banner (s"Perceptron sigmoid") - val nn = Perceptron.rescale (reg3.getX, y) - nn.trainNtest ()() // train and test the model - - banner (s"Perceptron tanh") - val nn2 = Perceptron.rescale (reg3.getX, y, f = ActivationFun.f_tanh) - nn2.trainNtest ()() // train and test the model - - for d <- 1 to 2 do - banner (s"Regression Tree GB with maxDepth = $d") - RegressionTree.hp.updateReturn ("maxDepth", d) - val mod = new RegressionTreeGB (x, y, fname) - mod.trainNtest ()() // train and test the model -// mod.printTree () - end for - -end regressionTreeGBTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regressionTreeGBTest2` main function tests the `RegressionTreeGB` class using the - * AutoMPG dataset. Assumes no missing values. It tests multiple depths. 
- * > runMain scalation.modeling.regressionTreeGBTest2 - */ -@main def regressionTreeGBTest2 (): Unit = - - import Example_AutoMPG._ - -// println (s"x = $o") -// println (s"y = $y") - - val dmax = 6 // range of depths 1 to dmax - val qual = new MatrixD (dmax, 3) - - for d <- 1 to dmax do - banner ("AutoMPG Regression Tree GB with depth d = $d") - RegressionTree.hp("maxDepth") = d // depth of tree - RegressionTree.hp("nTrees") = 3 // number of iterations - val mod = new RegressionTreeGB (x, y, x_fname) // create model with intercept (else pass x) - val qof = mod.trainNtest ()()._2 // train and test the model -// mod.printTree () // print the regression tree -// println (mod.summary ()) // parameter/coefficient statistics - - banner (s"AutoMPG Regression Tree GB with d = $d Validation") - val qof2 = mod.validate ()() // out-of-sampling testing - val iq = QoF.rSq.ordinal // index for rSq - qual (d-1) = VectorD (qof(iq), qof(iq+1), qof2(iq)) // R^2, R^2 bar, R^2 os - end for - - new PlotM (VectorD.range (1, dmax+1), qual.transpose, Array ("R^2", "R^2 bar", "R^2 os"), - "RegressionTreeGB in-sample, out-of-sample QoF vs. depth", lines = true) - println (s"RegressionTreeGB: qual = $qual") - -end regressionTreeGBTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regressionTreeGBTest3` main function tests the `RegressionTreeGB` class using the - * AutoMPG dataset. Assumes no missing values. It tests forward, backward and stepwise - * selection. 
- * > runMain scalation.modeling.regressionTreeGBTest3 - */ -@main def regressionTreeGBTest3 (): Unit = - - import Example_AutoMPG._ - - val d = 5 // depth of tree - -// println (s"x = $x") -// println (s"y = $y") - - banner (s"AutoMPG Regression Tree GB with d = $d") - RegressionTree.hp("maxDepth") = d - val mod = new RegressionTreeGB (x, y, x_fname) // create model with intercept (else pass x) - mod.trainNtest ()() // train and test the model -// mod.printTree () // print the regression tree - - for tech <- SelectionTech.values do - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv - val k = cols.size - println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Regression Tree GB with $tech", lines = true) - println (s"$tech: rSq = $rSq") - end for - -end regressionTreeGBTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regressionTreeGBTest4` main function is used to test the `RegressionTreeGB` class. 
- * > runMain scalation.modeling.regressionTreeGBTest4 - */ -@main def regressionTreeGBTest4 (): Unit = - - val x = MatrixD ((5, 1), 750, 800, 850, 900, 950) - val y = VectorD (1160, 1200, 1280, 1450, 2000) - - val mod = new RegressionTreeGB (x, y) - mod.trainNtest ()() // train and test the model -// mod.printTree () // print the regression tree -// println (mod.summary ()) // parameter/coefficient statistics - -end regressionTreeGBTest4 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB.tasty b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB.tasty deleted file mode 100644 index 3a2043d59..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeGB.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT$.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT$.class deleted file mode 100644 index 127701916..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT$package$.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT$package$.class deleted file mode 100644 index 80ea88299..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT$package.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT$package.class deleted file mode 100644 index c611189dc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT$package.tasty deleted file mode 100644 index 110985318..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT.class deleted file mode 100644 index 26f34eea5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT.tasty b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT.tasty deleted file mode 100644 index 67de25320..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeMT.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF$.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF$.class deleted file mode 100644 index ac19ed574..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF$package$.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF$package$.class deleted file mode 100644 index 18b5f67ba..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF$package.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF$package.class deleted file mode 100644 index 8116450ee..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF$package.tasty deleted file mode 100644 index 93cc12204..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF.class deleted file mode 100644 index 2b5746a73..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF.tasty b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF.tasty deleted file mode 100644 index d5d160f7c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT$.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT$.class deleted file mode 100644 index bb6a86a48..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT$package$.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT$package$.class deleted file mode 100644 index 3ed944799..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT$package.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT$package.class deleted file mode 100644 index 190d75cdb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT$package.tasty deleted file mode 100644 index 56ec851d7..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT.class deleted file mode 100644 index 64f5403e1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT.tasty b/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT.tasty deleted file mode 100644 index 3ec0a3a38..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionTreeRF_MT.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS$.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS$.class deleted file mode 100644 index a08bccc61..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS$package$.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS$package$.class deleted file mode 100644 index 33938024a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS$package.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS$package.class deleted file mode 100644 index 51997d426..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS$package.tasty deleted file mode 100644 index 2544876ab..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS.class b/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS.class deleted file mode 100644 index fcd227899..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS.tasty b/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS.tasty deleted file mode 100644 index 96f6952f9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RegressionWLS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression$.class b/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression$.class deleted file mode 100644 index 5c2021899..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression$package$.class deleted file mode 100644 index af5270bb4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression$package.class deleted file mode 100644 index d5eee5a11..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression$package.tasty deleted file mode 100644 index 15d65f8a4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression$package.tasty and /dev/null 
differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression.class b/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression.class deleted file mode 100644 index 64d092a7a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression.tasty deleted file mode 100644 index 4b1e2e93e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RidgeRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RoundRegression$.class b/target/scala-3.6.4/classes/scalation/modeling/RoundRegression$.class deleted file mode 100644 index 5b7f4a4c1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RoundRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RoundRegression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/RoundRegression$package$.class deleted file mode 100644 index c3b9efa5b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RoundRegression$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RoundRegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/RoundRegression$package.class deleted file mode 100644 index 82daef32a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RoundRegression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RoundRegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/RoundRegression$package.tasty deleted file mode 100644 index fe0a6f288..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RoundRegression$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/RoundRegression.class b/target/scala-3.6.4/classes/scalation/modeling/RoundRegression.class deleted file mode 100644 index 2002b6f8f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RoundRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/RoundRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/RoundRegression.tasty deleted file mode 100644 index a31248a40..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/RoundRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Sampling$package$.class b/target/scala-3.6.4/classes/scalation/modeling/Sampling$package$.class deleted file mode 100644 index c42221a9e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Sampling$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Sampling$package.class b/target/scala-3.6.4/classes/scalation/modeling/Sampling$package.class deleted file mode 100644 index 2f7426c1a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Sampling$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Sampling$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/Sampling$package.tasty deleted file mode 100644 index 9ad120e93..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Sampling$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Scaling.class b/target/scala-3.6.4/classes/scalation/modeling/Scaling.class deleted file mode 100644 index 63b19b34f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Scaling.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Scaling.tasty b/target/scala-3.6.4/classes/scalation/modeling/Scaling.tasty deleted file mode 100644 index 
a6fd092f7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Scaling.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SelectionTech$$anon$1.class b/target/scala-3.6.4/classes/scalation/modeling/SelectionTech$$anon$1.class deleted file mode 100644 index 743bf955e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SelectionTech$$anon$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SelectionTech$.class b/target/scala-3.6.4/classes/scalation/modeling/SelectionTech$.class deleted file mode 100644 index a31e9e686..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SelectionTech$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SelectionTech.class b/target/scala-3.6.4/classes/scalation/modeling/SelectionTech.class deleted file mode 100644 index 84130601a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SelectionTech.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SelectionTech.tasty b/target/scala-3.6.4/classes/scalation/modeling/SelectionTech.tasty deleted file mode 100644 index 51d66564a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SelectionTech.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression$.class b/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression$.class deleted file mode 100644 index c305a32b9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression$package$.class deleted file mode 100644 index 39bd268cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression$package$.class and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression$package.class deleted file mode 100644 index be117b5da..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression$package.tasty deleted file mode 100644 index 74a47af3f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression.class b/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression.class deleted file mode 100644 index 47a0aac48..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression.tasty deleted file mode 100644 index 1ad83956f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimpleExpRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimpleExponentialRegression.scalaa b/target/scala-3.6.4/classes/scalation/modeling/SimpleExponentialRegression.scalaa deleted file mode 100644 index 5e21dc3ee..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/SimpleExponentialRegression.scalaa +++ /dev/null @@ -1,208 +0,0 @@ -package scalation -package modeling - -import scala.math.exp - -import scalation.mathstat._ - -/** - * The `SimpleExponentialRegression` class implements a simple exponential regression model - * of the form `y = b(0) * exp(b(1) * x)`, where: - * - `b(0)` is the base (scale factor), - * - `b(1)` is the 
exponent rate, - * - `x` is the feature/independent variable. - * - * @param x the m-by-n input/data matrix (should have at least 2 columns: one for the intercept, one for the feature) - * @param y the m-dimensional response/output vector - * @param fname the feature/variable names (defaults to null) - * @param hparam the hyper-parameters (defaults to null) - */ -class SimpleExponentialRegression (x: MatrixD, y: VectorD, fname: Array [String] = null, - hparam: HyperParameter = null) - extends Predictor(x, y, fname, hparam) - with Fit(dfm = 1, df = x.dim - 1) - with NoSubModels: - - /** Maximum number of gradient descent iterations (epochs). */ - private val maxEpochs = 1000 - - /** Learning rate used by gradient descent. */ - private val eta = 1e-5 - - /** Debug function for logging progress. */ - private val debug = debugf("SimpleRegression", true) - - /** Flaw function for logging errors. */ - private val flaw = flawf("SimpleRegression") - - modelName = "SimpleExponentialRegression" - - // Validate that the input matrix has at least two columns - if x.dim2 < 2 then flaw("init", s"Data matrix must have at least 2 columns: ${x.dim2}") - - /** - * The exponential function used to compute predictions for a single value xi. - * - * y_hat = b(0) * exp(b(1) * xi) - * - * @param xi a single feature value (e.g., x_(i, 1)) - * @return the predicted response for xi - */ - def f(xi: Double): Double = b(0) * exp(b(1) * xi) - - /** - * Override the `predict` method from `Predictor` to handle matrix inputs. - * For each row in the matrix `x_`, extract the feature xi from column 1, - * then compute `f(xi)`. 
- * - * @param x_ the input/data matrix (defaults to the full training set x) - * @return a vector of predictions, one for each row of x_ - */ - override def predict (x_ : MatrixD): VectorD = - val predictions = new VectorD(x_.dim) - for i <- x_.indices do - val xi = x_(i, 1) - predictions(i) = f(xi) - predictions - - /** - * Train the exponential regression model using gradient descent. - * Initializes the parameter vector b to (1, 0.3), then iteratively updates b - * by minimizing the mean squared error: sum((y - f(x))^2). - * - * @param x_ the input/data matrix (defaults to x) - * @param y_ the response/output vector (defaults to y) - */ - def train(x_ : MatrixD = x, y_ : VectorD = y): Unit = - var epoch = 0 - var previousLoss = Double.MaxValue - b = VectorD(1.0, 0.3) // Initialize parameters - - while epoch < maxEpochs do - val gradients = VectorD(0.0, 0.0) - var loss = 0.0 - - // Compute gradients - for i <- x_.indices do - val xi = x_(i, 1) - val yi = y_(i) - - val prediction = f(xi) - val error = prediction - yi - loss += error * error - - // Partial derivatives for b(0) and b(1) - gradients(0) += (2.0 / x_.dim) * error * math.exp(b(1) * xi) - gradients(1) += (2.0 / x_.dim) * error * b(0) * xi * math.exp(b(1) * xi) - - // Update parameters - b(0) -= eta * gradients(0) - b(1) -= eta * gradients(1) - - // Compute mean squared error - loss /= x_.dim - if epoch % 10 == 0 || epoch == maxEpochs - 1 then - debug(s"Epoch $epoch:", s"Loss = $loss") - - // Check for convergence - if math.abs(previousLoss - loss) < 1e-6 then - debug(s"Converged at epoch $epoch with", s"loss = $loss") - return - - previousLoss = loss - epoch += 1 - - debug(s"Max epochs ($maxEpochs) reached.", s"Final loss = ($previousLoss)") - end train - - /** - * Test the model by predicting y-values for the given matrix x_, - * then comparing with the actual y_ via the `diagnose` function. 
- * - * @param x_ the input/data matrix (defaults to x) - * @param y_ the response/output vector (defaults to y) - * @return a tuple containing (predictions, the diagnostic QoF measures) - */ - def test(x_ : MatrixD = x, y_ : VectorD = y): (VectorD, VectorD) = - val yp = predict(x_) - (yp, diagnose(y_, yp)) - end test - -end SimpleExponentialRegression - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** A main method for testing `SimpleExponentialRegression` with a dataset - * that roughly follows y = 2 * exp(0.3 * x) for x=1..10. - * > runMain scalation.modeling.simpleExponentialRegressionTest - */ -@main def simpleExponentialRegressionTest (): Unit = - - val x = MatrixD ((10, 2), 1.0, 1.0, - 1.0, 2.0, - 1.0, 3.0, - 1.0, 4.0, - 1.0, 5.0, - 1.0, 6.0, - 1.0, 7.0, - 1.0, 8.0, - 1.0, 9.0, - 1.0, 10.0) - - val y = VectorD (2.70, // approximate 2 * exp(0.3*1) - 3.64, // approximate 2 * exp(0.3*2) - 4.93, - 6.64, - 8.96, - 12.10, - 16.33, - 22.05, - 29.76, - 40.17) - - // Create and train model, then test it - val model = new SimpleExponentialRegression(x, y) - model.trainNtest()() - - val yp = model.predict(x) - println(s"y = $y") - println(s"yp = $yp") - - // Optional: Plot the actual vs. predicted values - new Plot (null, y, yp, "SimpleExponentialRegressionTest", lines = true) - -end simpleExponentialRegressionTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** A second main method for testing `SimpleExponentialRegression` with a - * dataset that is actually quadratic (y = x^2), which is not exponential. - * This demonstrates how the model behaves with a mismatch in relationship. 
- * > runMain scalation.modeling.simpleExponentialRegressionTest2 - */ -@main def simpleExponentialRegressionTest2 (): Unit = - val x = MatrixD((5, 2), - 1.0, 2.0, - 1.0, 3.0, - 1.0, 4.0, - 1.0, 5.0, - 1.0, 6.0 - ) - val y = VectorD(4, 9, 16, 25, 36) // Quadratic pattern: x^2 - val z = VectorD(1.0, 20.0, 80.0) - - // Create and train model, then test it - val model = new SimpleExponentialRegression(x, y) - model.trainNtest()() - - val yp = model.predict(x) - println(s"y = $y") - println(s"yp = $yp") - - new Plot(null, y, yp, "SimpleExponentialRegressionTest") - - // Predict new points - val yp2 = model.predict(z) - println(s"predict ($z) = $yp2") - -end simpleExponentialRegressionTest2 diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression$.class b/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression$.class deleted file mode 100644 index 25da94ae4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression$package$.class deleted file mode 100644 index 4175658ea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression$package.class deleted file mode 100644 index 8f0b5f240..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression$package.tasty deleted file mode 100644 index 9bf85d7ca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression$package.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression.class b/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression.class deleted file mode 100644 index e37d28b1b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression.tasty deleted file mode 100644 index 28981f18a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimpleRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression$.class b/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression$.class deleted file mode 100644 index 22d81c1a5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression$package$.class deleted file mode 100644 index ccffeedb0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression$package.class deleted file mode 100644 index ac46f846a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression$package.tasty deleted file mode 100644 index e8680b014..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression.class b/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression.class deleted file mode 100644 index 4abfe5e66..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression.tasty deleted file mode 100644 index eee0d63b5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SimplerRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SumQueue$.class b/target/scala-3.6.4/classes/scalation/modeling/SumQueue$.class deleted file mode 100644 index 2b719b218..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SumQueue$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SumQueue$package$.class b/target/scala-3.6.4/classes/scalation/modeling/SumQueue$package$.class deleted file mode 100644 index 0b7cf0629..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SumQueue$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SumQueue$package.class b/target/scala-3.6.4/classes/scalation/modeling/SumQueue$package.class deleted file mode 100644 index bfcd3666e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SumQueue$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SumQueue$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/SumQueue$package.tasty deleted file mode 100644 index 01a1c75c2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SumQueue$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SumQueue.class b/target/scala-3.6.4/classes/scalation/modeling/SumQueue.class deleted file mode 100644 
index 5932fa7c8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SumQueue.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SumQueue.tasty b/target/scala-3.6.4/classes/scalation/modeling/SumQueue.tasty deleted file mode 100644 index ee94c4dcc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SumQueue.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SumSqQueue$.class b/target/scala-3.6.4/classes/scalation/modeling/SumSqQueue$.class deleted file mode 100644 index 6da683b58..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SumSqQueue$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SumSqQueue.class b/target/scala-3.6.4/classes/scalation/modeling/SumSqQueue.class deleted file mode 100644 index 2c8baf9d0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SumSqQueue.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SumSqQueue.tasty b/target/scala-3.6.4/classes/scalation/modeling/SumSqQueue.tasty deleted file mode 100644 index 3f67f450a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SumSqQueue.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression$.class b/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression$.class deleted file mode 100644 index 84613a8ce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression$package$.class deleted file mode 100644 index 48eea749e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression$package$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression$package.class deleted file mode 100644 index 7601d6103..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression$package.tasty deleted file mode 100644 index 662f4dc78..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression.class b/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression.class deleted file mode 100644 index 38c6387b1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression.tasty deleted file mode 100644 index 8e418421e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymLassoRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression$.class b/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression$.class deleted file mode 100644 index dc478f287..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression$package$.class deleted file mode 100644 index 17f453ae0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression$package$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression$package.class deleted file mode 100644 index 0f0298b4b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression$package.tasty deleted file mode 100644 index 7dbcfd682..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression.class b/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression.class deleted file mode 100644 index f6111e9d6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression.tasty deleted file mode 100644 index c16cba029..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymRidgeRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression$.class b/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression$.class deleted file mode 100644 index 66ad8ee46..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression$package$.class deleted file mode 100644 index f270ffe6a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression$package$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression$package.class deleted file mode 100644 index 4c6b4a30f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression$package.tasty deleted file mode 100644 index fb4159930..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression.class b/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression.class deleted file mode 100644 index 06a7a3c97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression.tasty deleted file mode 100644 index 0e54ed485..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/SymbolicRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/TestFit.class b/target/scala-3.6.4/classes/scalation/modeling/TestFit.class deleted file mode 100644 index 448ad3278..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/TestFit.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/TestFit.tasty b/target/scala-3.6.4/classes/scalation/modeling/TestFit.tasty deleted file mode 100644 index d0ebaabdd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/TestFit.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/TranRegression$.class 
b/target/scala-3.6.4/classes/scalation/modeling/TranRegression$.class deleted file mode 100644 index 062b3ed73..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/TranRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/TranRegression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/TranRegression$package$.class deleted file mode 100644 index 5011bffd0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/TranRegression$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/TranRegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/TranRegression$package.class deleted file mode 100644 index c766c0e5f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/TranRegression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/TranRegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/TranRegression$package.tasty deleted file mode 100644 index 26137580b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/TranRegression$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/TranRegression.class b/target/scala-3.6.4/classes/scalation/modeling/TranRegression.class deleted file mode 100644 index 93a2be117..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/TranRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/TranRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/TranRegression.tasty deleted file mode 100644 index fde56e80c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/TranRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/TranRegressionEx$.class b/target/scala-3.6.4/classes/scalation/modeling/TranRegressionEx$.class deleted file mode 
100644 index ef3d9fcef..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/TranRegressionEx$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/TranRegressionEx.class b/target/scala-3.6.4/classes/scalation/modeling/TranRegressionEx.class deleted file mode 100644 index 07d3a21b9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/TranRegressionEx.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/TranRegressionEx.tasty b/target/scala-3.6.4/classes/scalation/modeling/TranRegressionEx.tasty deleted file mode 100644 index 3c6b3fc9a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/TranRegressionEx.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/TrigRegression$.class b/target/scala-3.6.4/classes/scalation/modeling/TrigRegression$.class deleted file mode 100644 index 2fedba4ad..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/TrigRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/TrigRegression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/TrigRegression$package$.class deleted file mode 100644 index f01b563b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/TrigRegression$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/TrigRegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/TrigRegression$package.class deleted file mode 100644 index 764f54c1f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/TrigRegression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/TrigRegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/TrigRegression$package.tasty deleted file mode 100644 index f08df525e..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/TrigRegression$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/TrigRegression.class b/target/scala-3.6.4/classes/scalation/modeling/TrigRegression.class deleted file mode 100644 index d5149f7b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/TrigRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/TrigRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/TrigRegression.tasty deleted file mode 100644 index 94c6bd936..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/TrigRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Variable$.class b/target/scala-3.6.4/classes/scalation/modeling/Variable$.class deleted file mode 100644 index e04c458a8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Variable$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Variable$package$.class b/target/scala-3.6.4/classes/scalation/modeling/Variable$package$.class deleted file mode 100644 index ea3fb6b8a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Variable$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Variable$package.class b/target/scala-3.6.4/classes/scalation/modeling/Variable$package.class deleted file mode 100644 index cfbd15d3b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Variable$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Variable$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/Variable$package.tasty deleted file mode 100644 index 4c04d617f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Variable$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/Variable.class b/target/scala-3.6.4/classes/scalation/modeling/Variable.class deleted file mode 100644 index 70c88af87..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Variable.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/Variable.tasty b/target/scala-3.6.4/classes/scalation/modeling/Variable.tasty deleted file mode 100644 index d71bd3320..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/Variable.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/VariableKind$$anon$1.class b/target/scala-3.6.4/classes/scalation/modeling/VariableKind$$anon$1.class deleted file mode 100644 index 90037ffcb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/VariableKind$$anon$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/VariableKind$.class b/target/scala-3.6.4/classes/scalation/modeling/VariableKind$.class deleted file mode 100644 index 6995f7671..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/VariableKind$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/VariableKind.class b/target/scala-3.6.4/classes/scalation/modeling/VariableKind.class deleted file mode 100644 index 794aebaf6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/VariableKind.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/VariableKind.tasty b/target/scala-3.6.4/classes/scalation/modeling/VariableKind.tasty deleted file mode 100644 index b5f0bb98a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/VariableKind.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest.class b/target/scala-3.6.4/classes/scalation/modeling/activationFunTest.class deleted file mode 100644 index 2c97d325d..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/activationFunTest.tasty deleted file mode 100644 index bf279b741..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest2.class b/target/scala-3.6.4/classes/scalation/modeling/activationFunTest2.class deleted file mode 100644 index 2fbdfbfaf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/activationFunTest2.tasty deleted file mode 100644 index 53cacf5b7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest3.class b/target/scala-3.6.4/classes/scalation/modeling/activationFunTest3.class deleted file mode 100644 index a8b6e710f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/activationFunTest3.tasty deleted file mode 100644 index 7d6959987..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest4.class b/target/scala-3.6.4/classes/scalation/modeling/activationFunTest4.class deleted file mode 100644 index 3c9e92ecc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest4.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/activationFunTest4.tasty deleted file mode 100644 index 80b29b559..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest5.class b/target/scala-3.6.4/classes/scalation/modeling/activationFunTest5.class deleted file mode 100644 index 6ac73b188..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/activationFunTest5.tasty deleted file mode 100644 index 267cdd75d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest6.class b/target/scala-3.6.4/classes/scalation/modeling/activationFunTest6.class deleted file mode 100644 index 39d9310eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/activationFunTest6.tasty deleted file mode 100644 index 860c21914..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/activationFunTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees$.class deleted file mode 100644 index 6df1e4d6b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees$package$.class deleted file mode 100644 index 219f3daeb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees$package.class deleted file mode 100644 index 622756711..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees$package.tasty deleted file mode 100644 index 2c2df3918..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees.class deleted file mode 100644 index fc5e1049f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees.tasty deleted file mode 100644 index 45ccfa7e9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/BaggingTrees.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$.class deleted file mode 100644 index b2aa48710..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$package$.class deleted file mode 100644 index 2adf9e2f9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$package$bc$2$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$package$bc$2$.class deleted file mode 100644 index 072e7d948..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$package$bc$2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$package$bc$4$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$package$bc$4$.class deleted file mode 100644 index 23cfca592..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$package$bc$4$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$package.class deleted file mode 100644 index 543cbd5b5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$package.tasty deleted file mode 100644 index df31c140e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier.class deleted file mode 100644 index 613790e38..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier.tasty deleted file mode 100644 index 4fd5bb8b6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/BayesClassifier.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$.class deleted file mode 100644 index d3752d527..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$BestStep$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$BestStep$.class deleted file mode 100644 index 4aaf89373..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$BestStep$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$BestStep.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$BestStep.class deleted file mode 100644 index f1789d7b2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$BestStep.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$package$.class deleted file mode 100644 index c79b42baf..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$package.class deleted file mode 100644 index dd13ef162..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$package.tasty deleted file mode 100644 index 58c92ee81..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier.class deleted file mode 100644 index 2fa8ef0d7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier.tasty deleted file mode 100644 index 32906f48b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Classifier.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree$.class deleted file mode 100644 index 911b68b29..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree$package$.class deleted 
file mode 100644 index cf911b8eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree$package$Tree$2$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree$package$Tree$2$.class deleted file mode 100644 index e289f0f1d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree$package$Tree$2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree$package.class deleted file mode 100644 index bdc4cf7b7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree$package.tasty deleted file mode 100644 index 689bc5fcf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree.class deleted file mode 100644 index 069c7dfbb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree.tasty deleted file mode 100644 index eade9f549..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45$.class deleted file mode 100644 index 79f109692..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45$package$.class deleted file mode 100644 index c8536ac4e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45$package.class deleted file mode 100644 index f7dba5082..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45$package.tasty deleted file mode 100644 index e0e9e100c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45.class deleted file mode 100644 index 2e3a4e2dc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45.tasty deleted file mode 100644 
index 3b4f3e0b9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp$.class deleted file mode 100644 index 8bf7d99db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp$package$.class deleted file mode 100644 index 872ee7b6d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp$package.class deleted file mode 100644 index 425158109..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp$package.tasty deleted file mode 100644 index c067ac81c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp.class deleted file mode 100644 index 9b6b6304a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp.tasty deleted file mode 100644 index f77a1fa68..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_C45wp.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3$.class deleted file mode 100644 index ed8b934a1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3$package$.class deleted file mode 100644 index 3f0a7756b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3$package.class deleted file mode 100644 index d5e86df12..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3$package.tasty deleted file mode 100644 index bfe2f33b2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3.class deleted file mode 
100644 index ae272d752..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3.tasty deleted file mode 100644 index 2d55d4824..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp$.class deleted file mode 100644 index aee5ff694..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp$package$.class deleted file mode 100644 index c14e0fb5e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp$package.class deleted file mode 100644 index 537ae277f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp$package.tasty deleted file mode 100644 index 2d4bf1aeb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp.class deleted file mode 100644 index 23520b557..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp.tasty deleted file mode 100644 index b60ac2d9b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/DecisionTree_ID3wp.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer$.class deleted file mode 100644 index 8f8d0b0cc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer$package$.class deleted file mode 100644 index 98cd630fe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer$package.class deleted file mode 100644 index 484d62f00..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer$package.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer$package.tasty deleted file mode 100644 index 8873ccde3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer.class deleted file mode 100644 index 66b6f24d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer.tasty deleted file mode 100644 index f5ada6eb0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_BreastCancer.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes$.class deleted file mode 100644 index fb894d89f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes$package$.class deleted file mode 100644 index 4a492e285..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes$package.class deleted file mode 100644 index f8f7a4f7c..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes$package.tasty deleted file mode 100644 index 015db0fcb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes.class deleted file mode 100644 index 5c8903253..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes.tasty deleted file mode 100644 index 1f11f9399..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Diabetes.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris$.class deleted file mode 100644 index 6cb32658a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris$package$.class deleted file mode 100644 index d2a21b056..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris$package.class 
b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris$package.class deleted file mode 100644 index 5dea09a6d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris$package.tasty deleted file mode 100644 index 53a2e4f53..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris.class deleted file mode 100644 index 5dc1182ca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris.tasty deleted file mode 100644 index e284244eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_Iris.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_MTcars$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_MTcars$.class deleted file mode 100644 index aecd0c70e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_MTcars$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_MTcars.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_MTcars.class deleted file mode 100644 index 5ecc2a1fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_MTcars.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_MTcars.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_MTcars.tasty deleted file mode 100644 index 74b7452fb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_MTcars.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis$.class deleted file mode 100644 index bc7ee566c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis$package$.class deleted file mode 100644 index ad28a5baf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis$package.class deleted file mode 100644 index 2df3ef6a8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis$package.tasty deleted file mode 100644 index 94f632ca6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis.class 
deleted file mode 100644 index 54862cadd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis.tasty deleted file mode 100644 index ecb3f2598..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont$.class deleted file mode 100644 index c9b5370f0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont$package$.class deleted file mode 100644 index dfed08d81..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont$package.class deleted file mode 100644 index ac48d6d2d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont$package.tasty deleted file mode 100644 index c4a0f8781..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont.class deleted file mode 100644 index 7157bae2a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont.tasty deleted file mode 100644 index 174738db0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Example_PlayTennis_Cont.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$.class deleted file mode 100644 index 40f77f48c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package$.class deleted file mode 100644 index bc98cd699..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package$TestFitC$2$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package$TestFitC$2$.class deleted file mode 100644 index 38da5de4c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package$TestFitC$2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package$TestFitC$4$.class 
b/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package$TestFitC$4$.class deleted file mode 100644 index 8e912a929..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package$TestFitC$4$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package$TestFitC$6$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package$TestFitC$6$.class deleted file mode 100644 index bae46baaa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package$TestFitC$6$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package$TestFitC$8$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package$TestFitC$8$.class deleted file mode 100644 index 8b00966b9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package$TestFitC$8$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package.class deleted file mode 100644 index 1a03f09f4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package.tasty deleted file mode 100644 index b3e1bed29..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC.class deleted file mode 100644 index c4ddba480..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC.tasty deleted file mode 100644 index e8e8bd1f2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/FitC.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov$.class deleted file mode 100644 index f75ba8db8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov$package$.class deleted file mode 100644 index b1ffc46d2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov$package.class deleted file mode 100644 index 23780504b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov$package.tasty deleted file mode 100644 index ac2c120a3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov.class deleted file mode 100644 index 2b4f7e8b6..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov.tasty deleted file mode 100644 index c92142f05..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/HiddenMarkov.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier$.class deleted file mode 100644 index f79cafbf5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier$package$.class deleted file mode 100644 index 7098ac2ec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier$package.class deleted file mode 100644 index 450575289..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier$package.tasty deleted file mode 100644 index 161b381f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier.class 
b/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier.class deleted file mode 100644 index 243658ba5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier.tasty deleted file mode 100644 index 513bd99fb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/KNN_Classifier.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis$.class deleted file mode 100644 index f98358307..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis$package$.class deleted file mode 100644 index 61518b219..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis$package.class deleted file mode 100644 index d9ad752f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis$package.tasty deleted file mode 100644 index 54233bd5d..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis.class deleted file mode 100644 index b4725ab32..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis.tasty deleted file mode 100644 index 79faa1b37..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/LinDiscAnalyis.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression$.class deleted file mode 100644 index 9c186b85a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression$package$.class deleted file mode 100644 index cbbbc2f2b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression$package.class deleted file mode 100644 index 638b66a36..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression$package.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression$package.tasty deleted file mode 100644 index d06b075b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression.class deleted file mode 100644 index 8f86cc0ce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression.tasty deleted file mode 100644 index 36ddd2734..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/LogisticRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes$.class deleted file mode 100644 index fd97959f8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes$package$.class deleted file mode 100644 index 0124d5a2b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes$package.class deleted file mode 100644 index 48cdea5cc..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes$package.tasty deleted file mode 100644 index 4ff02d10d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes.class deleted file mode 100644 index 511e71dab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes.tasty deleted file mode 100644 index de7292ff6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayes.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR$.class deleted file mode 100644 index c4b9a6e68..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR$package$.class deleted file mode 100644 index 4773f2c20..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR$package.class 
deleted file mode 100644 index 6ceb062fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR$package.tasty deleted file mode 100644 index f51fe243c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR.class deleted file mode 100644 index 0384603b1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR.tasty deleted file mode 100644 index d3498ce95..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NaiveBayesR.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Node$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Node$.class deleted file mode 100644 index ad469bbd9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Node$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Node.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/Node.class deleted file mode 100644 index ed3e32729..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Node.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/Node.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/Node.tasty deleted file mode 100644 index 
7007eae1a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/Node.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel$.class deleted file mode 100644 index 360b03780..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel$package$.class deleted file mode 100644 index 63c5daee7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel$package.class deleted file mode 100644 index ad1833d44..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel$package.tasty deleted file mode 100644 index 53f65c72e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel.class deleted file mode 100644 index 62bb01e03..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel.tasty deleted 
file mode 100644 index 32a931bab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/NullModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$1.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$1.class deleted file mode 100644 index 10cd1181b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$10.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$10.class deleted file mode 100644 index f8d398e79..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$10.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$11.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$11.class deleted file mode 100644 index 5fa1e1b03..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$11.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$12.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$12.class deleted file mode 100644 index ddc08342d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$12.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$13.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$13.class deleted file mode 100644 index c9884f4e9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$13.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$14.class 
b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$14.class deleted file mode 100644 index 66fd131f4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$14.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$15.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$15.class deleted file mode 100644 index 6603cd3e7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$15.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$16.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$16.class deleted file mode 100644 index 30e2b5400..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$16.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$17.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$17.class deleted file mode 100644 index 8883a4a62..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$17.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$18.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$18.class deleted file mode 100644 index 3d8e8862a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$18.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$2.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$2.class deleted file mode 100644 index 5fde09a00..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$2.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$3.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$3.class deleted file mode 100644 index 19a5ab745..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$4.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$4.class deleted file mode 100644 index f07baac6d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$5.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$5.class deleted file mode 100644 index 19a0331da..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$6.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$6.class deleted file mode 100644 index 323fa9810..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$7.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$7.class deleted file mode 100644 index 98515a509..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$8.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$8.class deleted file mode 100644 index c35853ead..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$8.class and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$9.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$9.class deleted file mode 100644 index 3c3e4dc0e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$$anon$9.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$.class deleted file mode 100644 index 38a7df7e6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC.class deleted file mode 100644 index 9770ab0f3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC.tasty deleted file mode 100644 index 89f66c850..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/QoFC.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest$.class deleted file mode 100644 index 4fd84e08f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest$package$.class deleted file mode 100644 index 4e0d8df3c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest$package$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest$package.class deleted file mode 100644 index f83cb2d58..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest$package.tasty deleted file mode 100644 index e7cf09897..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest.class deleted file mode 100644 index 56971e00e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest.tasty deleted file mode 100644 index 5d6536577..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/RandomForest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA$.class deleted file mode 100644 index 923fa32a1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA$package$.class deleted file mode 100644 index d363dfa8f..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA$package.class deleted file mode 100644 index eeece663a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA$package.tasty deleted file mode 100644 index f4b02af50..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA.class deleted file mode 100644 index f9fdf1132..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA.tasty deleted file mode 100644 index 482b60c54..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLDA.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression$.class deleted file mode 100644 index df9069c30..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression$package$.class 
b/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression$package$.class deleted file mode 100644 index b887bd275..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression$package.class deleted file mode 100644 index 4e30f9147..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression$package.tasty deleted file mode 100644 index 0ce931e0e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression.class deleted file mode 100644 index 089db84cd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression.tasty deleted file mode 100644 index 023c8a006..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/SimpleLogisticRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine$.class 
b/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine$.class deleted file mode 100644 index 8d7aabd29..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine$package$.class deleted file mode 100644 index 351230025..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine$package.class deleted file mode 100644 index c74cf2fdd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine$package.tasty deleted file mode 100644 index 519818533..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine.class deleted file mode 100644 index bcf8e6dd7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine.tasty deleted file mode 100644 index 
7983ade60..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/SupportVectorMachine.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes$.class deleted file mode 100644 index 5728cbedc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes$package$.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes$package$.class deleted file mode 100644 index 7d79a7817..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes$package.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes$package.class deleted file mode 100644 index dc46830eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes$package.tasty deleted file mode 100644 index dbd37fbb7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes.class deleted file mode 100644 index 21220b1cf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes.tasty deleted 
file mode 100644 index 252141219..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/TANBayes.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest.class deleted file mode 100644 index e7ac08194..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest.tasty deleted file mode 100644 index 96537ec63..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest2.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest2.class deleted file mode 100644 index 8a0e02f25..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest2.tasty deleted file mode 100644 index b36836477..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest3.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest3.class deleted file mode 100644 index 7b964e38a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest3.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest3.tasty deleted file mode 100644 index 5565010bb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest4.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest4.class deleted file mode 100644 index 2db6f3278..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest4.tasty deleted file mode 100644 index a7ed65272..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest5.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest5.class deleted file mode 100644 index 44beb9d73..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest5.tasty deleted file mode 100644 index 917316040..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest6.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest6.class deleted file mode 100644 index c97dea997..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest6.class and /dev/null differ 
diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest6.tasty deleted file mode 100644 index 3047e2d7d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest7.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest7.class deleted file mode 100644 index 0cabb97f7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest7.tasty deleted file mode 100644 index b7537a8bd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/baggingTreesTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/bayesClassifierTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/bayesClassifierTest.class deleted file mode 100644 index 6e7af5bad..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/bayesClassifierTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/bayesClassifierTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/bayesClassifierTest.tasty deleted file mode 100644 index f0d14fbbf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/bayesClassifierTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/bayesClassifierTest2.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/bayesClassifierTest2.class deleted file mode 100644 index 4f9b411e1..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/bayesClassifierTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/bayesClassifierTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/bayesClassifierTest2.tasty deleted file mode 100644 index 4d207a509..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/bayesClassifierTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/classifierTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/classifierTest.class deleted file mode 100644 index bcc8929cf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/classifierTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/classifierTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/classifierTest.tasty deleted file mode 100644 index 52c542f14..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/classifierTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTreeTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTreeTest.class deleted file mode 100644 index 0d5f633b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTreeTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTreeTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTreeTest.tasty deleted file mode 100644 index ca31e8690..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTreeTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test.class 
b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test.class deleted file mode 100644 index d1592f491..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test.tasty deleted file mode 100644 index 36a7f873f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test2.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test2.class deleted file mode 100644 index 1c8d32cf5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test2.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test2.tasty deleted file mode 100644 index 44087e1c9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test3.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test3.class deleted file mode 100644 index 284c27166..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test3.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test3.tasty deleted file mode 100644 index 97e4c57d5..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test4.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test4.class deleted file mode 100644 index 3810589bd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test4.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test4.tasty deleted file mode 100644 index a2b73dd73..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test5.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test5.class deleted file mode 100644 index 70f152573..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test5.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test5.tasty deleted file mode 100644 index b46b28c21..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45Test5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45wpTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45wpTest.class deleted file mode 100644 index 31141b58a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45wpTest.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45wpTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45wpTest.tasty deleted file mode 100644 index b629c085c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45wpTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45wpTest2.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45wpTest2.class deleted file mode 100644 index a0039de76..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45wpTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45wpTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45wpTest2.tasty deleted file mode 100644 index 6f4637444..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_C45wpTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test.class deleted file mode 100644 index f89f0600a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test.tasty deleted file mode 100644 index 168cba555..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test2.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test2.class deleted 
file mode 100644 index 309b7f52f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test2.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test2.tasty deleted file mode 100644 index 5a9f1482c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test3.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test3.class deleted file mode 100644 index 63cd10d12..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test3.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test3.tasty deleted file mode 100644 index 036540d29..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3Test3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest.class deleted file mode 100644 index a2b52859a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest.tasty deleted file mode 100644 index 2e39cc48b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest2.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest2.class deleted file mode 100644 index 7b44e8d66..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest2.tasty deleted file mode 100644 index 109dbd856..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest3.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest3.class deleted file mode 100644 index 243ec711f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest3.tasty deleted file mode 100644 index 59b97252f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/decisionTree_ID3wpTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_BreastCancerTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/example_BreastCancerTest.class deleted file mode 100644 index 1d919099a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_BreastCancerTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_BreastCancerTest.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/classifying/example_BreastCancerTest.tasty deleted file mode 100644 index 40ffc5aef..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_BreastCancerTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_DiabetesTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/example_DiabetesTest.class deleted file mode 100644 index 93e733d80..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_DiabetesTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_DiabetesTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/example_DiabetesTest.tasty deleted file mode 100644 index 41405a3ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_DiabetesTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_IrisTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/example_IrisTest.class deleted file mode 100644 index d6ddab986..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_IrisTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_IrisTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/example_IrisTest.tasty deleted file mode 100644 index b3b54ab7e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_IrisTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_PlayTennisTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/example_PlayTennisTest.class deleted file mode 100644 index 353c28dcc..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_PlayTennisTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_PlayTennisTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/example_PlayTennisTest.tasty deleted file mode 100644 index 398de0fb1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_PlayTennisTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_PlayTennis_ContTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/example_PlayTennis_ContTest.class deleted file mode 100644 index 0d4873c05..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_PlayTennis_ContTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_PlayTennis_ContTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/example_PlayTennis_ContTest.tasty deleted file mode 100644 index e9756117d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/example_PlayTennis_ContTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest.class deleted file mode 100644 index 49714589e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest.tasty deleted file mode 100644 index b3c2d16e0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest2.class 
b/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest2.class deleted file mode 100644 index 6c411ddca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest2.tasty deleted file mode 100644 index 21e3cf854..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest3.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest3.class deleted file mode 100644 index c206d1adf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest3.tasty deleted file mode 100644 index 9eaffe69c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest4.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest4.class deleted file mode 100644 index 4c0f69b26..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest4.tasty deleted file mode 100644 index dbb697b1e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/fitCTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest.class 
b/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest.class deleted file mode 100644 index bcb5342f3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest.tasty deleted file mode 100644 index c9d749400..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest2.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest2.class deleted file mode 100644 index 5a14ba54f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest2.tasty deleted file mode 100644 index 040e5e029..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest3.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest3.class deleted file mode 100644 index 965d61717..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest3.tasty deleted file mode 100644 index b83468e04..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest3.tasty and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest4.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest4.class deleted file mode 100644 index 5a76cd362..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest4.tasty deleted file mode 100644 index 8fc53f62f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/hiddenMarkovTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/index.html b/target/scala-3.6.4/classes/scalation/modeling/classifying/index.html deleted file mode 100644 index 895660354..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/classifying/index.html +++ /dev/null @@ -1,35 +0,0 @@ - - -

    Source files in classifying Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest.class deleted file mode 100644 index cdb505a7c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest.tasty deleted file mode 100644 index 41fc87a4e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest2.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest2.class deleted file mode 100644 index b8ecbeda4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest2.tasty deleted file mode 100644 index ec8af04e4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest3.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest3.class deleted file mode 100644 index f460b376a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest3.tasty deleted 
file mode 100644 index a943faf45..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest4.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest4.class deleted file mode 100644 index 670a59cd5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest4.tasty deleted file mode 100644 index 518679390..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/kNN_ClassifierTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/linDiscAnalyisTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/linDiscAnalyisTest.class deleted file mode 100644 index a88b5ed4f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/linDiscAnalyisTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/linDiscAnalyisTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/linDiscAnalyisTest.tasty deleted file mode 100644 index 718a1d105..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/linDiscAnalyisTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/logisticRegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/logisticRegressionTest.class deleted file mode 100644 index d00548f7f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/logisticRegressionTest.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/logisticRegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/logisticRegressionTest.tasty deleted file mode 100644 index 7151e780c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/logisticRegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/logisticRegressionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/logisticRegressionTest2.class deleted file mode 100644 index 439b7402b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/logisticRegressionTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/logisticRegressionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/logisticRegressionTest2.tasty deleted file mode 100644 index a15130cff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/logisticRegressionTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesRTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesRTest.class deleted file mode 100644 index df3c76a07..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesRTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesRTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesRTest.tasty deleted file mode 100644 index b2834c26b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesRTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesRTest2.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesRTest2.class deleted file mode 100644 index 
3979a7dea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesRTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesRTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesRTest2.tasty deleted file mode 100644 index a1288a0f0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesRTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest.class deleted file mode 100644 index 816e36619..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest.tasty deleted file mode 100644 index 61a2f11dc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest2.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest2.class deleted file mode 100644 index 4e258a683..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest2.tasty deleted file mode 100644 index 4b2cc1d1b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest3.class 
b/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest3.class deleted file mode 100644 index e5f7ff1f9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest3.tasty deleted file mode 100644 index 0f12e682a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest4.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest4.class deleted file mode 100644 index 528a1b71b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest4.tasty deleted file mode 100644 index 801d18423..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/naiveBayesTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/nullModelTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/nullModelTest.class deleted file mode 100644 index 3c61595b9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/nullModelTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/nullModelTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/nullModelTest.tasty deleted file mode 100644 index bf287198c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/nullModelTest.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/old/BayesClassifier.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/classifying/old/BayesClassifier.scala.bak deleted file mode 100644 index 792b2d6b4..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/classifying/old/BayesClassifier.scala.bak +++ /dev/null @@ -1,250 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Hao Peng - * @version 2.0 - * @date Sat Aug 8 20:26:34 EDT 2015 - * @see LICENSE (MIT style license file). - * - * @title Model Framework: Trait for Bayesian Classifiers - */ - -package scalation -package modeling -package classifying - -import scala.runtime.ScalaRunTime.stringOf - -import scalation.mathstat._ -import scalation.log2 - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BayesClassifier` trait provides methods for Bayesian Classifiers, including - * calculations of joint probabilities and Conditional Mutual Information (CMI). - * @param k the number of classes - */ -trait BayesClassifier (k: Int = 2): - - private val EPS = 1E-9 // a small value - -// FIX: replace Array [Array [Array [MatrixD]]] with Array [Array [RTensorD]] -// nu_(j)(l)(x_ij)(x_il, yi) += 1 // increment frequency for xj, xl, yi <-- current -// nu_(j, l, x(j), x(l), k) += 1 // 5D Tensor not implemented -// nu_(j)(x(j)(l, x(l), k) += 1 // 2D Array of 3D RTensorD <-- try this - -/* - val (x_dim2, vc) = (4, VectorI (3, 3, 2, 2)) - val nu_ = Array.ofDim [Array [RTensorD]] (x_dim2) - for j <- 0 until x_dim2 do nu_(j) = Array.fill (vc(j)) (new RTensorD (x_dim2, vc, k)) -*/ - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the joint frequency of X, Z and y for each combination of features xj, xl. 
- * @param x the integer-valued data vectors stored as rows of a matrix - * @param y the class vector, where y(i) = class for row i of the matrix x, x(i) - * @param vc the vector of value counts (number of distinct values per feature) - */ - def freq_XZy (x: MatrixD, y: VectorI, vc: VectorI): Array [Array [Array [MatrixD]]] = - val nu_XZy = Array.ofDim [Array [MatrixD]] (x.dim2, x.dim2) - for j <- x.indices2; l <- j + 1 until x.dim2 do - nu_XZy(j)(l) = Array.ofDim [MatrixD] (vc(j)) - nu_XZy(l)(j) = Array.ofDim [MatrixD] (vc(l)) - for xj <- 0 until vc(j) do nu_XZy(j)(l)(xj) = new MatrixD (vc(l), k) - for xl <- 0 until vc(l) do nu_XZy(l)(j)(xl) = new MatrixD (vc(j), k) - end for - for i <- x.indices do updateFreq (x, y, i, nu_XZy) - nu_XZy - end freq_XZy - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Increment frequency counters used in CMI calculations based on the i-th - * row of the data matrix. - * @param x the integer-valued data vectors stored as rows of a matrix - * @param y the class vector, where y(i) = class for row i of the matrix x, x(i) - * @param i the index for current data row - * @param nu_XZy the joint frequency of X, Z and y for each combination of features xj, xl. - */ - private def updateFreq (x: MatrixD, y: VectorI, i: Int, - nu_XZy: Array [Array [Array [MatrixD]]]): Unit = - val yi = y(i) // get the class for ith row - for j <- x.indices2 do // for each feature/variable xj - for l <- j + 1 until x.dim2 do // for each feature/variable xl - val x_ij = x(i, j).toInt - val x_il = x(i, l).toInt - nu_XZy(j)(l)(x_ij)(x_il, yi) += 1 // increment frequency for xj, xl, yi - nu_XZy(l)(j)(x_il)(x_ij, yi) += 1 // increment frequency for xl, xj, yi - end for - end for - end updateFreq - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the joint probability of X and y for each feature xj. 
- * @param x the integer-valued data vectors stored as rows of a matrix - * @param y the class vector, where y(i) = class for row i of the matrix x, x(i) - * @param vc the vector of value counts (number of distinct values per feature) - */ - def jprob_Xy (x: MatrixD, y: VectorI, vc: VectorI): RTensorD = - val nu_Xy = RTensorD.freq (x, vc, y, k) // joint frequency of X and y - val p_Xy = new RTensorD (x.dim2, vc, k) - for j <- x.indices2; xj <- 0 until vc(j) do - p_Xy(j)(xj) = (nu_Xy(j)(xj) + EPS) / x.dim.toDouble - end for - p_Xy - end jprob_Xy - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the joint probability of X, Z and y each each combination of features xj, xl. - * @param x the integer-valued data vectors stored as rows of a matrix - * @param y the class vector, where y(i) = class for row i of the matrix x, x(i) - * @param vc the vector of value counts (number of distinct values per feature) - */ - def jprob_XZy (x: MatrixD, y: VectorI, vc: VectorI): Array [Array [Array [MatrixD]]] = - val nu_XZy = freq_XZy (x, y, vc) // joint frequency of X, Z and y - val p_XZy = Array.ofDim [Array [MatrixD]] (x.dim2, x.dim2) - for j <- x.indices2; l <- j + 1 until x.dim2 do - p_XZy(j)(l) = Array.ofDim [MatrixD] (vc(j)) - for xj <- 0 until vc(j) do - p_XZy(j)(l)(xj) = new MatrixD (vc(l), k) - for xl <- 0 until vc(l) do - p_XZy(j)(l)(xj)(xl) = (nu_XZy(j)(l)(xj)(xl) + EPS) / x.dim.toDouble - end for - p_XZy - end jprob_XZy - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute Conditional Mutual Information (CMI) matrix given the probability - * of y and joint probabilities of Xy and XZy, where y is the class, and - * X & Z are features, I(x; z | y). 
- * @see en.wikipedia.org/wiki/Conditional_mutual_information - * @param x the integer-valued data vectors stored as rows of a matrix - * @param vc the vector of value counts (number of distinct values per feature) - * @param y the class vector, where y(i) = class for row i of the matrix x, x(i) - */ - def cmi (x: MatrixD, vc: VectorI, y: VectorI): MatrixD = - val n = x.dim2 // number of features - val cmiMx = new MatrixD (n, n) // CMI matrix - - val p_y = y.freq (k)._2 // class/prior probability of y - val p_Xy = jprob_Xy (x, y, vc) // joint probability of X and y - val p_XZy = jprob_XZy (x, y, vc) // joint probability of X, Z and y - - for c <- 0 until k do // check each class, where k = p_y.size - val py = p_y(c) - for j <- p_Xy.indices; xj <- 0 until vc(j) do - val pxy = p_Xy(j, xj, c) - for l <- j + 1 until n; xl <- 0 until vc(l) do - val pzy = p_Xy(l, xl, c) - val pxzy = p_XZy(j)(l)(xj)(xl, c) - cmiMx(j, l) += pxzy * log2 ((py * pxzy) / (pxy * pzy)) - end for - cmiMx - end cmi - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - def jProbXY (x: VectorI, vcx: Int, y: VectorI, k: Int): MatrixD = - (Probability.freq (x, vcx, y, k) + EPS) / x.dim.toDouble - end jProbXY - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - def jProbXZY (x: VectorI, z: VectorI, vcxz: VectorI, y: VectorI, k: Int): RTensorD = - (RTensorD.freq (x, z, vcxz, y, k) + EPS) / x.dim.toDouble - end jProbXZY - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Calculate the Conditional Mutual Information (CMI) given data vectors x and z, - * and response/classification vector y, i.e., I(x; z | y). 
- * @see en.wikipedia.org/wiki/Conditional_mutual_information - * @param x the first integer-valued data vector - * @param z the second integer-valued data vector - * @param vcxz the vector of value counts (number of distinct values per feature) - * @param y the class vector, where y(i) = class - */ - def cmi (x: VectorI, z: VectorI, vcxz: VectorI, y: VectorI): Double = - var cmi_ = 0.0 - - val p_y = y.freq (k)._2 // class/prior probability of y - val p_xy = jProbXY (x, vcxz(0), y, k) // joint probability of x and y - val p_zy = jProbXY (z, vcxz(1), y, k) // joint probability of z and y - val p_xzy = jProbXZY (x, z, vcxz, y, k) // joint probability of x, z and y - - for c <- 0 until k do // check each class, where k = p_y.size - val py = p_y(c) - for xj <- 0 until vcxz(0) do - val pxy = p_xy(xj, c) - for zl <- 0 until vcxz(1) do - val pzy = p_zy(zl, c) - val pxzy = p_xzy(xj, zl, c) - cmi_ += pxzy * log2 ((py * pxzy) / (pxy * pzy)) - end for - cmi_ - end cmi - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Calculate the Conditional Mutual Information (CMI) matrix given data matrix x - * and response/classification vector y, i.e., I(x; z | y) for all pairs - * features xj and xl in matrix x. 
- * @see en.wikipedia.org/wiki/Conditional_mutual_information - * @param x the integer-valued data vectors stored as rows of a matrix - * @param vc the vector of value counts (number of distinct values per feature) - * @param y the class vector, where y(i) = class for row i of the matrix x, x(i) - */ - def cmiMatrix (x: MatrixD, vc: VectorI, y: VectorI): MatrixD = - val n = x.dim2 // number of features - val cmiMx = new MatrixD (n, n) // CMI matrix - - for j <- x.indices2; l <- j + 1 until n do - cmiMx(j, l) = cmi (x(?, j).toInt, x(?, l).toInt, VectorI (vc(j), vc(l)), y) - end for - cmiMx - end cmiMatrix - -end BayesClassifier - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bayesClassifierTest` main function is used to test the `BayesClassifier` - * class using the Play Tennis Example. - * > runMain scalation.modeling.classifying.bayesClassifierTest - */ -@main def bayesClassifierTest (): Unit = - - import Example_PlayTennis.{x, y, k} - - println (s"x = $x") - println (s"y = $y") - - object bc extends BayesClassifier (k) // create a Bayes Classifier object - - Classifier.shift2zero (x) // make sure values for all features start at zero - val vc = Classifier.vc_fromData (x) // set value counts from data - - val p_y = y.freq (k)._2 // class/prior probability of y - banner (s"Probability of Response y") - println (s"p_y = $p_y") - - val jp_Xy = bc.jprob_Xy (x, y, vc) // joint probability of X and y - for j <- x.indices2 do - banner (s"Probability Feature x$j and Response y") - println (s"jp_Xy = ${jp_Xy(j)}") - end for - - val jp_XZy = bc.jprob_XZy (x, y, vc) // joint probability of X, Z and y - for j <- x.indices2; l <- j + 1 until x.dim2 do - banner (s"Probability Features x$j, x$l and Response y") - println (s"jp_XZy = ${stringOf (jp_XZy(j)(l))}") - end for - - banner ("Conditional Mutual Information") - val cmi = bc.cmi (x, vc, y) // Conditional Mutual Information (CMI) - println (s"cmi = $cmi") - - val cmi2 = 
bc.cmiMatrix (x, vc, y) // Conditional Mutual Information (CMI) - println (s"cmi2 = $cmi2") - - val cmiAns = MatrixD ((4, 4), 0.00000, 0.419593, 0.222815, 0.311752, - 0.00000, 0.00000, 0.419593, 0.168895, - 0.00000, 0.00000, 0.00000, 0.0610538, - 0.00000, 0.00000, 0.00000, 0.00000) - println (s"cmiAns = $cmiAns") - -end bayesClassifierTest - diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/old/TANBayes.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/classifying/old/TANBayes.scala.bak deleted file mode 100644 index 135adf356..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/classifying/old/TANBayes.scala.bak +++ /dev/null @@ -1,443 +0,0 @@ - -/:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Hao Peng, Zhe Jin - * @version 2.0 - * @date Mon Jul 27 01:27:00 EDT 2015 - * @see LICENSE (MIT style license file). - * - * @note Model: Integer-Based Tree Augmented Naive Bayes (TAN) Classifier - */ - -package scalation -package modeling -package classifying - -import scala.collection.mutable.{Set => SET, Map} -import scala.runtime.ScalaRunTime.stringOf - -import scalation.columnar_db.Relation -import scalation.graph_db.{MGraph, MinSpanningTree, Pair} -import scalation.mathstat._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TANBayes` class implements an Integer-Based Tree Augmented Naive Bayes - * Classifier, which is a commonly used such classifier for discrete input data. - * The classifier is trained using a data matrix x and a classification vector y. - * Each data vector in the matrix is classified into one of k classes numbered - * 0, ..., k-1. Prior probabilities are calculated based on the population of - * each class in the training-set. Relative posterior probabilities are computed - * by multiplying these by values computed using conditional probabilities. 
- *----------------------------------------------------------------------------- - * This classifier supports limited dependency between features/variables. - * @param x the input/data m-by-n matrix - * @param y the class vector, where y(i) = class for row i of matrix x - * @param fname_ the names of the features/variables - * @param k the number of classes - * @param cname_ the names of the classes - * @param vc the value count (number of distinct values) for each feature - * @param hparam the hyper-parameters - */ -class TANBayes (x: MatrixD, y: VectorI, fname_ : Array [String] = null, - k: Int = 2, cname_ : Array [String] = Array ("No", "Yes"), - protected var vc: VectorI = null, hparam: HyperParameter = NaiveBayes.hp) - extends Classifier (x, y, fname_, k, cname_, hparam) - with FitC (y, k): - - private val debug = debugf ("TANBayes", true) // debug function - private val flaw = flawf ("TANBayes") // flaw function - - if cname.length != k then flaw ("init", "# class names != # classes") - - private val me = hparam("me").toDouble // m-estimates (me == 0 => regular MLE estimates) - private val me_vc = VectorD (vc.map (me / _)) // me / vc_j for all j - private val (m, n) = (x.dim, x.dim2) // number of (instances, variables) - private val md = m.toDouble // m as a double (real number) - - private var parent = new VectorI (n) // vector holding the parent for each feature/variable - private val vcp = new VectorI (n) // value count for the parent - - protected val nu_XyP = new HMatrix4 [Int] (k, n) // conditional frequency counts for variable/feature j: xj - protected val p_XyP = new HMatrix4 [Double] (k, n) // conditional probabilities for variable/feature j: xj - - if vc == null then - shiftToZero (); vc = vc_fromData // set value counts from data - end if - - nu_X = HMatrix2 [Int] (n, vc) // local frequency of X = [x_0, ... 
x_n-1] - nu_Xy = HMatrix3 [Int] (k, n, vc) // local joint frequency of X and y - - private val nu_X = Array.ofDim [VectorI] (n) // frequency of X = [x_0, ... x_n-1] - private val nu_Xy = Array.ofDim [MatrixI] (n) // Joint Frequency Tables (JFTs) one per feature - - nu_XyZ = HMatrix5 [Int] (k, n, n, vc, vc) // local joint frequency (using partial dataset, i.e. when using cross validation) - // of X, y and Z where X, Z are features/columns - debug ("init", s" value count vc = $vc \n vcp = $vcp \n parent = $parent") - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the classifier by computing the probabilities for c, and the - * conditional probabilities for x_j. - * @param itest indices of the instances considered as testing data - FIX - */ - def train (itest: VectorI): Unit = - val idx = VectorI.range (0, m) diff itest - computeParent (idx) // frequency computations are also done here - computeVcp () - nu_XyP.alloc (vc, vcp) - p_XyP.alloc (vc, vcp) - copyFreqXyP () - train2 () - end train - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the classifier by computing the probabilities for y, and the - * conditional probabilities for x_j. - */ - private def train2 (): Unit = - p_y = nu_y.toDouble / md // probability for class yi - for i <- 0 until k; j <- 0 until n do // for each class yi & feature xj - val me_vc = me / vc(j).toDouble - for xj <- 0 until vc(j); xp <- 0 until vcp(j) do - val d = if parent(j) > -1 then - nu_Xy(i, parent(j), xp) - else - nu_y(i) - // for each value for feature j: xj, par(j): xp - p_XyP(i, j, xj, xp) = (nu_XyP(i, j, xj, xp) + me_vc) / (d + me) - end for - - debug ("train2", s" py = $p_y \n p_XyP = $p_XyP") - end train2 - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the parent of each feature based on the correlation matrix. 
- * Feature x_i is only a possible candidate for parent of feature x_j if * i < j - * @param idx indicies of either training or testing region - */ - def computeParent (idx: VectorI): Unit = - val cmiMx = calcCMI (idx, vc) - for j1 <- 0 until n; j2 <- 0 until j1 do cmiMx(j1, j2) = cmiMx(j2, j1) - - val ch = Array.fill (n)(SET [Int] ()) - val elabel = Map [Pair, Double] () - - for i <- 0 until n; j <- i + 1 until n do { ch(i) += j; elabel += new Pair(i, j) -> cmiMx(i, j) } - - parent = { val a = maxSpanningTree (ch, elabel).makeITree (); new VectorI (a.size, a) } - end computeParent - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parent. - */ - override def getParent: VectoI = parent - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Clone/copy the values from global freq variables into local ones. - * Only the joint frequencies of Class, X-feature, and its Parent needs - * to be copied for parameter learning purposes. - */ - private def copyFreqXyP (): Unit = - for i <- 0 until k; j <- x.indices2; xj <- 0 until vc(j); xp <- 0 until vcp(j) do - nu_XyP(i, j, xj, xp) = if parent(j) > -1 then - nu_XyZ(i, j, parent(j), xj, xp) - else - nu_Xy(i, j, xj) - end for - end copyFreqXyP - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Increment frequency counters used in CMI calculations based on the 'i'th - * row of the data matrix. 
- * @param i the index for current data row - */ - protected def updateFreq (i: Int): Unit = - val yi = y(i) // get the class for ith row - nu_y(yi) += 1 // increment frequency for class yi - for j <- x.indices2 do // for each feature/variable xj - nu_X(j, x(i, j)) += 1 // increment frequency for xj - nu_Xy(yi, j, x(i, j)) += 1 // increment frequency for xj, yi - for j2 <- j+1 until n do // for each feature/variable xj2 - nu_XyZ(yi, j, j2, x(i, j), x(i, j2)) += 1 // increment frequency for xj, yi, xj2 - nu_XyZ(yi, j2, j, x(i, j2), x(i, j)) += 1 // increment frequency for xj2, yi, xj - end for - end for - end updateFreq - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create MaxSpanningTree from conditional mutual information. - * @param ch the adjacency set - * @param elabel the edge labels/weights - */ - def maxSpanningTree (ch: Array [SET [Int]], elabel: Map [Pair, Double]): MinSpanningTree = - val g = new MGraph (ch, Array.ofDim (n), elabel) - new MinSpanningTree (g, false, false) // param 2 = false means max spanning tree - end maxSpanningTree - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the value counts of each parent feature based on the parent vector. - * Let 1 be the default value count when there is no parent. - */ - def computeVcp (): Unit = - for j <- vcp.indices do - vcp(j) = if parent(j) > -1 then vc(parent(j)) else 1 - end for - end computeVcp - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given a discrete data vector 'z', classify it returning the class number - * (0, ..., k-1) with the highest relative posterior probability. - * Return the best class, its name and its relative probability. 
- * @param z the data vector to classify - */ - def predictI (z: VectoI): Int = - val prob = new VectorD (p_y) - for i <- 0 until k; j <- 0 until n do - prob(i) *= (if parent(j) > -1 then - p_XyP(i, j, z(j), z(parent(j))) // P(X_j = z_j | X_p = z_p, y = c), x-parent - else - p_XyP(i, j, z(j), 0)) // P(X_j = z_j | x_p = z_p, y = c), no x-parent - end for - prob.argmax () // class with the highest relative posterior probability - end predictI - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reset or re-initialize the frequency tables. - */ - def reset (): Unit = - nu_y.set (0) - nu_X.set (0) - nu_Xy.set (0) - nu_XyZ.set (0) - end reset - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Print the conditional probability tables by iterating over the features/variables. - */ - def printConditionalProb (): Unit = - for j <- p_XyP.indices2 do - println (s"ConditionalProb for x$j = ${p_XyP(j)}") - end for - end printConditionalProb - -end TANBayes - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TANBayes` object is the companion object for the `TANBayes` class. - */ -object TANBayes - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `TANBayes` object, passing x and y together in one matrix. 
- * @param xy the combined data-response matrix - * @param fname the names of the features/variables - * @param k the number of classes - * @param cname the names of the classes - * @param vc the value count (number of distinct values) for each feature - * @param hparam the hyper-parameters - */ - def apply (xy: MatrixI, fname: Array [String] = null, k: Int = 2, - cname: Array [String] = Array ("No", "Yes"), vc: VectorI = null, - hparam: HyperParameter = NaiveBayes.hp) - (col: Int = xy.dim2 - 1): TANBayes = - val (x, y) = (xy.not(?, col), xy(?, col).toInt) // data matrix, response vector - new TANBayes (x, y, fname, k, cname, vc, hparam) - end apply - -end TANBayes - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TANBayesTest` object is used to test the `TANBayes0` and 'TANBayes' classes. - * > runMain scalation.analytics.classifier.TANBayesTest - */ -object TANBayesTest extends App -{ - import ExampleTennis._ - - banner ("Tennis Example") - println ("xy = " + xy) - println ("-" * 60) - val x = xy.sliceCol (0, xy.dim2 - 1) // data/input matrix - val y = xy.col (xy.dim2 - 1) // response/class label vector - println (s"x = $x") - - val tan0 = TANBayes0 (xy, fn, k, cn) // create a classifier tan0 - tan0.train () // train the classifier tan0 - val tan = TANBayes (xy, fn, k, cn) // create a classifier tan - ClassifierInt.analyze (tan) - - tan.printClassProb () // print class probabilities - tan.printConditionalProb () // print conditional probabilities - - val z = VectorI (2, 2, 1, 1) // new data vector to classify - banner (s"Classify $z") - println (s"Use tan0 to classify ($z) = " + tan0.classify (z)) - println (s"Use tan to classify ($z) = " + tan.classify (z)) - - banner ("All Test") - val yp = tan.classify () - tan.contrast (yp) - println (tan.report) - println (tan.summary (tan.parameter)) - - banner ("Cross-validation for TANBayes0") - tan0.crossValidateRand (10, true) - banner ("Cross-validation for TANBayes") - 
tan.crossValidateRand (10, true) - -} // TANBayesTest object - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TANBayesTest2` object is used to test the `TANBayes0` and `TANBayes` classes. - * Classify whether a car is more likely to be stolen (1) or not (1). - * @see www.inf.u-szeged.hu/~ormandi/ai2/06-AugNaiveBayes-example.pdf - * > runMain scalation.analytics.classifier.TANBayesTest2 - */ -object TANBayesTest2 extends App -{ - // x0: Color: Red (1), Yellow (0) - // x1: Type: SUV (1), Sports (0) - // x2: Origin: Domestic (1), Imported (0) - // features: x0 x1 x2 - val x = new MatrixI((10, 3), 1, 0, 1, // data matrix - 1, 0, 1, - 1, 0, 1, - 0, 0, 1, - 0, 0, 0, - 0, 1, 0, - 0, 1, 0, - 0, 1, 1, - 1, 1, 0, - 1, 0, 0) - - val y = VectorI (1, 0, 1, 0, 1, 0, 1, 0, 0, 1) // classification vector: 0(No), 1(Yes)) - val fn = Array ("Color", "Type", "Origin") // feature/variable names - val cn = Array ("No", "Yes") // class names - - println ("xy = " + (x :^+ y)) - println ("-" * 60) - - val tan0 = new TANBayes0 (x, y, fn, 2, cn) // create the classifier - tan0.train () - val tan = new TANBayes (x, y, fn, 2, cn) // create the classifier - ClassifierInt.analyze (tan) - - // test sample ------------------------------------------------------------ - val z1 = VectorI (1, 0, 1) // existing data vector to classify - val z2 = VectorI (1, 1, 1) // new data vector to classify - println (s"Use tan0 to classify ($z1) = " + tan0.classify (z1)) - println (s"Use tan to classify ($z1) = " + tan.classify (z1)) - println (s"Use tan0 to classify ($z2) = " + tan0.classify (z2)) - println (s"Use tan to classify ($z2) = " + tan.classify (z2)) - - banner ("All Test") - val yp = tan.classify () - tan.contrast (yp) - println (tan.report) - println (tan.summary (tan.parameter)) - - banner ("Cross-validation for TANBayes0") - tan0.crossValidateRand () - banner ("Cross-validation for TANBayes") - tan.crossValidateRand () - -} // TANBayesTest2 object - - 
-//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TANBayesTest3` object is used to test the `TANBayes0` and `TANBayes` classes. - * Given whether a person is Fast and/or Strong, classify them as making C = 1 - * or not making C = 0 the football team. - * > runMain scalation.analytics.classifier.TANBayesTest3 - */ -object TANBayesTest3 extends App -{ - // training-set ----------------------------------------------------------- - // x0: Fast - // x1: Strong - // y: Classification (No/0, Yes/1) - // features: x0 x1 y - val xy = new MatrixI((10, 3), 1, 1, 1, - 1, 1, 1, - 1, 0, 1, - 1, 0, 1, - 1, 0, 0, - 0, 1, 0, - 0, 1, 0, - 0, 1, 1, - 0, 0, 0, - 0, 0, 0) - - val fn = Array ("Fast", "Strong") // feature names - val cn = Array ("No", "Yes") // class names - - println("xy = " + xy) - println ("-" * 60) - - val tan0 = TANBayes0 (xy, fn, 2, cn, 1, null) // create the classifier - tan0.train () - val tan = TANBayes (xy, fn, 2, cn, 1, null) // create the classifier - ClassifierInt.analyze (tan) - - // test sample ------------------------------------------------------------ - val z = VectorI (1, 0) // new data vector to classify - println (s"Use tan0 to classify ($z) = " + tan0.classify (z)) - println (s"Use tan to classify ($z) = " + tan.classify (z)) - - banner ("All Test") - val yp = tan.classify () - tan.contrast (yp) - println (tan.report) - println (tan.summary (tan.parameter)) - - banner ("Cross-validation for TANBayes0") - tan0.crossValidateRand () - banner ("Cross-validation for TANBayes") - tan.crossValidateRand () - -} // TANBayesTest3 object - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `TANBayesTest4` object is used to test the `TANBayes0` and `TANBayes` classes. 
- * > runMain scalation.analytics.classifier.TANBayesTest4 - */ -object TANBayesTest4 extends App -{ - val filename = BASE_DIR + "breast-cancer.arff" - var data = Relation (filename, -1, null) - val xy = data.toMatriI2 (null) - val fn = data.colName.slice (0, xy.dim2 - 1).toArray - val cn = Array ("p", "e") // class names - val k = 2 - println ("-" * 60) - - val tan0 = TANBayes0 (xy, fn, k, cn) // create the classifier - tan0.train () - val tan = TANBayes (xy, fn, k, cn) // create the classifier - ClassifierInt.analyze (tan) - - banner ("All Test") - val yp = tan.classify () - tan.contrast (yp) - println (tan.report) - println (tan.summary (tan.parameter)) - - tan0.featureSelection () - tan.featureSelection () - - banner ("Cross-validation for TANBayes0") - tan0.crossValidateRand () - banner ("Cross-validation for TANBayes") - tan.crossValidateRand () - - println ("After feature selection") - - banner ("Cross-validation for TANBayes0") - tan0.crossValidateRand () - banner ("Cross-validation for TANBayes") - tan.crossValidateRand () - -} // TANBayesTest4 object - diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest.class deleted file mode 100644 index a9c267840..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest.tasty deleted file mode 100644 index 64096bbba..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest2.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest2.class deleted file mode 100644 index 
bd941be5c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest2.tasty deleted file mode 100644 index ed8d86742..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest3.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest3.class deleted file mode 100644 index 1c3d09d52..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest3.tasty deleted file mode 100644 index 30c09f522..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest4.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest4.class deleted file mode 100644 index 73fdd3331..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest4.tasty deleted file mode 100644 index 2ad26602d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest5.class 
b/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest5.class deleted file mode 100644 index 683fc79ec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest5.tasty deleted file mode 100644 index 4793d252d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest6.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest6.class deleted file mode 100644 index 3701b682e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest6.tasty deleted file mode 100644 index d44457171..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest7.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest7.class deleted file mode 100644 index 760de1110..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest7.tasty deleted file mode 100644 index a3aafd083..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/randomForestTest7.tasty and /dev/null differ 
diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLDATest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLDATest.class deleted file mode 100644 index 570da9203..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLDATest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLDATest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLDATest.tasty deleted file mode 100644 index d6ab9df97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLDATest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLDATest2.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLDATest2.class deleted file mode 100644 index b1d28e2cc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLDATest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLDATest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLDATest2.tasty deleted file mode 100644 index 8f035df42..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLDATest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest.class deleted file mode 100644 index ab8775122..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest.tasty deleted file mode 100644 index 5aecd9d41..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest3.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest3.class deleted file mode 100644 index 82cd69cc0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest3.tasty deleted file mode 100644 index c3b4e37d2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest4.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest4.class deleted file mode 100644 index 0d9d17782..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest4.tasty deleted file mode 100644 index 5ed098463..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest5.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest5.class deleted file mode 100644 index 5be42fd51..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest5.tasty deleted file mode 100644 index 9f1ad36dd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest6.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest6.class deleted file mode 100644 index 936a0db8c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest6.tasty deleted file mode 100644 index d2430c58f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/simpleLogisticRegressionTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/supportVectorMachineTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/supportVectorMachineTest.class deleted file mode 100644 index 1ea68a778..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/supportVectorMachineTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/supportVectorMachineTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/supportVectorMachineTest.tasty deleted file mode 100644 index af42d1d69..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/supportVectorMachineTest.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/supportVectorMachineTest2.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/supportVectorMachineTest2.class deleted file mode 100644 index afd075916..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/supportVectorMachineTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/supportVectorMachineTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/supportVectorMachineTest2.tasty deleted file mode 100644 index ec227cefd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/supportVectorMachineTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest.class deleted file mode 100644 index 3732a67d2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest.tasty deleted file mode 100644 index 90e23141a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest2.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest2.class deleted file mode 100644 index fad1435ce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest2.tasty deleted file mode 100644 index 4c57c4eb2..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest3.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest3.class deleted file mode 100644 index 3d709f287..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest3.tasty deleted file mode 100644 index 2d1087d27..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest4.class b/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest4.class deleted file mode 100644 index 262efbecf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest4.tasty deleted file mode 100644 index aad4d9a64..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/classifying/tANBayesTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/Algorithm$$anon$1.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/Algorithm$$anon$1.class deleted file mode 100644 index 05b342a32..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/Algorithm$$anon$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/Algorithm$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/Algorithm$.class deleted file mode 100644 index 
171eb8c40..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/Algorithm$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/Algorithm.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/Algorithm.class deleted file mode 100644 index 4bd269ae2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/Algorithm.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/Algorithm.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/Algorithm.tasty deleted file mode 100644 index 4c06147d1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/Algorithm.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/Cluster$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/Cluster$.class deleted file mode 100644 index 8bd22be12..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/Cluster$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/Cluster.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/Cluster.class deleted file mode 100644 index a53bc507e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/Cluster.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/Cluster.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/Cluster.tasty deleted file mode 100644 index 0bd7dede3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/Cluster.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/Clusterer$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/Clusterer$.class deleted file mode 100644 index 18fe31dc6..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/clustering/Clusterer$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/Clusterer.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/Clusterer.class deleted file mode 100644 index cf4a23982..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/Clusterer.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/Clusterer.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/Clusterer.tasty deleted file mode 100644 index d9cdbf621..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/Clusterer.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor$.class deleted file mode 100644 index fa801c9b8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor$package$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor$package$.class deleted file mode 100644 index 4fb1324a0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor$package.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor$package.class deleted file mode 100644 index 83d637a07..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor$package.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor$package.tasty deleted file mode 100644 index 5d405a2f8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor.class deleted file mode 100644 index 9373b94c8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor.tasty deleted file mode 100644 index 74994f2fb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/ClusteringPredictor.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/Distance$package$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/Distance$package$.class deleted file mode 100644 index d015ce953..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/Distance$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/Distance$package.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/Distance$package.class deleted file mode 100644 index 299bbac97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/Distance$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/Distance$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/Distance$package.tasty deleted file mode 100644 index cefa26657..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/Distance$package.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic$.class deleted file mode 100644 index 34a523e4f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic$package$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic$package$.class deleted file mode 100644 index 9fd70ff37..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic$package.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic$package.class deleted file mode 100644 index c0dfa006a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic$package.tasty deleted file mode 100644 index 4e0adf525..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic.class deleted file mode 100644 index 4b019fd55..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic.tasty deleted file mode 100644 index b57b69b20..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/clustering/GapStatistic.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer$.class deleted file mode 100644 index 4954d5053..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer$package$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer$package$.class deleted file mode 100644 index fa3fe5e2c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer$package.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer$package.class deleted file mode 100644 index ec500196d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer$package.tasty deleted file mode 100644 index 440b0e5d5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer.class deleted file mode 100644 index b515c9012..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer.tasty deleted file mode 100644 index 30de8af45..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/HierClusterer.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer$.class deleted file mode 100644 index 9f8229ebd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer$package$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer$package$.class deleted file mode 100644 index cddb523ae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer$package.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer$package.class deleted file mode 100644 index ad920419b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer$package.tasty deleted file mode 100644 index 8319ce9b1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer.class deleted file mode 100644 index c11a33658..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer.tasty deleted file mode 100644 index dfcbae5d9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2$.class deleted file mode 100644 index 4cd76c984..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2$package$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2$package$.class deleted file mode 100644 index fc27ddbb9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2$package.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2$package.class deleted file mode 100644 index 6345f8a7e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2$package.tasty deleted file mode 100644 index 3b97c6d7e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2.class 
b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2.class deleted file mode 100644 index fd9429c0f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2.tasty deleted file mode 100644 index 740af3fd8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClusterer2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW$.class deleted file mode 100644 index ccd25fcf2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW$package$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW$package$.class deleted file mode 100644 index 0fe5abd2f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW$package.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW$package.class deleted file mode 100644 index c00573819..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW$package.tasty deleted file mode 100644 index fa1f95c02..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW.class deleted file mode 100644 index b16e80ff5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW.tasty deleted file mode 100644 index 73e6619e1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererHW.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP$.class deleted file mode 100644 index 652822b90..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP$package$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP$package$.class deleted file mode 100644 index 9ae80ba83..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP$package.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP$package.class deleted file mode 100644 index 178ee0117..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP$package.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP$package.tasty deleted file mode 100644 index 752b8a987..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP.class deleted file mode 100644 index d63bbae7f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP.tasty deleted file mode 100644 index b464a254d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererPP.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererSSE.scalaa b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererSSE.scalaa deleted file mode 100644 index 865e592ac..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansClustererSSE.scalaa +++ /dev/null @@ -1,428 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 1.6 - * @date Sat Apr 27 12:55:24 EDT 2019 - * @see LICENSE (MIT style license file). 
- * - * @note Model: K-Means Clustering (to minimize SSE) - */ - -package scalation.analytics.clusterer - -// U N D E R D E V E L O P M E N T - -import scala.runtime.ScalaRunTime.stringOf -import scala.util.control.Breaks.{breakable, break} - -import scalation.linalgebra.{MatriD, MatrixD, VectoD, VectorD, VectoI, VectorI} -import scalation.plot.Plot -import scalation.random.Randi -import scalation.util.{banner, Error} - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `KMeansClustererSSE` class cluster several vectors/points using k-means - * clustering. Randomly assign points to 'k' clusters (primary technique). - * Iteratively, reassign each point to the cluster containing - * the closest centroid. Stop when there are no changes to the clusters. - * @see `KMeansClusterer2` for secondary technique. - *----------------------------------------------------------------------------- - * @param x the vectors/points to be clustered stored as rows of a matrix - * @param k the number of clusters to make - * @param flags the flags used to adjust the algorithm - */ -class KMeansClustererSSE (x: MatriD, k: Int, flags: Array [Boolean] = Array (false, false)) - extends Clusterer with Error -{ - if (k >= x.dim1) flaw ("constructor", "k must be less than the number of vectors") - - private val DEBUG = false // debug flag - private val IMMEDIATE = true // reassign returns after first change flag - protected val MAX_ITER = 1000 // the maximum number of iterations - protected val (m, n) = (x.dim1, x.dim2) // number of rows and columns - - protected val to_c = Array.ofDim [Int] (m) // assignment of points to clusters - protected val clu = Array.fill (k)(Cluster ()) - Cluster.reset () - - protected val (post, immediate) = (flags(0), flags(1)) // (post processing swapping, immediate return upon change) - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the sum of squared errors within the clusters, 
where error is - * indicated by e.g., the distance from a point to its centroid. - * @param x the data matrix - * @param to_c the cluster assignments - */ - def sse (x: MatriD, clu: Array [Cluster], to_c: Array [Int]): Double = - { - var sum = 0.0 - for (i <- x.range1) sum += dist (x(i), clu(to_c(i)).cen) - sum - } // sse - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the distances between vector/point 'u' and the centroids. - * @param u the given vector/point - * @param clu array of clusters - */ - private def distance (u: VectoD, clu: Array [Cluster]): VectoD = - { - val du = new VectorD (clu.size) - for (c <- clu.indices) du(c) = dist (u, clu(c).cen) - du - } // distance - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Iteratively recompute clusters until the assignment of points does not - * change. Initialize by randomly assigning points to 'k' clusters. - */ - def train (): KMeansClustererSSE = - { - assign () // randomly assign points to clusters - fixEmptyClusters () // swap points into empty clusters - calcCentroids () // calculate the initial centroids - if (DEBUG) println (s"train: l = 0, clu = ${stringOf (clu)}") - - var it = 0 - breakable { for (l <- 1 to MAX_ITER) { - if (reassign ()) break () // reassign points to clusters (no change => break) - calcCentroids () // re-calculate the centroids - if (DEBUG) println (s"train: l = $l, clu = ${stringOf (clu)}") - it += 1 - }} // for - if (DEBUG) println (s"terminated after $it iterations") - emptyClusters () // should not have any empty clusters - - move2 () - swap () - this - } // train - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the cluster assignment vector. Should only be called after `train`. - */ - def cluster: Array [Int] = to_c - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the centroids. 
Should only be called after `train`. - */ - def centroids: MatriD = null - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the sizes of the centroids. Should only be called after `train`. - */ - def csize: VectoI = null - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Randomly assign each vector/point 'x(i)' to a random cluster. - * Primary technique for initiating the clustering. - */ - protected def assign (): Unit = - { - val ran = new Randi (0, k-1, stream) // for random integers: 0, ..., k-1 - for (i <- x.range1) { - to_c(i) = ran.igen // randomly assign x(i) to a cluster - clu(to_c(i)).np += 1 // increment size of that cluster - } // for - } // assign - - def largest: Int = - { - var jm = 0 - for (j <- 1 until k) if (clu(j).np > clu(jm).np) jm = j - jm - } // largest - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Fix all empty clusters by taking a point from the largest cluster. - */ - protected def fixEmptyClusters (): Unit = - { - if (DEBUG) println (s"fixEmptyClusters: to_c = ${stringOf (to_c)}") - - for (c <- 0 until k if ! (to_c contains c)) { // for each empty cluster - if (DEBUG) println (s"fixEmptyClusters: cluster c = $c is empty!") - val biggest = largest // biggest cluster - val indices = to_c.indices.filter (to_c(_) == biggest) // indices of elements in biggest cluster - - val ran = new Randi (0, indices.size-1) // random integer generator - val i = indices(ran.igen) // randomly pick one point from biggest cluster - clu(to_c(i)).np -= 1 // decrement size of previous cluster - clu(c).np += 1 // increment size of cluster c - to_c(i) = c // reassign vector x(i) to cluster c - if (DEBUG) println (s"fixEmptyClusters: to_c = ${stringOf (to_c)}") - } // for - } // fixEmptyClusters - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Check for empty clusters and throw an execption if found. 
- */ - def emptyClusters (): Unit = - { - for (c <- 0 until k if ! (to_c contains c)) throw new Exception (s"Empty cluster c = $c") - } // emptyClusters - - private def distance (i: Int, c1: Int, clu: Array [Cluster]): VectoD = - { - val d = new VectorD (k) - val sse_c1 = clu(c1).ssef (x, to_c) - to_c(i) = -1 -// val del_c1 = clu(c1).ssef (x, to_c, clu(c1).cenf (x, to_c)) - sse_c1 - val del_c1 = clu(c1).ssef (x, to_c) - sse_c1 - d(c1) = 0 - for (c <- 0 until k if c != c1) { - val sse_c = clu(c).ssef (x, to_c) - to_c(i) = c -// val del_c = clu(c).ssef (x, to_c, clu(c).cenf (x, to_c)) - sse_c - val del_c = clu(c).ssef (x, to_c) - sse_c - d(c) = del_c1 + del_c - } // for - to_c(i) = c1 - //println (s"distance = $d") - d - } // distance - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reassign each vector/point to the cluster with the closest centroid. - * Indicate done, if no points changed clusters (for stopping rule). - */ - protected def reassign (): Boolean = - { - var done = true // done indicates no changes - for (i <- x.range1) { - val c1 = to_c(i) - if (clu(c1).np > 1) { - val d = distance (i, c1, clu) // distances to all centroid - val c2 = d.argmin () // u's (current, closest) cluster - if (d(c2) < 0.0) { // closest closer than current - clu(c1).np -= 1 // decrement size of current cluster - clu(c2).np += 1 // increment size of new cluster - to_c(i) = c2 // reassign vector x(i) to cluster c2 - done = false // changed clusters => not done -// if (IMMEDIATE) return false // return after first change - } // if - } // if - } // for - done // return whether there were no changes - } // reassign - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Calculate the centroids based on current assignment of points to clusters. 
- */ - protected def calcCentroids (): Unit = - { - for (j <- 0 until k) { - val cl = clu(j) - cl.set_cen (cl.cenf (x, to_c)) - if (DEBUG) println (s"calcCentroids ($j): cen = ${cl.cen}") - } // for - } // calcCentroids - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given a new point/vector 'z', determine which cluster it belongs to, - * i.e., the cluster whose centroid it is closest to. - * @param z the vector to classify - */ - def classify (z: VectoD): Int = distance (z, clu).argmin () - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Check to see if the sum of squared errors is optimum. - * @param opt the known (from human/oracle) optimum - */ - def checkOpt (opt: Double): Boolean = sse (x, clu, to_c) <= opt - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Swap clusters for points 'x(i)' and 'x(j)'. - * param i the inded for point x(i) - * param j the inded for point x(j) - */ - private def swapPoints (i: Int, j: Int): Unit = - { - val temp = to_c(i) - to_c(i) = to_c(j) - to_c(j) = temp - calcCentroids () - } // swapPoints - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Try all pairwise swaps and make them if 'sse' improves. - */ - protected def swap (): Unit = - { - for (i <- 0 until x.dim1-1; j <- i+1 until x.dim1 if to_c(i) != to_c(j)) { - val sum1 = sse (x, clu, to_c) + sse (x, clu, to_c) - swapPoints (i, j) - val sum2 = sse (x, clu, to_c) + sse (x, clu, to_c) - if (DEBUG) println (s"sum1 = $sum1 vs. 
sum2 = $sum2") - if (sum2 > sum1) swapPoints (i, j) // if not better, swap back - } // for - } // swap - - def move2 (): Unit = - { - for (c <- 0 until k if clu(c).np > 2) { - var min_sse = Double.MaxValue - var iMin, ciMin, jMin, cjMin = -1 - val idx = to_c.indices.filter (to_c(_) == c) - if (DEBUG) println (s"move2: to_c = ${stringOf (to_c)}") - if (DEBUG) println (s"move2: cluster $c has ${clu(c).np} points, idx = $idx") - for (i <- idx; j <- idx if i != j) { - var ci_min, cj_min = -1 - var di_min, dj_min = Double.MaxValue - for (c2 <- 0 until k if c2 != c) { - val di = dist (x(i), clu(c2).cen) - if (di < di_min) { di_min = di; ci_min = c2 } - val dj = dist (x(j), clu(c2).cen) - if (dj < dj_min) { dj_min = dj; cj_min = c2 } - } // for - val sse1 = sse (x, clu, to_c) - to_c(i) = ci_min; to_c(j) = cj_min // check move to other clusters - clu(c).np -= 2; clu(ci_min).np += 1; clu(cj_min).np += 1 - if (DEBUG) println (s"move2: to_c = ${stringOf (to_c)}") - calcCentroids () - val sse2 = sse (x, clu, to_c) - if (DEBUG) println (s"sse1 = $sse1") - if (DEBUG) println (s"sse2 = $sse2") - if (sse2 < min_sse) { min_sse = sse2; iMin = i; ciMin = ci_min; jMin = j; cjMin = cj_min } - to_c(i) = c; to_c(j) = c // move back - clu(c).np += 2; clu(ci_min).np -= 1; clu(cj_min).np -= 1 - calcCentroids () - } // for - to_c(iMin) = ciMin; to_c(jMin) = cjMin // best move to other clusters - clu(c).np -= 2; clu(ciMin).np += 1; clu(cjMin).np += 1 - } // for - } // move2 - -} // KMeansClustererSSE class - -import Clusterer.test - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `KMeansClustererSSETest` object is used to test the `KMeansClustererSSE` class. 
- * > runMain scalation.analytics.clusterer.KMeansClustererSSETest - */ -object KMeansClustererSSETest extends App -{ - import Clusterer.x - - val k = 3 - val opt = 3.0 - - println ("x = " + x) - println ("k = " + k) - println ("----------------------------------------------------") - - val tf = Array (true, false) - for (fl0 <- tf; fl1 <- tf) { - val fls = Array (fl0, fl1) - test (x, fls, new KMeansClustererSSE (x, k, fls), opt) - } // for - - new Plot (x.col(0), x.col(1), null, "x0 vs x1") - -} // KMeansClustererSSETest object - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `KMeansClustererSSETest2` object is used to test the `KMeansClustererSSE` class. - * > runMain scalation.analytics.clusterer.KMeansClustererSSETest2 - */ -object KMeansClustererSSETest2 extends App -{ - // x0 x1 - val x = new MatrixD ((8, 2), 1.0, 1.0, - 1.0, 3.0, - 5.0, 18.0, - 5.0, 20.0, - 9.0, 10.0, - 9.0, 12.0, - 15.0, 30.0, - 15.0, 32.0) - - val k = 4 - val opt = 8.0 - - println ("x = " + x) - println ("k = " + k) - println ("----------------------------------------------------") - - val tf = Array (true, false) - for (fl0 <- tf; fl1 <- tf) { - val fls = Array (fl0, fl1) - test (x, fls, new KMeansClustererSSE (x, k, fls), opt) - } // for - - new Plot (x.col(0), x.col(1), null, "x0 vs x1") - -} // KMeansClustererSSETest2 object - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `KMeansClustererSSETest2` object is used to test the `KMeansClustererSSE` class. 
- * > runMain scalation.analytics.clusterer.KMeansClustererSSETest3 - */ -object KMeansClustererSSETest3 extends App -{ - import scalation.random.{Bernoulli, Normal} - - val coin = Bernoulli () - val dist1 = Normal (2.0, 1.0) - val dist2 = Normal (8.0, 1.0) - val x = new MatrixD (50, 2) - val k = 4 - val opt = 76.0 - - for (i <- x.range1) x(i) = VectorD (if (coin.gen == 0) dist1.gen else dist2.gen, - if (coin.gen == 0) dist1.gen else dist2.gen) - - println ("x = " + x) - println ("k = " + k) - println ("----------------------------------------------------") - - val tf = Array (true, false) - for (fl0 <- tf; fl1 <- tf) { - val fls = Array (fl0, fl1) - test (x, fls, new KMeansClustererSSE (x, k, fls), opt) - } // for - - new Plot (x.col(0), x.col(1)) - -} // KMeansClustererSSETest3 object - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `KMeansClustererSSETest4` object is used to test the `KMeansClustererSSE` class. - * > runMain scalation.analytics.clusterer.KMeansClustererSSETest4 - */ -object KMeansClustererSSETest4 extends App -{ - import scalation.random.{Normal, Bernoulli} - - val coin = Bernoulli () - val dist1 = Normal (2.0, 1.0) - val dist2 = Normal (8.0, 1.0) - val x = new MatrixD (100, 2) - val k = 4 - val opt = 171.0 - - for (i <- x.range1) x(i) = VectorD (if (coin.gen == 0) dist1.gen else dist2.gen, - if (coin.gen == 0) dist1.gen else dist2.gen) - -// import org.apache.commons.math3.ml.clustering.KMeansPlusPlusClusterer -// val cl = new KMeansPlusPlusClusterer (k) - - println ("x = " + x) - println ("k = " + k) - println ("----------------------------------------------------") - - val tf = Array (true, false) - for (fl0 <- tf; fl1 <- tf) { - val fls = Array (fl0, fl1) - test (x, fls, new KMeansClustererSSE (x, k, fls), opt) - } // for - - new Plot (x.col(0), x.col(1)) - -} // KMeansClustererSSETest4 object - diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer$.class 
b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer$.class deleted file mode 100644 index ab6aa9930..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer$package$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer$package$.class deleted file mode 100644 index 55e2abfd1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer$package.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer$package.class deleted file mode 100644 index c83727ecf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer$package.tasty deleted file mode 100644 index 2b096b562..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer.class deleted file mode 100644 index 8d4c9598f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer.tasty deleted file mode 100644 index e15a82f50..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClusterer.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClustererTester$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClustererTester$.class deleted file mode 100644 index f6187d67c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClustererTester$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClustererTester.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClustererTester.class deleted file mode 100644 index df270d9fe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClustererTester.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClustererTester.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClustererTester.tasty deleted file mode 100644 index 4815d9c73..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/KMeansPPClustererTester.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClusterer$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClusterer$.class deleted file mode 100644 index c547685f8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClusterer$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClusterer.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClusterer.class deleted file mode 100644 index 56cc6692b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClusterer.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClusterer.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClusterer.tasty deleted file mode 100644 index bf63e3946..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClusterer.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClustering$package$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClustering$package$.class deleted file mode 100644 index a9e8bd65d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClustering$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClustering$package.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClustering$package.class deleted file mode 100644 index b7b56c514..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClustering$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClustering$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClustering$package.tasty deleted file mode 100644 index 2a222a167..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/MarkovClustering$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/RandomGraph$package$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/RandomGraph$package$.class deleted file mode 100644 index d0666e365..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/RandomGraph$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/RandomGraph$package.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/RandomGraph$package.class deleted file mode 100644 index 4bbf82881..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/clustering/RandomGraph$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/RandomGraph$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/RandomGraph$package.tasty deleted file mode 100644 index 762f9759f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/RandomGraph$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/RandomGraph.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/RandomGraph.class deleted file mode 100644 index 4ff6bf299..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/RandomGraph.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/RandomGraph.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/RandomGraph.tasty deleted file mode 100644 index 5997293ce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/RandomGraph.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer$.class deleted file mode 100644 index 98ec82524..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer$package$.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer$package$.class deleted file mode 100644 index 54304c23a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer$package.class 
b/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer$package.class deleted file mode 100644 index 66cef65a9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer$package.tasty deleted file mode 100644 index eb85073a9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer.class deleted file mode 100644 index 760a56a7b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer.tasty deleted file mode 100644 index 974652534..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/TightClusterer.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest.class deleted file mode 100644 index 3c15f7339..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest.tasty deleted file mode 100644 index 204dcd6b2..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest2.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest2.class deleted file mode 100644 index f63701e0e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest2.tasty deleted file mode 100644 index 3ab67094f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest3.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest3.class deleted file mode 100644 index 52bf4d05f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest3.tasty deleted file mode 100644 index a5d25be30..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/clusteringPredictorTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/gapStatisticTest.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/gapStatisticTest.class deleted file mode 100644 index 2744a8a2d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/gapStatisticTest.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/clustering/gapStatisticTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/gapStatisticTest.tasty deleted file mode 100644 index 6e0a9d7e5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/gapStatisticTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/gapStatisticTest2.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/gapStatisticTest2.class deleted file mode 100644 index 3bff76bb4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/gapStatisticTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/gapStatisticTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/gapStatisticTest2.tasty deleted file mode 100644 index 5b7a2eeb3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/gapStatisticTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/hierClustererTest.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/hierClustererTest.class deleted file mode 100644 index 5d9a89e56..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/hierClustererTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/hierClustererTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/hierClustererTest.tasty deleted file mode 100644 index d69a205af..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/hierClustererTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/hierClustererTest2.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/hierClustererTest2.class deleted file mode 100644 index ed3d96c65..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/clustering/hierClustererTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/hierClustererTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/hierClustererTest2.tasty deleted file mode 100644 index a412fa265..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/hierClustererTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/index.html b/target/scala-3.6.4/classes/scalation/modeling/clustering/index.html deleted file mode 100644 index 9b5599e86..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/clustering/index.html +++ /dev/null @@ -1,22 +0,0 @@ - - -

    Source files in clustering Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test.class deleted file mode 100644 index 434c90a9d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test.tasty deleted file mode 100644 index 0c663718a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test2.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test2.class deleted file mode 100644 index 1126db49d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test2.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test2.tasty deleted file mode 100644 index 53372aa4b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test3.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test3.class deleted file mode 100644 index 006788a6f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test3.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test3.tasty deleted file mode 100644 index 44d2a43d3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test4.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test4.class deleted file mode 100644 index 4df87c5ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test4.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test4.tasty deleted file mode 100644 index 401d27bab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClusterer2Test4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest.class deleted file mode 100644 index a75cccf2c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest.tasty deleted file mode 100644 index facde8d71..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest2.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest2.class deleted file mode 100644 index d8d2ad45e..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest2.tasty deleted file mode 100644 index 2c5688c08..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest3.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest3.class deleted file mode 100644 index 4c728fd85..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest3.tasty deleted file mode 100644 index bb3048bef..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererHWTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest.class deleted file mode 100644 index d45618c3c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest.tasty deleted file mode 100644 index 1e76ad4f4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest2.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest2.class deleted file mode 100644 index ae4a95de5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest2.tasty deleted file mode 100644 index c23149f65..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest3.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest3.class deleted file mode 100644 index 8e6454b40..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest3.tasty deleted file mode 100644 index 975a11f3b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererPPTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest.class deleted file mode 100644 index 0a1ed0eee..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest.tasty deleted file mode 100644 index 
11b4d1ba7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest2.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest2.class deleted file mode 100644 index 0fb19d37b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest2.tasty deleted file mode 100644 index 2a1c4dc52..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest3.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest3.class deleted file mode 100644 index 01a9f15f4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest3.tasty deleted file mode 100644 index 09678394d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest4.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest4.class deleted file mode 100644 index 74a2eabea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest4.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest4.tasty deleted file mode 100644 index 9152f8a2b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansClustererTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest.class deleted file mode 100644 index 83b3be70f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest.tasty deleted file mode 100644 index 28167605e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest2.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest2.class deleted file mode 100644 index 7aa8be844..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest2.tasty deleted file mode 100644 index f56e4cbfd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest3.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest3.class deleted file mode 100644 index 
b7db1f3f4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest3.tasty deleted file mode 100644 index 0418c24bb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest4.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest4.class deleted file mode 100644 index 1bec2d45d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest4.tasty deleted file mode 100644 index 2da27727d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/kMeansPPClustererTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/markovClustererTest.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/markovClustererTest.class deleted file mode 100644 index c00fb01f7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/markovClustererTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/markovClustererTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/markovClustererTest.tasty deleted file mode 100644 index 33a970713..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/markovClustererTest.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/clustering/markovClustererTest2.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/markovClustererTest2.class deleted file mode 100644 index 712647b36..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/markovClustererTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/markovClustererTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/markovClustererTest2.tasty deleted file mode 100644 index 02b236bf8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/markovClustererTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/randomGraphTest.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/randomGraphTest.class deleted file mode 100644 index 0d6100a72..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/randomGraphTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/randomGraphTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/randomGraphTest.tasty deleted file mode 100644 index 1be5ee526..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/randomGraphTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/tightClustererTest.class b/target/scala-3.6.4/classes/scalation/modeling/clustering/tightClustererTest.class deleted file mode 100644 index 937b752ca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/clustering/tightClustererTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/clustering/tightClustererTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/clustering/tightClustererTest.tasty deleted file mode 100644 index d3cbb8596..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/clustering/tightClustererTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_Correlation.class b/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_Correlation.class deleted file mode 100644 index 2e402d6a9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_Correlation.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_Correlation.tasty b/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_Correlation.tasty deleted file mode 100644 index f6473d5b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_Correlation.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_NullModel.class b/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_NullModel.class deleted file mode 100644 index 254eb7993..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_NullModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_NullModel.tasty b/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_NullModel.tasty deleted file mode 100644 index 0dc232e97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_NullModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_QuadRegression.class b/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_QuadRegression.class deleted file mode 100644 index ff692f044..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_QuadRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_QuadRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_QuadRegression.tasty 
deleted file mode 100644 index 78493294b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_QuadRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_Regression.class b/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_Regression.class deleted file mode 100644 index 3b017b76e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_Regression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_Regression.tasty b/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_Regression.tasty deleted file mode 100644 index 68ea43be1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_Regression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_SimpleRegression.class b/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_SimpleRegression.class deleted file mode 100644 index bf33af246..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_SimpleRegression.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_SimpleRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_SimpleRegression.tasty deleted file mode 100644 index 194dc6da6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_SimpleRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_SimplerRegression.class b/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_SimplerRegression.class deleted file mode 100644 index e2d4c589f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_SimplerRegression.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_SimplerRegression.tasty b/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_SimplerRegression.tasty deleted file mode 100644 index c89745ff0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_AutoMPG_SimplerRegression.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/example_BPressureTest.class b/target/scala-3.6.4/classes/scalation/modeling/example_BPressureTest.class deleted file mode 100644 index ec27f183e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_BPressureTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/example_BPressureTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/example_BPressureTest.tasty deleted file mode 100644 index b7a1d2025..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_BPressureTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/example_BPressureTest2.class b/target/scala-3.6.4/classes/scalation/modeling/example_BPressureTest2.class deleted file mode 100644 index 3aeace8d9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_BPressureTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/example_BPressureTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/example_BPressureTest2.tasty deleted file mode 100644 index 64d9c205b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_BPressureTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/example_BasketBallTest.class b/target/scala-3.6.4/classes/scalation/modeling/example_BasketBallTest.class deleted file mode 100644 index 22cd79969..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_BasketBallTest.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/example_BasketBallTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/example_BasketBallTest.tasty deleted file mode 100644 index 8ba807b44..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/example_BasketBallTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest.class deleted file mode 100644 index df505e99b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest.tasty deleted file mode 100644 index 07a738ea8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest2.class deleted file mode 100644 index 3b2532770..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest2.tasty deleted file mode 100644 index 65e02cd94..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest3.class b/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest3.class deleted file mode 100644 index 0e2873366..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest3.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest3.tasty deleted file mode 100644 index 711b9b530..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/expRegressionTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/fitTest.class b/target/scala-3.6.4/classes/scalation/modeling/fitTest.class deleted file mode 100644 index 55c056311..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/fitTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/fitTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/fitTest.tasty deleted file mode 100644 index fa508b330..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/fitTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/fitTest2.class b/target/scala-3.6.4/classes/scalation/modeling/fitTest2.class deleted file mode 100644 index a8b81447e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/fitTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/fitTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/fitTest2.tasty deleted file mode 100644 index d1b1a567f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/fitTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR$.class deleted file mode 100644 index 47d0f5615..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR$package$.class deleted file mode 100644 index d337f1d78..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR$package$.class and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR$package.class deleted file mode 100644 index 00b7f9433..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR$package.tasty deleted file mode 100644 index 616472859..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR.class deleted file mode 100644 index 7922076c0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR.tasty deleted file mode 100644 index 7d666224f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/AR.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA$.class deleted file mode 100644 index ee63d37e7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA$package$.class deleted file mode 100644 index c4ca35f00..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA$package$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA$package.class deleted file mode 100644 index 4b5fe0d8a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA$package.tasty deleted file mode 100644 index c51edad0d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA.class deleted file mode 100644 index f28db11cd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA.tasty deleted file mode 100644 index 12af6ceda..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff$.class deleted file mode 100644 index fa7bc5c9d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff$package$.class deleted file mode 100644 index 234a2dd1c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff$package$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff$package.class deleted file mode 100644 index 1e2b7a74a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff$package.tasty deleted file mode 100644 index 1a07b2180..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff.class deleted file mode 100644 index 9e2b378f0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff.tasty deleted file mode 100644 index 7e423d778..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARIMA_diff.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA$.class deleted file mode 100644 index d18ec6708..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA$package$.class deleted file mode 100644 index bda181030..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA$package$.class and /dev/null differ 
diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA$package$CG$2$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA$package$CG$2$.class deleted file mode 100644 index 28a84e74c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA$package$CG$2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA$package.class deleted file mode 100644 index b2d343bdd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA$package.tasty deleted file mode 100644 index 8b1a092d0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA.class deleted file mode 100644 index 2c85c0631..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA.tasty deleted file mode 100644 index deb4ff357..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARMA.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX$.class deleted file mode 100644 index 965aa7265..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX$package$.class deleted file mode 100644 index 0d0d454d3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX$package.class deleted file mode 100644 index 808dc1bb2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX$package.tasty deleted file mode 100644 index 76b493973..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX.class deleted file mode 100644 index 4c1a4b774..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX.tasty deleted file mode 100644 index d73afd2e7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D$.class deleted file mode 100644 index 1bab92792..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D$package$.class deleted file mode 100644 index d6d75a9e5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D$package.class deleted file mode 100644 index 1d7aeba02..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D$package.tasty deleted file mode 100644 index f8908ccce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D.class deleted file mode 100644 index ef45b0ecd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D.tasty deleted file mode 100644 index 46252fe4e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_D.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad$.class deleted file mode 100644 index 0b795bd9e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad$package$.class deleted file mode 100644 index d4f628d97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad$package.class deleted file mode 100644 index 358a9f4fe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad$package.tasty deleted file mode 100644 index b94231109..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad.class deleted file mode 100644 index cbb0c2cf7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad.tasty deleted file mode 100644 index b34ff48f3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D$.class deleted file mode 100644 index 97c718068..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D$.class and /dev/null 
differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D$package$.class deleted file mode 100644 index 67ca45386..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D$package.class deleted file mode 100644 index 07705e6e2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D$package.tasty deleted file mode 100644 index 981dbd448..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D.class deleted file mode 100644 index 8ada07e01..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D.tasty deleted file mode 100644 index b1feca9bf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Quad_D.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb$.class deleted file mode 100644 index 9ef9decc1..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb$package$.class deleted file mode 100644 index 55c508743..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb$package.class deleted file mode 100644 index 0c02634fa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb$package.tasty deleted file mode 100644 index eeb2b4ec5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb.class deleted file mode 100644 index cdb957d44..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb.tasty deleted file mode 100644 index 9a131d451..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D$.class deleted file mode 100644 index 
07f1685b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D$package$.class deleted file mode 100644 index 611a955b6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D$package.class deleted file mode 100644 index 417c4df2c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D$package.tasty deleted file mode 100644 index bed431cdc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D.class deleted file mode 100644 index fa7571002..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D.tasty deleted file mode 100644 index c74f3b27d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARX_Symb_D.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY$.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY$.class deleted file mode 100644 index 094c6acf1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY$package$.class deleted file mode 100644 index caab39cdf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY$package.class deleted file mode 100644 index cdee70e8d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY$package.tasty deleted file mode 100644 index edce84c38..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY.class deleted file mode 100644 index b3325384c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY.tasty deleted file mode 100644 index 1c4be1f70..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D$.class 
deleted file mode 100644 index 81a8093c0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D$package$.class deleted file mode 100644 index 009b0de5e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D$package.class deleted file mode 100644 index 62c3a30c0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D$package.tasty deleted file mode 100644 index 835bbf3c2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D.class deleted file mode 100644 index 74c3bedcf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D.tasty deleted file mode 100644 index 029962739..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_D.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad$.class deleted file mode 100644 index 
5f27324f5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad$package$.class deleted file mode 100644 index 0df9f23ea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad$package.class deleted file mode 100644 index 66d2beecd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad$package.tasty deleted file mode 100644 index 6a133873e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad.class deleted file mode 100644 index 2965fe108..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad.tasty deleted file mode 100644 index 2fce1dc2a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ARY_Quad.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Baseline.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Baseline.class deleted file mode 
100644 index 99ec10c46..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Baseline.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Baseline.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Baseline.tasty deleted file mode 100644 index d5e27c911..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Baseline.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Baselines$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Baselines$package$.class deleted file mode 100644 index c164c8473..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Baselines$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Baselines$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Baselines$package.class deleted file mode 100644 index ab2dc167b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Baselines$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Baselines$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Baselines$package.tasty deleted file mode 100644 index 0da6c870b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Baselines$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW$.class deleted file mode 100644 index d527abcc9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW$package$.class 
deleted file mode 100644 index 1a14ded3f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW$package.class deleted file mode 100644 index 589614a10..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW$package.tasty deleted file mode 100644 index 8d2a7a701..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW.class deleted file mode 100644 index 99b6087be..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW.tasty deleted file mode 100644 index cf705cb59..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/DTW.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Diagnoser.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Diagnoser.class deleted file mode 100644 index bd1d7d7fd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Diagnoser.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Diagnoser.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Diagnoser.tasty deleted file mode 100644 index 4d1e637ac..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Diagnoser.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid$.class deleted file mode 100644 index b4e80610a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid$package$.class deleted file mode 100644 index 93fbb5286..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid$package.class deleted file mode 100644 index 6ae0bd51f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid$package.tasty deleted file mode 100644 index 1dea625ac..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid.class deleted file mode 100644 index 9b7db0103..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid.tasty deleted file mode 100644 index a40688872..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_Covid.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace$.class deleted file mode 100644 index 1b95ca0fb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace$package$.class deleted file mode 100644 index aa22d88df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace$package.class deleted file mode 100644 index e53c79769..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace$package.tasty deleted file mode 100644 index dff885c01..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace.class deleted file mode 100644 index a055dbd98..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace.tasty deleted file mode 100644 index 9eedcd8ea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_GasFurnace.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI$.class deleted file mode 100644 index a6d1ad002..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI$package$.class deleted file mode 100644 index 9cc5cd281..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI$package.class deleted file mode 100644 index 211f06183..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI$package.tasty deleted file mode 100644 index b0186c8b5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI.class deleted file mode 100644 index 4287f3c73..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI.tasty deleted file mode 100644 index bc3110cb7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_ILI.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_LakeLevels$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_LakeLevels$.class deleted file mode 100644 index 6b45da407..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_LakeLevels$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_LakeLevels.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_LakeLevels.class deleted file mode 100644 index 7bf9a08e7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_LakeLevels.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_LakeLevels.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_LakeLevels.tasty deleted file mode 100644 index 1e7f3f177..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Example_LakeLevels.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix$.class deleted file mode 100644 index 56bfcdf16..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix$package$.class deleted file mode 100644 index 4e528c626..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix$package.class deleted file mode 100644 index 3058b2ae4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix$package.tasty deleted file mode 100644 index 3104cbc11..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix.class deleted file mode 100644 index 20291c42d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix.tasty deleted file mode 100644 index 21ef7ad64..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/ForecastMatrix.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster$.class deleted file mode 100644 index 5d1c0668c..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster.class deleted file mode 100644 index 190a9a9fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster.tasty deleted file mode 100644 index c64d746ac..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_D$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_D$.class deleted file mode 100644 index 1e697d91c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_D$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_D.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_D.class deleted file mode 100644 index e214b0e5e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_D.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_D.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_D.tasty deleted file mode 100644 index 7803a7d9e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_D.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_Reg$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_Reg$.class deleted file mode 100644 index d11c6a944..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_Reg$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_Reg.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_Reg.class deleted file mode 100644 index 9d8c09c1f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_Reg.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_Reg.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_Reg.tasty deleted file mode 100644 index 9e88125e3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Forecaster_Reg.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MA.scalaa b/target/scala-3.6.4/classes/scalation/modeling/forecasting/MA.scalaa deleted file mode 100644 index 0632d12b5..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MA.scalaa +++ /dev/null @@ -1,321 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Nov 25 13:22:27 EST 2024 - * @see LICENSE (MIT style license file). 
- * - * @note Model: Moving-Average (MA) - * - * @see real-statistics.com/time-series-analysis/moving-average-processes/ma-coefficients-acf/ - * @see people.stat.sc.edu/hitchcock/stat520ch7slides.pdf - */ - -// U N D E R D E V E L O P M E N T - -package scalation -package modeling -package forecasting - -import scala.math.{abs, sqrt} - -import scalation.mathstat._ -import scalation.optimization.quasi_newton.{BFGS => Optimizer} // change import to change optimizer -//import scalation.optimization.quasi_newton.{LBFGS => Optimizer} -import scalation.random.NormalVec_c - -import Example_Covid.loadData_y -import Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `MA` class provides basic time series analysis capabilities for Moving-Average - * (MA) models. MA models are often used for forecasting. - * Given time series data stored in vector y, its next value y_t = combination of last q errors. - * - * y_t = δ + b dot [e_t-1, ..., e_t-q) + e_t - * - * where y_t is the value of y at time t and e_t is the residual/error term. 
- * @param y the response vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults to AR.hp) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ -class MA (y: VectorD, hh: Int, tRng: Range = null, - hparam: HyperParameter = AR.hp, - bakcast: Boolean = false) - extends Forecaster (y, hh, tRng, hparam, bakcast) - with Correlogram (y): - - private val debug = debugf ("MA", true) // debug function - private val flaw = flawf ("MA") // flaw function - private val q = hparam("q").toInt // use the last q errors/shocks - private val useMoM = false // estimate using Method of Moments (MoM) - private var δ = NO_DOUBLE // drift/intercept/constant term (mean) - private var z = VectorD.nullv // var for centered time series - - modelName = s"MA($q)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `MA` model to the times-series data in vector y_. - * Estimate the coefficient vector b for a q-th order Moving-Average MA(q) model. - * Uses Nonlinear Least Squares Estimation (LSE) to determine the coefficients. - * The b (θ) vector holds the coefficients multiplying previous errors/shocks. 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - override def train (x_null: MatrixD, y_ : VectorD): Unit = - makeCorrelogram (y_) // correlogram computes, acf, pacf, psi matrix - val r = acF // autocorrelation function (requires makeCorrelogram) - δ = statsF.mu // compute drift/intercept (just the mean) - - if useMoM && q == 1 then // get MoM estimate by solving a quadratic equation - b = MA.solve_MA1 (r(1)) // coefficients for MA(1) using MoM - else // get LSE estimate using Nonlinear Optimzer - b = NormalVec_c (q, 0.0, 0.1).gen // randomly initialize the coefficients - val mu = y_.mean // sample mean of y_ - z = y_ - mu // optimization works better using zero-centered data - val optimizer = new Optimizer (css) // apply Quasi-Newton optimizer - val (fb, bb) = optimizer.solve (b, 0.5) // optimal solution for loss function and parameters - b = bb // assign optimized parameters to vector b - - debug ("train", s"optimize q = $q, δ = $δ, b = $b, r = $r") - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the conditional sum of squared errors (loss function to optimize). - * @param b_ the working copy of the parameter vector b = θ. - */ - def css (b_ : VectorD): Double = - b = b_.copy // copy parameters from b_ vector - val z_ = z(1 until z.dim) // skip first (backcasted) value - val zp = predictAll (z) // predicted value for z -// debug ("css", s"z_.dim = ${z_.dim}, zp.dim = ${zp.dim}") - ssef (z_, zp) // compute loss function - end css - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the full parameter vector for the MA(q) model. - */ - override def parameter: VectorD = δ +: b - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t using the 1-step ahead forecast. - * - * y_t = δ + b_0 e_t-1 + b_1 e_t-2 + ... 
+ b_q-1 e_t-p - * - * @param t the time point being predicted - * @param y_ the actual values to use in making predictions - */ - override def predict (t: Int, y_ : VectorD): Double = - if t == 0 then e(0) = 0 // from backcast: assume no error - if t == 1 then e(1) = y_(1) - yf(0, 1) // first real point - - var sum = δ // intercept - for j <- 0 until q do // add MA terms (shocks) - if t-j >= 0 then sum += b(j) * e(t-j) // e(t-j = -1) does not exists; b(j) = 0(j) - - if t < y_.dim-1 then e(t+1) = y_(t+1) - sum // update the error vector (uncomment for first train) - sum // prediction yp - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, - * i.e., forecast the following time points: t+1, ..., t+h. - * Intended to work with rolling validation (analog of predict method). - * @param t the time point from which to make forecasts - * @param y_ the actual values to use in making predictions - */ - override def forecast (t: Int, y_ : VectorD = yb): VectorD = - val yh = new VectorD (hh) // hold forecasts for each horizon - for h <- 1 to hh do - var sum = δ // intercept - for j <- h-1 until q do // add MA terms (shocks) from before hozizon - if t-j >= 0 then sum += b(j) * e(t-j) // e(t-j = -1) does not exists; b(j) = 0(j) - yf(t, h) = sum // record in forecast matrix - yh(h-1) = sum // record forecasts for each horizon - yh // return forecasts for all horizons - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign into FORECAST MATRIX and return the h-steps ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - */ - override def forecastAt (h: Int, y_ : VectorD = yb): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - - for t <- y_.indices do // make forecasts over all time points for horizon h - var sum = δ // intercept - for j <- h-1 until q do // add MA terms (shocks) from before hozizon - if t-j >= 0 then sum += b(j) * e(t-j) // e(t-j = -1) does not exists; b(j) = 0(j) - yf(t, h) = sum // record in forecast matrix - yf(?, h) // return the h-step ahead forecast vector - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end MA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `MA` companion object provides factory methods for the - * `MA` class. - */ -object MA: - - private val flaw = flawf ("MA") // flaw function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `MA` object. - * @param y the response vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = AR.hp): MA = - new MA (y, hh, tRng, hparam) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Solve for and return the coefficient/parameter θ for an MA(1) using the Method of Moments. - * Returns the appropriate root of the quadratic equation: r(1 + θ) = θ - * @param r an estimate for the first lag autocorrelation rho_1. 
- */ - def solve_MA1 (r: Double): VectorD = - if abs (r) <= 0.5 then - VectorD (1 - sqrt (1 - 4 * r~^2) / 2 * r) // coefficients, only works when r(1) <= .5 - else - flaw ("solve_MA1", s"first lag autocorrelation = $r must be in [-.5, .5] for MoM") - VectorD (-0.0) - end solve_MA1 - -end MA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `mATest` main function tests the `MA` class on real data: - * Forecasting Lake Levels using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.mATest - */ -@main def mATest (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = new MA (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (y, mod.getYf) -// Forecaster.evalForecasts (mod, mod.getYb, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - -end mATest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `mATest2` main function tests the `MA` class on real data: - * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.mATest2 - */ -@main def mATest2 (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = new MA (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest ()() // train and test on full dataset - - mod.rollValidate () // TnT with Rolling Validation - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - -end mATest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `mATest3` main function tests the `MA` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * > runMain scalation.modeling.forecasting.mATest3 - */ -@main def mATest3 (): Unit = - - val yy = loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - for q <- 1 to 5 do - AR.hp("q") = q // number of MA terms - val mod = new MA (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (y, mod.getYf) -// Forecaster.evalForecasts (mod, mod.getYb, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - end for - -end mATest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `mATest4` main function tests the `MA` class on real data: - * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * > runMain scalation.modeling.forecasting.mATest4 - */ -@main def mATest4 (): Unit = - - val yy = loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - for q <- 1 to 5 do - AR.hp("q") = q // number of MA terms - val mod = new MA (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest ()() - - mod.rollValidate () // TnT with Rolling Validation - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - end for - -end mATest4 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `mATest5` main function tests the `MA` class on small dataset. - * Test forecasts (h = 1 step ahead forecasts). - * > runMain scalation.modeling.forecasting.mATest5 - */ -@main def mATest5 (): Unit = - - val y = VectorD (1, 3, 4, 2, 5, 7, 9, 8, 6, 3) - - val mod = new MA (y, 1) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on a Small Dataset") - mod.trainNtest ()() // train and test on full dataset - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - new Baseline (y, "MA1") - -/* - AR.hp ("p") = 2 - mod = new MA (y, 1) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on a Small Dataset") - mod.trainNtest ()() // train and test on full dataset - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - new Baseline (y, "MA2") -*/ - -end mATest5 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS$.class deleted file mode 100644 index b6c65ec72..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS$package$.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS$package$.class deleted file mode 100644 index 220bea0b6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS$package.class deleted file mode 100644 index acfac69ba..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS$package.tasty deleted file mode 100644 index 0c96f72dd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS.class deleted file mode 100644 index 9cbdd2563..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS.tasty deleted file mode 100644 index 801c966e9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TSY.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TSY.class deleted file mode 100644 index 35cb29cae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TSY.class and /dev/null differ 
diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TSY.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TSY.tasty deleted file mode 100644 index 9f102ae77..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/MakeMatrix4TSY.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel$.class deleted file mode 100644 index 0feffce3b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel$package$.class deleted file mode 100644 index e993164f4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel$package.class deleted file mode 100644 index f2e2e7409..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel$package.tasty deleted file mode 100644 index c23a198b1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel.class deleted file mode 100644 index 80b9a84fd..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel.tasty deleted file mode 100644 index 4bb33a494..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/NullModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Periodogram$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Periodogram$package$.class deleted file mode 100644 index 868ec2f39..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Periodogram$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Periodogram$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Periodogram$package.class deleted file mode 100644 index da30bf0a3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Periodogram$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Periodogram$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Periodogram$package.tasty deleted file mode 100644 index ac3bba9fb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Periodogram$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Periodogram.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Periodogram.class deleted file mode 100644 index 172fecc90..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Periodogram.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Periodogram.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Periodogram.tasty deleted 
file mode 100644 index 350e8fc33..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Periodogram.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk$.class deleted file mode 100644 index a2d16e11a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk$package$.class deleted file mode 100644 index 9d2a81a7f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk$package.class deleted file mode 100644 index e9866a06b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk$package.tasty deleted file mode 100644 index 8e6e78074..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk.class deleted file mode 100644 index 8651933fd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk.tasty deleted file mode 100644 index d8b6da0d9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalk.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS$.class deleted file mode 100644 index 0bf388f17..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS$package$.class deleted file mode 100644 index 30b830f12..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS$package.class deleted file mode 100644 index a236ac709..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS$package.tasty deleted file mode 100644 index f7a4ad122..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS.class deleted file mode 100644 index 68e8c479d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS.tasty deleted file mode 100644 index 81378336d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/RandomWalkS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY$.class deleted file mode 100644 index d820a443c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY$package$.class deleted file mode 100644 index 763207007..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY$package.class deleted file mode 100644 index f5b89be4e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY$package.tasty deleted file mode 100644 index 6d24f97bb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY.class deleted file mode 100644 index 6c523105d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY.tasty deleted file mode 100644 index 18cd6278c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SARY.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing$.class deleted file mode 100644 index 3607e6c1b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing$package$.class deleted file mode 100644 index 0e2af58a6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing$package.class deleted file mode 100644 index 13ad13799..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing$package.tasty deleted file mode 100644 index 8ca7de1c5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing.class deleted file mode 100644 index 
284ee8300..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing.tasty deleted file mode 100644 index 4b9d40cff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleExpSmoothing.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage$.class deleted file mode 100644 index b888e6715..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage$package$.class deleted file mode 100644 index e58d68e48..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage$package.class deleted file mode 100644 index 72d2b3c93..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage$package.tasty deleted file mode 100644 index c4033dec7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage$package.tasty and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage.class deleted file mode 100644 index 731611c48..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage.tasty deleted file mode 100644 index 1bb45658f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/SimpleMovingAverage.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity$package$.class deleted file mode 100644 index 6e30ba320..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity$package.class deleted file mode 100644 index 5fe9cbd9e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity$package.tasty deleted file mode 100644 index d620d814d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS$.class deleted file mode 100644 index 
07ff888fa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS$package$.class deleted file mode 100644 index 6f9aa4248..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS$package.class deleted file mode 100644 index 39cd5ffba..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS$package.tasty deleted file mode 100644 index 4b62531be..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS.class deleted file mode 100644 index 6687b4852..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS.tasty deleted file mode 100644 index a912f2a1c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/Stationarity_KPSS.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY$.class deleted file mode 100644 index 3612c6150..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY$package$.class deleted file mode 100644 index 073c703b8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY$package.class deleted file mode 100644 index ffa2756df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY$package.tasty deleted file mode 100644 index f32ab9641..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY.class deleted file mode 100644 index dd8f05926..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY.tasty deleted file mode 100644 index c26410a3d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TranARY.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel$.class deleted file mode 100644 index 738ab875c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel$package$.class deleted file mode 100644 index 82472873d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel$package.class deleted file mode 100644 index 5d8419a56..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel$package.tasty deleted file mode 100644 index e4165195c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel.class deleted file mode 100644 index 7b1da84cd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel.tasty deleted file mode 100644 index 0741772bc..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/TrendModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/UnitRoot.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/UnitRoot.class deleted file mode 100644 index 76e16fa3c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/UnitRoot.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/UnitRoot.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/UnitRoot.tasty deleted file mode 100644 index d17ace64b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/UnitRoot.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage$.class deleted file mode 100644 index 4d85fcbe2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage$package$.class deleted file mode 100644 index 957f78c0d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage$package.class deleted file mode 100644 index 831775daa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage$package.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage$package.tasty deleted file mode 100644 index 7583d66d9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage.class deleted file mode 100644 index 55bd2a962..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage.tasty deleted file mode 100644 index 671f47aa3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/WeightedMovingAverage.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest.class deleted file mode 100644 index fdd2d6e59..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest.tasty deleted file mode 100644 index 95cd6a4cf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest2.class deleted file mode 100644 index 9bd3f23ea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest2.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest2.tasty deleted file mode 100644 index ee8aecc0f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest3.class deleted file mode 100644 index fdcd4fbbd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest3.tasty deleted file mode 100644 index 8889047ae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMATest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMA_diffTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMA_diffTest.class deleted file mode 100644 index eb4dfc04f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMA_diffTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMA_diffTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMA_diffTest.tasty deleted file mode 100644 index 5288acc12..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRIMA_diffTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest.class deleted file mode 100644 index ef920f328..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest.tasty deleted file mode 100644 index 56a31bc39..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest2.class deleted file mode 100644 index 15166149b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest2.tasty deleted file mode 100644 index c04c6d546..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest3.class deleted file mode 100644 index 5d9e3de4a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest3.tasty deleted file mode 100644 index 4191ca2d0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest4.class deleted file mode 100644 index 1a2676143..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest4.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest4.tasty deleted file mode 100644 index d529cd2d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest5.class deleted file mode 100644 index 36bdb0ba1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest5.tasty deleted file mode 100644 index 410f616fd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest6.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest6.class deleted file mode 100644 index 316889b09..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest6.tasty deleted file mode 100644 index ec0c6ff31..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest7.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest7.class deleted file mode 100644 index c96946b61..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest7.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest7.tasty deleted file mode 100644 index 748e42c03..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRMATest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest.class deleted file mode 100644 index b65c64609..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest.tasty deleted file mode 100644 index bbf9a4c1c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest2.class deleted file mode 100644 index 69fdd3f6f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest2.tasty deleted file mode 100644 index 6e19e6087..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest3.class deleted file mode 100644 index 913f712d7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest3.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest3.tasty deleted file mode 100644 index 443182ec6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest4.class deleted file mode 100644 index d43c59f0b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest4.tasty deleted file mode 100644 index 743d28494..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest5.class deleted file mode 100644 index f3aa8ecf1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest5.tasty deleted file mode 100644 index 709657ab0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest3.class deleted file mode 100644 index 72480d803..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest3.tasty 
deleted file mode 100644 index 675d85572..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest4.class deleted file mode 100644 index d36b17d52..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest4.tasty deleted file mode 100644 index dcf5f4069..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest5.class deleted file mode 100644 index 2eed9c6dc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest5.tasty deleted file mode 100644 index fc1d292db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRXTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_DTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_DTest3.class deleted file mode 100644 index cff3972d6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_DTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_DTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_DTest3.tasty deleted file mode 100644 index 5009b697a..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_DTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_DTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_DTest4.class deleted file mode 100644 index b8cd9a8d4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_DTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_DTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_DTest4.tasty deleted file mode 100644 index bc8a85929..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_DTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest3.class deleted file mode 100644 index e27343dbf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest3.tasty deleted file mode 100644 index be7cadfde..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest4.class deleted file mode 100644 index d6e141753..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest4.tasty deleted file mode 100644 index 
4916e8104..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest5.class deleted file mode 100644 index ec22f31c8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest5.tasty deleted file mode 100644 index e81963c2a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_QuadTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Quad_DTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Quad_DTest3.class deleted file mode 100644 index 7924a3752..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Quad_DTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Quad_DTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Quad_DTest3.tasty deleted file mode 100644 index c8050ce38..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Quad_DTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Quad_DTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Quad_DTest4.class deleted file mode 100644 index 39c12dc2d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Quad_DTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Quad_DTest4.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Quad_DTest4.tasty deleted file mode 100644 index ccc58407d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Quad_DTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_SymbTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_SymbTest3.class deleted file mode 100644 index 290c243dd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_SymbTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_SymbTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_SymbTest3.tasty deleted file mode 100644 index 81f432a66..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_SymbTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_SymbTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_SymbTest4.class deleted file mode 100644 index da6f3179b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_SymbTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_SymbTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_SymbTest4.tasty deleted file mode 100644 index 4e700fe5d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_SymbTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Symb_DTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Symb_DTest3.class deleted file mode 100644 index 9218e188c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Symb_DTest3.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Symb_DTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Symb_DTest3.tasty deleted file mode 100644 index 57f7f1209..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Symb_DTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Symb_DTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Symb_DTest4.class deleted file mode 100644 index 2960f64ac..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Symb_DTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Symb_DTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Symb_DTest4.tasty deleted file mode 100644 index e6f8d46c3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRX_Symb_DTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest.class deleted file mode 100644 index 84285fb67..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest.tasty deleted file mode 100644 index cd1fafe6c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest2.class deleted file mode 100644 index 2e00b9b54..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest2.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest2.tasty deleted file mode 100644 index 45c8a83fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest3.class deleted file mode 100644 index 9fd35e657..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest3.tasty deleted file mode 100644 index 783c323e7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest4.class deleted file mode 100644 index 2abb2f427..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest4.tasty deleted file mode 100644 index 5ca8dac37..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest5.class deleted file mode 100644 index ae2a09d86..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest5.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest5.tasty deleted file mode 100644 index f52aabb56..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest6.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest6.class deleted file mode 100644 index 490a585d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest6.tasty deleted file mode 100644 index e2358816c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRYTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest.class deleted file mode 100644 index 4aac75191..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest.tasty deleted file mode 100644 index 33a3bf5fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest2.class deleted file mode 100644 index 7435726a1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest2.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest2.tasty deleted file mode 100644 index a8dd568fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest3.class deleted file mode 100644 index 0c708cd5c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest3.tasty deleted file mode 100644 index 3b74a31f1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest4.class deleted file mode 100644 index bb28dd3b2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest4.tasty deleted file mode 100644 index f52a7f5c4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_DTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest.class deleted file mode 100644 index 7147086bd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest.tasty deleted file mode 100644 index 1a5c433d2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest2.class deleted file mode 100644 index de3dd809c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest2.tasty deleted file mode 100644 index 202558f26..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest3.class deleted file mode 100644 index 15fdd7006..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest3.tasty deleted file mode 100644 index d791a4a88..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest4.class deleted file mode 100644 index 67b810634..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest4.class and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest4.tasty deleted file mode 100644 index 3fa38154a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest5.class deleted file mode 100644 index 986f12253..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest5.tasty deleted file mode 100644 index eb47feb53..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/aRY_QuadTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/baselineTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/baselineTest.class deleted file mode 100644 index a4e24472d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/baselineTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/baselineTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/baselineTest.tasty deleted file mode 100644 index 153892558..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/baselineTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/dTWTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/dTWTest.class deleted file mode 100644 index 1d3f861ca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/dTWTest.class and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/dTWTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/dTWTest.tasty deleted file mode 100644 index fa8e96885..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/dTWTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest.class deleted file mode 100644 index 7d8cfec19..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest.tasty deleted file mode 100644 index e139b8307..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest10.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest10.class deleted file mode 100644 index 147765671..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest10.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest10.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest10.tasty deleted file mode 100644 index aeff1b018..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest10.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest11.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest11.class deleted file mode 100644 index 753c67076..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest11.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest11.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest11.tasty deleted file mode 100644 index 835dc185d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest11.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest12.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest12.class deleted file mode 100644 index da031af39..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest12.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest12.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest12.tasty deleted file mode 100644 index c2707b145..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest12.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest13.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest13.class deleted file mode 100644 index 0e80395fa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest13.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest13.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest13.tasty deleted file mode 100644 index 95bcb4bd9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest13.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest14.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest14.class deleted file mode 100644 index 482eeb50f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest14.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest14.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest14.tasty deleted file mode 100644 index 78e496efe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest14.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest15.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest15.class deleted file mode 100644 index 78e7484ea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest15.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest15.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest15.tasty deleted file mode 100644 index dad798787..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest15.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest16.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest16.class deleted file mode 100644 index fbbb02ccc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest16.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest16.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest16.tasty deleted file mode 100644 index 06eb0a4cc..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest16.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest2.class deleted file mode 100644 index 429f63bf0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest2.tasty deleted file mode 100644 index ae5a732df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest3.class deleted file mode 100644 index 020a65806..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest3.tasty deleted file mode 100644 index 13280db30..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest4.class deleted file mode 100644 index 20de235cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest4.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest4.tasty deleted file mode 100644 index ad5bedc5c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest5.class deleted file mode 100644 index 73d34ea55..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest5.tasty deleted file mode 100644 index be898f4e8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest6.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest6.class deleted file mode 100644 index c484666db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest6.tasty deleted file mode 100644 index 1ff02917c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest7.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest7.class deleted file mode 100644 index 7d6f07b82..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest7.class and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest7.tasty deleted file mode 100644 index 0ff60dd03..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest8.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest8.class deleted file mode 100644 index 90cc155f2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest8.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest8.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest8.tasty deleted file mode 100644 index bae7dda1c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest8.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest9.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest9.class deleted file mode 100644 index 42652d86a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest9.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest9.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest9.tasty deleted file mode 100644 index cf0c3d5d0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_CovidTest9.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_GasFurnaceTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_GasFurnaceTest.class deleted file mode 100644 index 
62c972559..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_GasFurnaceTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_GasFurnaceTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_GasFurnaceTest.tasty deleted file mode 100644 index 9f692b70e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_GasFurnaceTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_GasFurnaceTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_GasFurnaceTest2.class deleted file mode 100644 index fda12dba5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_GasFurnaceTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_GasFurnaceTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_GasFurnaceTest2.tasty deleted file mode 100644 index 3a894aa04..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_GasFurnaceTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest.class deleted file mode 100644 index ba4ab9d9c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest.tasty deleted file mode 100644 index d0bb1c48a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest10.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest10.class deleted file mode 100644 index bb3a00e7d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest10.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest10.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest10.tasty deleted file mode 100644 index 70beb9f13..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest10.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest11.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest11.class deleted file mode 100644 index 8bf2ba602..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest11.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest11.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest11.tasty deleted file mode 100644 index 5bf039a41..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest11.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest2.class deleted file mode 100644 index acca6cc28..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest2.tasty deleted file mode 100644 index 4432e03a6..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest3.class deleted file mode 100644 index 67059d8a2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest3.tasty deleted file mode 100644 index d1ef693f4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest5.class deleted file mode 100644 index 37bc22438..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest5.tasty deleted file mode 100644 index 57de60b17..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest6.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest6.class deleted file mode 100644 index eca5f5a09..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest6.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest6.tasty deleted file mode 100644 index d352d9128..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest7.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest7.class deleted file mode 100644 index 48fce9e67..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest7.tasty deleted file mode 100644 index f7d8081b4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest8.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest8.class deleted file mode 100644 index 44cb25454..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest8.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest8.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest8.tasty deleted file mode 100644 index 174cc7832..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest8.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest9.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest9.class deleted file mode 100644 index 998af88d9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest9.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest9.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest9.tasty deleted file mode 100644 index b71553389..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/example_ILITest9.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest.class deleted file mode 100644 index 1414a3750..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest.tasty deleted file mode 100644 index 5e186e2be..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest2.class deleted file mode 100644 index 23a2f2f68..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest2.tasty deleted file mode 100644 index 54d5ddcfc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest3.class deleted file mode 100644 index 0d6341b35..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest3.tasty deleted file mode 100644 index db6c341c1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest4.class deleted file mode 100644 index 91b9cf7bc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest4.tasty deleted file mode 100644 index e50539391..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/forecastMatrixTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star$.class deleted file mode 100644 index 999d78d05..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star$package$.class deleted file mode 100644 index 608ad2be7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star$package.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star$package.class deleted file mode 100644 index 2cbf58bdd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star$package.tasty deleted file mode 100644 index c00bcf16f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star.class deleted file mode 100644 index f7913af70..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star.tasty deleted file mode 100644 index 2a943f911..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/AR_Star.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor$.class deleted file mode 100644 index 73cada246..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor$package$.class deleted file mode 100644 index 742c49554..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor$package.class deleted file mode 100644 index 50e1c7f95..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor$package.tasty deleted file mode 100644 index b45c4396e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor.class deleted file mode 100644 index 3b4427d8d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor.tasty deleted file mode 100644 index 1090730e6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/ForecastTensor.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star$.class deleted file mode 100644 index f0ede56a2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star$.class and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star$package$.class deleted file mode 100644 index 2d0681280..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star$package.class deleted file mode 100644 index 73b1b6ec9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star$package.tasty deleted file mode 100644 index 2405b1e04..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star.class deleted file mode 100644 index 3025838c2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star.tasty deleted file mode 100644 index f2c998cee..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/RandomWalk_Star.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR$.class deleted file mode 100644 index e84d6e0d7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR$package$.class deleted file mode 100644 index fb6c9b85a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR$package.class deleted file mode 100644 index 34c78a654..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR$package.tasty deleted file mode 100644 index 71f918dfd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR.class deleted file mode 100644 index f25cd6942..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR.tasty deleted file mode 100644 index 209a5a3d6..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/VAR.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest.class deleted file mode 100644 index b906fb41d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest.tasty deleted file mode 100644 index 4c3481bb3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest2.class deleted file mode 100644 index c9304b57b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest2.tasty deleted file mode 100644 index d1110da8b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest3.class deleted file mode 100644 index ba7d6a791..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest3.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest3.tasty deleted file mode 100644 index 5f8b99535..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/aR_StarTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/forecastTensorTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/forecastTensorTest.class deleted file mode 100644 index 77fe75130..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/forecastTensorTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/forecastTensorTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/forecastTensorTest.tasty deleted file mode 100644 index d84a88eb6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/forecastTensorTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/forecastTensorTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/forecastTensorTest2.class deleted file mode 100644 index dc833a7bb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/forecastTensorTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/forecastTensorTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/forecastTensorTest2.tasty deleted file mode 100644 index 07c631ea9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/forecastTensorTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest.class deleted file 
mode 100644 index 43e911cfe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest.tasty deleted file mode 100644 index 6195ed884..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest2.class deleted file mode 100644 index ef39e8bf7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest2.tasty deleted file mode 100644 index 7bca74675..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest3.class deleted file mode 100644 index 5fc325cc1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest3.tasty deleted file mode 100644 index 1328bd51b..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/randomWalk_StarTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest.class deleted file mode 100644 index bc0ed4f50..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest.tasty deleted file mode 100644 index 5560d466f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest2.class deleted file mode 100644 index e381e40c1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest2.tasty deleted file mode 100644 index c466cf3f3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest3.class deleted file mode 100644 index 0bca54919..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest3.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest3.tasty deleted file mode 100644 index 672ee22b7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest4.class deleted file mode 100644 index ad9bd939d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest4.tasty deleted file mode 100644 index 071763424..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest5.class deleted file mode 100644 index d1332570c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest5.tasty deleted file mode 100644 index c2df9d6d7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest6.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest6.class deleted file mode 100644 index 077ff7ed6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest6.class and /dev/null differ 
diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest6.tasty deleted file mode 100644 index ea0a7268e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/multivar/vARTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$.class deleted file mode 100644 index 01b73ecec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$.class deleted file mode 100644 index dbbe62950..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$att$11$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$att$11$.class deleted file mode 100644 index cacca6058..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$att$11$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$att$3$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$att$3$.class deleted file mode 100644 index af1f0fcb6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$att$3$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$att$5$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$att$5$.class deleted file mode 100644 index 17aac5021..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$att$5$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$att$7$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$att$7$.class deleted file mode 100644 index 7e65dab6f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$att$7$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$att$9$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$att$9$.class deleted file mode 100644 index a338a88d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package$att$9$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package.class deleted file mode 100644 index 60d98f3de..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package.tasty deleted file mode 100644 index 7d76c0237..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention.class deleted file mode 100644 index b51698d39..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention.tasty deleted file mode 100644 index c77406434..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Attention.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DenseLayer$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DenseLayer$.class deleted file mode 100644 index e978b77ba..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DenseLayer$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DenseLayer.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DenseLayer.class deleted file mode 100644 index d48663d6b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DenseLayer.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DenseLayer.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DenseLayer.tasty deleted file mode 100644 index 3b5a34427..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DenseLayer.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DropoutLayer$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DropoutLayer$.class deleted file mode 100644 index 41e23c5f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DropoutLayer$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DropoutLayer.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DropoutLayer.class deleted file mode 100644 index 977bfc0ae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DropoutLayer.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DropoutLayer.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DropoutLayer.tasty deleted file mode 100644 index 72f09e3d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/DropoutLayer.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU$.class deleted file mode 100644 index 3346c4ea5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU$package$.class deleted file mode 100644 index 61187d40b..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU$package.class deleted file mode 100644 index 557ba1ee2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU$package.tasty deleted file mode 100644 index 2bfd4d057..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU.class deleted file mode 100644 index eed91f577..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU.tasty deleted file mode 100644 index 2e2a736a8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/GRU.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Gate$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Gate$.class deleted file mode 100644 index 9c3fd07d6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Gate$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Gate.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Gate.class deleted file mode 100644 index f40313793..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Gate.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Gate.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Gate.tasty deleted file mode 100644 index a1e1a6cd4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/Gate.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM$.class deleted file mode 100644 index cca6b2ff9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM$package$.class deleted file mode 100644 index 2a7b26a41..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM$package.class deleted file mode 100644 index fd95503d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM$package.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM$package.tasty deleted file mode 100644 index 06562317b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM.class deleted file mode 100644 index d608c99ea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM.tasty deleted file mode 100644 index 7f3cdfa33..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LSTM.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LayerNorm$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LayerNorm$.class deleted file mode 100644 index bd35e5d52..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LayerNorm$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LayerNorm.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LayerNorm.class deleted file mode 100644 index db7600d92..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LayerNorm.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LayerNorm.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LayerNorm.tasty deleted file mode 100644 index 
74356b794..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/LayerNorm.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS$.class deleted file mode 100644 index 91a8e513d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS$package$.class deleted file mode 100644 index 596e1ef8c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS$package.class deleted file mode 100644 index 3009563cc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS$package.tasty deleted file mode 100644 index be4ed7d90..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS.class deleted file mode 100644 index f3cc3811a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS.tasty deleted file mode 100644 index 2b0560866..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS$.class deleted file mode 100644 index 20a156861..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS$package$.class deleted file mode 100644 index 98f79f718..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS$package.class deleted file mode 100644 index fca45616c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS$package.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS$package.tasty deleted file mode 100644 index aca277258..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS.class deleted file mode 100644 index 5dde2a981..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS.tasty deleted file mode 100644 index 391027d11..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc$.class deleted file mode 100644 index 96156cc7e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc$package$.class deleted file mode 100644 index 48bedfdd8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc$package$.class and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc$package$pe$2$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc$package$pe$2$.class deleted file mode 100644 index 219fe6c4a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc$package$pe$2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc$package.class deleted file mode 100644 index f7833f0f7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc$package.tasty deleted file mode 100644 index 2ff81bb85..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc.class deleted file mode 100644 index 583d4adc2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc.tasty deleted file mode 100644 index 60093bf46..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/PositionalEnc.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RMSNorm$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RMSNorm$.class deleted file mode 100644 index 3abec1d29..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RMSNorm$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RMSNorm.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RMSNorm.class deleted file mode 100644 index bfff959bd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RMSNorm.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RMSNorm.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RMSNorm.tasty deleted file mode 100644 index dc532a0b5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RMSNorm.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$.class deleted file mode 100644 index 5b582d5b9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$ParamGroup$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$ParamGroup$.class deleted file mode 100644 index 364052c3e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$ParamGroup$.class and /dev/null 
differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$ParamGroup.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$ParamGroup.class deleted file mode 100644 index 3df2fd899..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$ParamGroup.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$ParamGroupVector$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$ParamGroupVector$.class deleted file mode 100644 index d90617b18..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$ParamGroupVector$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$ParamGroupVector.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$ParamGroupVector.class deleted file mode 100644 index 1ee8fe2d3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$ParamGroupVector.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$package$.class deleted file mode 100644 index cf5118e55..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$package.class deleted file mode 100644 index e9cc79c58..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$package.class and /dev/null 
differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$package.tasty deleted file mode 100644 index 7826d407f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN.class deleted file mode 100644 index 488227569..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN.scala.bak deleted file mode 100644 index c32f418f8..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN.scala.bak +++ /dev/null @@ -1,261 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Tue Aug 29 13:54:14 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @note Model: Recurrent Neural Network (RNN) for Multivariate Time Series - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Standard RNN (GRU/LSTM) Formats for Multivariate Time Series Data: - * Matlab: - * - * Keras: 3D format expected by GRU/LSTM is [samples, timesteps, features]. 
- * => indexing [timestamp t, lags k, variable j] - * PyTorch: - */ - -package scalation -package modeling -package forecasting -package neuralforecasting - -import scalation.mathstat.{MatrixD, Plot, VectorD} -import scalation.random.{NormalMat, NormalVec_c} - -import ActivationFun.{softmax_, tanh_} -import MatrixD.outer - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RNN` class implements Recurrent Neural Network (RNN) via Back Propagation Through - * Time (BPTT). At each time point x_t, there is a vector representing several variables - * or the encoding of a word. Intended to work for guessing the next work in a sentence - * or for multi-horizon forecasting. - * Time series: (x_t: t = 0, 1, ..., n_seq-1) where n_seq is the number of time points/words - * @param x the input sequence/time series - * @param y the output sequence/time series - * @param fname the feature/variable names - * @param n_mem the size for hidden state (h) (dimensionality of memory) - */ -class RNN (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 8): // 4 - - private val CLASSIF = false // whether to apply classification (e.g., guess next word) or forecast a value - private val max_epochs = 20 // maximum number of iterations - private val eta = 0.0005 // the learning rate (use 0.25 for rNNTest) - - private val n_seq = x.dim // e.g., 20, number of words in a sentence (including start and end symbol) - private val n_var = x.dim2 // e.g., 64, number of variables or distinct words (vocabulary size) - // since we will only use one sentence for training, - // this is also the total steps during training. 
- -// private val _1 = VectorD.one (n_mem) // vector of all ones for e.g., 1 - z - - // initialize parameters (weights and biases) - private val rmg1 = NormalMat (n_mem, n_var, 0.0, 0.01) // random (Normal) matrix generators - private val rmg2 = NormalMat (n_mem, n_mem, 0.0, 0.01) - private val rmg3 = NormalMat (n_var, n_mem, 0.0, 0.01) - private val rvg1 = NormalVec_c (n_mem, 0.0, 0.01) // random (Normal) vector generators - private val rvg3 = NormalVec_c (n_var, 0.0, 0.01) - - private val U = rmg1.gen // parameters for computing the hidden state - private val W = rmg2.gen - private val b_h = rvg1.gen - - private val hg = Gate (n_seq, n_mem, n_var) // hidden state gate-like structure - - // decoder for generating output - private val V = rmg3.gen // decoder weight matrix - private val b_y = rvg3.gen // decoder bias vector - - private val h_m1 = rvg1.gen // hidden state @ t = -1 (m1 means minus 1) - private val h = new MatrixD (n_seq, n_mem) // hidden state h - private val yp = new MatrixD (n_seq, n_var) // predicted output - private val L = new VectorD (n_seq) // store loss function values - - // the partial derivative of weights and biases - private var dh_m1 = new VectorD (h_m1.dim) - private var db_y: VectorD = null - private val dV = new MatrixD (V.dim, V.dim2) - - if fname != null then println (s"RNN: fname = $fname") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the RNN using simple gradient descent. 
- */ - def train (): Unit = - for it <- 1 to max_epochs do - forward () // forward propagate: get intermediate and output results - - println (s"train: for epoch $it: loss function L = $L") - banner (s"train: for epoch $it: total loss function L.sum = ${L.sum}") - - backward () // back propagate: calculate gradients (partial derivatives) - - update_params () // update parameters (weights and biases) - end for - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test the RNN predictions. - */ - def test (): Unit = - new Plot (null, y(?, 0), yp(?, 0), "Plot of y vs yp for RNN", lines = true) - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forward propagate calculates yp, loss and intermediate variables for each step. - */ - def forward (): Unit = - for t <- 0 until n_seq do - val h_pre = if t == 0 then h_m1 else h(t-1) // get previous hidden state - h(t) = tanh_ (U * x(t) + W * h_pre + b_h) // compute hidden state - if CLASSIF then - yp(t) = softmax_ (V * h(t) + b_y) // activation: softmax for classification - L(t) = (-y(t) * log_ (yp(t))).sum // cross-entropy loss function - else - yp(t) = V * h(t) + b_y // activation: id for forecasting - L(t) = (y(t) - yp(t)).normSq // sse loss function - end if - end for - end forward - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backward propagate to calculate gradients using chain rules in O(n_seq) time. - * FIX - add option of using sse loss function and fix affected partial derivatives - */ - def backward (): Unit = - - import ActivationFun.tanhD - - // start back-propagation with the final/feed-forward (ff) layer (uses id for activation) - - val e = yp - y // negative error matrix - db_y = e.sumVr // vector of row sums - for t <- 0 until n_seq do dV += outer (e(t), h(t)) // outer vector product - val dh_ff = e * V // partial w.r.t. 
h: n_seq by n_mem matrix - var dh = new VectorD (dh_ff.dim2) // hold partial for hidden state (dh) @ time t - var dIn: VectorD = null - - // calculate the derivative contribution of each step and add them up - - for t <- n_seq-1 to 1 by -1 do // move back in time to t = 1 - dh += dh_ff(t) // update partial for hidden state (dh) @ time t - dIn = dh * tanhD (hg(t)) // input to tanh for hidden state - hg += (dIn, x(t), h(t-1)) // update partials for hidden state gate @ time t - dh = W.𝐓 * dIn // 𝐓 => matrix transpose - end for - - // end case @ time t = 0 -> use h_m1 for hidden state - - dh += dh_ff(0) // update partial for hidden state (dh) @ t = 0 - dIn = dh * tanhD (hg(0)) - hg += (dIn, x(0), h_m1) // update partials for hidden state gate @ t = 0 - dh_m1 = W.𝐓 * dIn - end backward - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on the calculated partial derivatives, update the parameters (weights - * and biases). - */ - def update_params (): Unit = - // hidden state (h) - U -= hg.dU * eta - W -= hg.dW * eta - b_h -= hg.db * eta - - // output layer - V -= dV * eta - b_y -= db_y * eta - end update_params - -end RNN - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RNN` companion object provides factory methods. - */ -object RNN: - - import ActivationFun._ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RNN` with automatic rescaling from a data matrix and response matrix. 
- * @param x the input/data matrix - * @param y the output/response matrix - * @param fname the feature/variable names - * @param n_mem the size of the hidden state (dimensionality of memory) - */ - def rescale (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): RNN = - val x_s = rescaleX (x, f_sigmoid) - val y_s = rescaleY (y, f_sigmoid)._1 - -// println (s" scaled: x = $x_s \n scaled y = $y_s") - new RNN (x_s, y_s, fname, n_mem) - end rescale - -end RNN - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rNNTest` main function tests the `RNN` class on randomly generated - * sequence data meant to represent encoded words - * > runMain scalation.modeling.forecasting.neuralforecasting.rNNTest - */ -@main def rNNTest (): Unit = - - val n_seq = 8 - val n_var = 5 - - val (x_t, y_t) = genSequenceData (n_seq, n_var) - - println (s"x_t = $x_t") - println (s"y_t = $y_t") - - banner ("Create a Recurrent Neural Network (RNN)") - val mod = new RNN (x_t, y_t) - mod.train () - mod.test () - -end rNNTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rNNTest2` main function tests the `RNN` class on sequence data read as words - * in a file that encoded and pass into `RNN` - * > runMain scalation.modeling.forecasting.neuralforecasting.rNNTest2 - */ -@main def rNNTest2 (): Unit = - - println ("read words from a text file") - -// FIX - find example text - -end rNNTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rNNTest3` main function tests the `RNN` class on sequence/time series data - * corresponding to the lake level dataset using multiple lags. 
- * > runMain scalation.modeling.forecasting.neuralforecasting.rNNTest3 - */ -@main def rNNTest3 (): Unit = - - import Example_LakeLevels.y - val lags = 2 // number of lags to include - val hh = 2 // forecasting horizon - FIX - currently lags == hh - - val y_s = scaleV (extreme (y), (-2.0, 2.0))(y) // rescale y to active domain of sigmoid, tanh - - val (x, yy) = buildMatrix4TS (y_s, lags, hh) // column for each lag - - println (s"x.dims = ${x.dims}, yy.dims = ${yy.dims}") - - banner ("Create a Recurrent Neural Network Unit (RNN)") - val mod = new RNN (x, yy) // call constructor - mod.train () - mod.test () - -end rNNTest3 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN.tasty deleted file mode 100644 index 26204b1a1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/RNN.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/TrEncoderLayer$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/TrEncoderLayer$.class deleted file mode 100644 index 59b4cab0f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/TrEncoderLayer$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/TrEncoderLayer.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/TrEncoderLayer.class deleted file mode 100644 index 96e34fa3b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/TrEncoderLayer.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/TrEncoderLayer.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/TrEncoderLayer.tasty 
deleted file mode 100644 index 4c07aca00..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/TrEncoderLayer.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest.class deleted file mode 100644 index ecdb1da54..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest.tasty deleted file mode 100644 index 928554452..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest2.class deleted file mode 100644 index a34abeada..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest2.tasty deleted file mode 100644 index b67b03448..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest3.class deleted file mode 100644 index 
1328142df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest3.tasty deleted file mode 100644 index 3a162c789..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest4.class deleted file mode 100644 index 90afc340c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest4.tasty deleted file mode 100644 index 5610e33aa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest5.class deleted file mode 100644 index 685186bb5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest5.tasty deleted file mode 100644 index 4420537ef..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/attentionTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest.class deleted file mode 100644 index eb5fefe44..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest.tasty deleted file mode 100644 index 9f5987416..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest2.class deleted file mode 100644 index 7029312a8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest2.tasty deleted file mode 100644 index 4cc6eb0f0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest3.class deleted file mode 100644 index 729df22ea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest3.class and /dev/null differ 
diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest3.tasty deleted file mode 100644 index 302717059..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/gRUTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/index.html b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/index.html deleted file mode 100644 index 86943e40a..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/index.html +++ /dev/null @@ -1,18 +0,0 @@ - - -

    Source files in neuralforecasting Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest.class deleted file mode 100644 index 62f0fc46f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest.tasty deleted file mode 100644 index f243c9d0d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest2.class deleted file mode 100644 index d92f1443c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest2.tasty deleted file mode 100644 index 5384d296c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest3.class deleted file mode 100644 index b98e70a84..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest3.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest3.tasty deleted file mode 100644 index d190823ea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/lSTMTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest.class deleted file mode 100644 index c3a05c5c4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest.tasty deleted file mode 100644 index 5a263b93c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest2.class deleted file mode 100644 index e6a0c994d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest2.tasty deleted file mode 100644 index a5ce6b49b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest2.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest3.class deleted file mode 100644 index b3e25583f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest3.tasty deleted file mode 100644 index 1f9f77e49..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_3L4TSTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest.class deleted file mode 100644 index 446a4cbd3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest.tasty deleted file mode 100644 index ca702b67c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest2.class deleted file mode 100644 index 4e0f64aeb..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest2.tasty deleted file mode 100644 index 9eb0c8e70..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest3.class deleted file mode 100644 index 4be9973b3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest3.tasty deleted file mode 100644 index b000d948d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest4.class deleted file mode 100644 index eccafc051..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest4.tasty 
deleted file mode 100644 index 8b7093c9d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest5.class deleted file mode 100644 index 2aaecc156..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest5.tasty deleted file mode 100644 index 2aed7dedf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/neuralNet_XL4TSTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/positionalEncTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/positionalEncTest.class deleted file mode 100644 index 5425eaeae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/positionalEncTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/positionalEncTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/positionalEncTest.tasty deleted file mode 100644 index aa5cef033..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/positionalEncTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest.class deleted file mode 100644 index f902d7412..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest.tasty deleted file mode 100644 index 40e99fe3e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest2.class deleted file mode 100644 index d14f2a1b2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest2.tasty deleted file mode 100644 index 747503f03..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest3.class deleted file mode 100644 index 0215cb515..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest3.tasty deleted file mode 100644 index 
91ef32787..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest4.class deleted file mode 100644 index d720ad3a8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest4.tasty deleted file mode 100644 index 5e19ace5c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/neuralforecasting/rNNTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest.class deleted file mode 100644 index 34cce3a31..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest.tasty deleted file mode 100644 index ae58dc5ed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest2.class deleted file mode 100644 index 8142c00cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest2.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest2.tasty deleted file mode 100644 index 962a88f6a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest3.class deleted file mode 100644 index ae6499368..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest3.tasty deleted file mode 100644 index 4aadace7d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest4.class deleted file mode 100644 index c71fbac85..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest4.tasty deleted file mode 100644 index 2729e6dab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest5.class deleted file mode 100644 index 791566e0a..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest5.tasty deleted file mode 100644 index 032b17943..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/nullModelTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARMA.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARMA.scala.bak deleted file mode 100644 index 11fc205d4..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARMA.scala.bak +++ /dev/null @@ -1,434 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Jun 30 13:27:00 EDT 2024 - * @see LICENSE (MIT style license file). - * - * @note Model: Auto-Regressive, Moving Average (ARMA) - * - * Parameter Estimation: Least Squares, Maximum Likelihood - * Conditional Sum-of-Squares (CSS), Negative Log-Likelihood (NLL) - * @see arxiv.org/pdf/1611.00965 - * @see arxiv.org/html/2310.01198v2 - * @see arxiv.org/pdf/2310.01198 - * @see people.stat.sc.edu/hitchcock/stat520ch7slides.pdf - * @see www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting2 - -import scalation.mathstat._ -import scalation.optimization.quasi_newton.{BFGS => Optimizer} // change import to change optimizer -//import scalation.optimization.quasi_newton.{LBFGS => Optimizer} -import scalation.random.NormalVec_c - -import Forecaster.rdot -import Example_Covid.loadData_y -import Example_LakeLevels.y - 
-//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARMA` class provides basic time series analysis capabilities for Auto-Regressive, - * Moving Average (ARMA) models. ARMA models are often used for forecasting. - * Given time series data stored in vector y, its next value y_t = combination of last - * p values and q shocks. - * - * y_t = δ + Σ[φ_j y_t-j] + Σ[θ_j e_t-j] + e_t - * - * where y_t is the value of y at time t and e_t is the residual/error term. - * @param y the response vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults to AR.hp) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ -class ARMA (y: VectorD, hh: Int, tRng: Range = null, - hparam: HyperParameter = AR.hp, - bakcast: Boolean = false) - extends AR (y, hh, tRng, hparam, bakcast): - - private val debug = debugf ("ARMA", true) // debug function - private val flaw = flawf ("ARMA") // flaw function - protected val q = hparam("q").toInt // use the last q shock/errors - private var z = VectorD.nullv // var for centered time series (used by first train) - private val pnq = p + q // sum of the orders - private val notHR = true // don't use the HR algorithm - - modelName = s"ARMA($p, $q)" - -// Use one of the following two train methods: swap names train0 & train and add override - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARMA` model to the times-series data in vector y_. - * Estimate the coefficient vector b for a (p, q)-th order Auto-Regressive ARMA(p, q) model. - * Uses a nonlinear optimizer (e.g., BFGS) to determine the coefficients. - * Residuals are re-estimated during optimization (may lead to instability) - * NOTE: Requires the error update in `predict` to be uncommented. 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - def train0 (x_null: MatrixD, y_ : VectorD): Unit = - b = NormalVec_c (pnq, 0.0, 0.1).gen // randomly initialize the coefficients - val mu = y_.mean // sample mean of y_ - z = y_ - mu // optimization works better using zero-centered data - val optimizer = new Optimizer (css) // apply Quasi-Newton optimizer - val (fb, bb) = optimizer.solve3 (b, 0.5) // optimal solution for loss function and parameters - b = bb // assign optimized parameters to vector b - δ = mu * (1 - b(0 until p).sum) // determine intercept - debug ("train0", s"optimized: p = $p, q - $q, δ = $δ, b = $b") - end train0 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARMA` model to the times-series data in vector y_. - * Estimate the coefficient vector b for a (p, q)-th order Auto-Regressive ARMA(p, q) model. - * Uses a nonlinear optimizer (e.g., BFGS) to determine the coefficients. - * Residuals are estimated before optimization using the Hannan-Rissanen Algorithm. - * NOTE: Requires the error update in `predict` to be commented out. 
- * @see faculty.washington.edu/dbp/s519/PDFs/13-overheads-2020.pdf - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - override def train (x_null: MatrixD, y_ : VectorD): Unit = - if notHR then - train0 (x_null, y_) - else - δ = 0.0 // intercept for y_ - b = NormalVec_c (pnq, 0.0, 0.1).gen // randomize the coefficients - val b_ = δ +: b // combine all parameters -> vector to optimize - resid (y_) // set the residuals using high order AR - val optimizer = new Optimizer (ss) // apply Quasi-Newton optimizer - val (fb, bb) = optimizer.solve3 (b_, 0.5) // optimal solution for loss function and parameters - δ = bb(0); b = bb(1 until bb.dim) // recover parameters for z - debug ("train", s"optimized parameters: δ = $δ, b = $b") - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use a higher order AR model to estimate the residuals (unobserved data). - * Set the residual/error vector e defined in `Forecaster`. - * @param y_ the training/full response vector (e.g., full y) - */ - def resid (y_ : VectorD): Unit = - val hp2 = new HyperParameter - hp2 += ("p", pnq + 3, pnq + 3) // Set the AR order to p + 1 + 3 - val ar = new AR (y, hh, tRng, hp2) // create an AR model - ar.train (null, y_) // train the AR model - e += ar.residual // use residuals from the AR model - end resid - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the sum of squared errors (loss function). - * @param b_ the combined parameters (δ, b) where b = (φ, θ). 
- */ - def ss (b_ : VectorD): Double = - δ = b_(0); b = b_(1 until b_.dim) // pull parameters out of b vector - val yy = yb(1 until yb.dim) // skip first (backcasted) value - val yyp = predictAll (yb) // predicted value for yb -// debug ("ss", s"yy.dim = ${yy.dim}, yyp.dim = ${yyp.dim}") - ssef (yy, yyp) // compute loss function - end ss - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the conditional sum of squared errors (loss function). - * @param b_ the working copy of parameters vector b = φ ++ θ. - */ - def css (b_ : VectorD): Double = - b = b_.copy // co[y parameters from b vector - val z_ = z(1 until z.dim) // skip first (backcasted) value - val zp = predictAll (z) // predicted value for z -// debug ("css", s"z_.dim = ${z_.dim}, zp.dim = ${zp.dim}") - ssef (z_, zp) // compute loss function - end css - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t using the 1-step ahead forecast. - * - * y_t = δ + φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * θ_0 e_t-1 + θ_1 e_t-2 + ... + θ_q-1 e_t-q - * - * where φ = b(0 until p) and θ = b(p until p_q). - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time), - * but do not assume errors repeat. Note, column 1 of yf (yf(?, 1) holds yp. - * Must be executed in time order, so errors are properly recorded in vector e - * @see `predictAll` method in `Forecaster` trait. - * @see `rdot` in Forecaster.scala for reverse dot product implementation. 
- * @param t the time point being predicted - * @param y_ the actual values to use in making predictions - */ - override def predict (t: Int, y_ : VectorD): Double = - if t == 0 then e(0) = 0 // from backcast: assume no error - if t == 1 then e(1) = y_(1) - yf(0, 1) // first real point - - var sum = δ + rdot (b(0 until p), y_, t) // intercept + AR terms - for j <- 0 until q if t-j >= 0 do sum += b(p+j) * e(t-j) // add MA terms (shocks) - -// if t < y_.dim-1 then e(t+1) = y_(t+1) - sum // update the error vector (uncomment for first train) - sum // prediction yp - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, - * i.e., forecast the following time points: t+1, ..., t+h. - * Intended to work with rolling validation (analog of predict method). - * FIX -- must include [ y_i, e_i ] before horizon and [ yp_i ] after horizon - * @param t the time point from which to make forecasts - * @param y_ the actual values to use in making predictions - */ - override def forecast (t: Int, y_ : VectorD = yb): VectorD = - val yh = new VectorD (hh) // hold forecasts for each horizon - for h <- 1 to hh do - val pred = δ + rdot (b, yf, t, h-1) // FIX -- slide in prior forecasted values - yf(t, h) = pred // record in forecast matrix - yh(h-1) = pred // record forecasts for each horizon - yh // return forecasts for all horizons - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign into FORECAST MATRIX and return the h-steps ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. 
- * FIX -- must include [ y_i, e_i ] before horizon and [ yp_i ] after horizon - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - */ - override def forecastAt (h: Int, y_ : VectorD = yb): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - - for t <- y_.indices do // make forecasts over all time points for horizon h - yf(t, h) = δ + rdot (b, yf, t, h-1) // FIX -- record in forecast matrix - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end ARMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARMA` companion object provides factory methods for the `ARMA` class. - */ -object ARMA: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `ARMA` object. - * @param y the response vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = AR.hp): ARMA = - new ARMA (y, hh, tRng, hparam) - end apply - -end ARMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest` main function tests the `ARMA` class on real data: - * Forecasting Lake Levels using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting2.aRMATest - */ -@main def aRMATest (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = new ARMA (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (y, mod.getYf) - Forecaster.evalForecasts (mod, mod.getYb, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - -end aRMATest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest2` main function tests the `ARMA` class on real data: - * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting2.aRMATest2 - */ -@main def aRMATest2 (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = new ARMA (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest ()() // train and test on full dataset - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - -end aRMATest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest3` main function tests the `ARMA` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * Comparison of sMAPE for AR(p), ARMA(p, 0), ARY(p), and ARY_D(p). 
- * - * 20.3547, 29.9510, 38.0585, 45.3859, 51.7718, 56.6248 AR(1) - * 20.2191, 29.9108, 38.1525, 45.5858, 52.2918, 57.3670 ARMA(1, 0) - * 20.1794, 29.8589, 38.1450, 45.5634, 52.3478, 57.4474 ARY(1) - * 19.9912, 30.1349, 38.7483, 45.1096, 49.5424, 52.5320 ARY_D(1) - * - * 18.1684, 27.0930, 35.1466, 41.4030, 46.7556, 51.7624 AR(2) - * 17.7900, 25.3293, 33.3283, 39.5055, 44.9095, 50.6043 ARMA(2, 0) - * 17.7728, 25.1705, 33.1900, 39.4218, 44.8621, 50.5991 ARY(2) - * 17.7245, 24.2871, 31.1716, 35.9357, 40.5132, 46.4806 ARY_D(2) - * - * 17.7411, 24.9003, 31.8377, 37.3797, 42.1010, 47.4027 AR(3) - * 17.4057, 23.9135, 30.5357, 35.5950, 40.6434, 46.4122 ARMA(3, 0) - * 17.3594, 23.7550, 30.3838, 35.4514, 40.5868, 46.4292 ARY(3) - * 17.2367, 23.2007, 29.4120, 33.5757, 38.8647, 44.1707 ARY_D(3) - * - * 17.5993, 24.4001, 30.4701, 35.1507, 39.6652, 44.3065 AR(4) - * 17.2928, 23.6678, 29.5574, 34.0383, 38.9062, 44.1568 ARMA(4, 0) - * 17.2457, 23.5122, 29.4110, 33.9350, 38.8422, 44.2303 ARY(4) - * 17.1336, 23.1984, 29.1758, 33.5773, 38.6493, 43.8045 ARY_D(4) - * - * 17.7440, 24.3782, 30.2910, 34.6374, 39.2784, 43.5109 AR(5) - * 17.2850, 23.6708, 29.5699, 34.0520, 38.9330, 44.2125 ARMA(5, 0) - * 17.2314, 23.5178, 29.4345, 33.9602, 38.9022, 44.3249 ARY(5) - * 17.1196, 23.1224, 29.1769, 33.6120, 38.7839, 43.9346 ARY_D(5) - * - * > runMain scalation.modeling.forecasting2.aRMATest3 - */ -@main def aRMATest3 (): Unit = - - val yy = loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - AR.hp("q") = 1 // number of MA terms - for p <- 1 to 5 do - AR.hp("p") = p // number of AR terms - val mod = new ARMA (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (y, mod.getYf) - Forecaster.evalForecasts (mod, 
mod.getYb, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - end for - -end aRMATest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest4` main function tests the `ARMA` class on real data: - * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). - * Comparison of sMAPE for AR(p), ARMA(p, 0), ARY(p), and ARY_D(p). - * - * 19.1334, 31.1906, 44.3787, 55.1576, 65.1810, 74.0524 AR(1) - * 19.0397, 30.4570, 43.9113, 54.9642, 65.3163, 74.2124 ARMA(1, 0) - * 19.0397, 30.4570, 43.9113, 54.9642, 65.3163, 74.2124 ARY(1) - * - * 16.6447, 26.9109, 39.8106, 50.8595, 60.2176, 68.6317 AR(2) - * 16.9459, 26.6301, 39.3354, 50.8941, 61.0822, 70.3496 ARMA(2, 0) - * 16.8833, 26.4824, 39.2329, 50.8677, 61.0624, 70.3218 ARY(2) - * - * 15.9232, 23.5929, 34.3577, 44.1784, 53.6513, 62.0129 AR(3) - * 15.9052, 21.8056, 31.7951, 41.3657, 51.8574, 61.7836 ARMA(3, 0) - * 15.7190, 21.7959, 32.1395, 42.0074, 52.6874, 62.7276 ARY(3) - * - * 15.3256, 22.6893, 30.7558, 39.6274, 48.6646, 56.7375 AR(4) - * 15.1026, 20.3376, 27.3229, 35.9123, 40.6935, 50.1041 ARMA(4, 0) - * 14.6791, 19.9940, 26.5644, 35.4590, 41.4955, 50.8660 ARY(4) - * - * 15.9166, 21.5246, 28.0675, 36.8669, 43.3785, 51.1786 AR(5) - * 15.5742, 20.2465, 28.8253, 36.8582, 43.1053, 48.5483 ARMA(5, 0) - * 15.0232, 19.4222, 27.1981, 35.4744, 40.3466, 48.4066 ARY(5) - * - * > runMain scalation.modeling.forecasting2.aRMATest4 - */ -@main def aRMATest4 (): Unit = - - val yy = loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - AR.hp("q") = 0 // number of MA terms - for p <- 1 to 5 do - AR.hp("p") = p // number of AR terms - val mod = new ARMA (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest ()() - - mod.setSkip (0) - mod.rollValidate () 
// TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - end for - -end aRMATest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest5` main function tests the `ARMA` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * Comparison of sMAPE for ARMA(p, 1) (i.e., q = 1) for different p orders. - * > runMain scalation.modeling.forecasting2.aRMATest5 - */ -@main def aRMATest5 (): Unit = - - val yy = loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - AR.hp("q") = 1 // number of MA terms - for p <- 1 to 5 do - AR.hp("p") = p // number of AR terms - val mod = new ARMA (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, mod.getYb, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - end for - -end aRMATest5 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest6` main function tests the `ARMA` class on small dataset. - * Test forecasts (h = 1 step ahead forecasts). 
- * > runMain scalation.modeling.forecasting2.aRMATest6 - */ -@main def aRMATest6 (): Unit = - - val y = VectorD (1, 3, 4, 2, 5, 7, 9, 8, 6, 3) - - AR.hp ("q") = 0 - var mod = new ARMA (y, 1) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on a Small Dataset") - mod.trainNtest ()() // train and test on full dataset - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - new Baseline (y, "AR1") - - AR.hp ("p") = 2 - mod = new ARMA (y, 1) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on a Small Dataset") - mod.trainNtest ()() // train and test on full dataset - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - new Baseline (y, "AR2") - -end aRMATest6 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARMA.scala.bak2 b/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARMA.scala.bak2 deleted file mode 100644 index afff6fb75..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARMA.scala.bak2 +++ /dev/null @@ -1,443 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Jun 30 13:27:00 EDT 2024 - * @see LICENSE (MIT style license file). 
- * - * @note Model: Auto-Regressive, Moving Average (ARMA) - * - * Parameter Estimation: Least Squares, Maximum Likelihood - * Conditional Sum-of-Squares (CSS), Negative Log-Likelihood (NLL) - * @see arxiv.org/pdf/1611.00965 - * @see arxiv.org/html/2310.01198v2 - * @see arxiv.org/pdf/2310.01198 - * @see people.stat.sc.edu/hitchcock/stat520ch7slides.pdf - * @see www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting2 - -import scalation.mathstat._ -import scalation.optimization.quasi_newton.{BFGS => Optimizer} // change import to change optimizer -//import scalation.optimization.quasi_newton.{LBFGS => Optimizer} -import scalation.random.NormalVec_c - -import Forecaster.rdot -import Example_Covid.loadData_y -import Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARMA` class provides basic time series analysis capabilities for Auto-Regressive, - * Moving Average (ARMA) models. ARMA models are often used for forecasting. - * Given time series data stored in vector y, its next value y_t = combination of last - * p values and q shocks. - * - * y_t = δ + Σ[φ_j y_t-j] + Σ[θ_j e_t-j] + e_t - * - * where y_t is the value of y at time t and e_t is the residual/error term. 
- * @param y the response vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults to AR.hp) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ -class ARMA (y: VectorD, hh: Int, tRng: Range = null, - hparam: HyperParameter = AR.hp, - bakcast: Boolean = false) - extends AR (y, hh, tRng, hparam, bakcast): - - private val debug = debugf ("ARMA", true) // debug function - private val flaw = flawf ("ARMA") // flaw function - protected val q = hparam("q").toInt // use the last q shock/errors - private var z = VectorD.nullv // var for centered time series (used by first train) - private val pnq = p + q // sum of the orders - private val notHR = true // don't use the HR algorithm - - modelName = s"ARMA($p, $q)" - b = NormalVec_c (pnq, 0.0, 0.04).gen // randomly initialize the coefficients - -// Use one of the following two train methods: swap names train0 & train and add override - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARMA` model to the times-series data in vector y_. - * Estimate the coefficient vector b for a (p, q)-th order Auto-Regressive ARMA(p, q) model. - * Uses a nonlinear optimizer (e.g., BFGS) to determine the coefficients. - * Residuals are re-estimated during optimization (may lead to instability) - * NOTE: Requires the error update in `predict` to be uncommented. 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - def train0 (x_null: MatrixD, y_ : VectorD): Unit = - e.clear () // set errors to zero - val mu = y_.mean // sample mean of y_ - z = y_ - mu // optimization works better using zero-centered data - val optimizer = new Optimizer (css) // apply Quasi-Newton optimizer - val (fb, bb) = optimizer.solve3 (b, 0.2) // optimal solution for loss function and parameters - b = bb // assign optimized parameters to vector b - δ = mu * (1 - b(0 until p).sum) // determine intercept - debug ("train0", s"optimized: p = $p, q - $q, δ = $δ, b = $b") - println (s"train0: error e = $e") - end train0 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARMA` model to the times-series data in vector y_. - * Estimate the coefficient vector b for a (p, q)-th order Auto-Regressive ARMA(p, q) model. - * Uses a nonlinear optimizer (e.g., BFGS) to determine the coefficients. - * Residuals are estimated before optimization using the Hannan-Rissanen Algorithm. - * NOTE: Requires the error update in `predict` to be commented out. 
- * @see faculty.washington.edu/dbp/s519/PDFs/13-overheads-2020.pdf - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - override def train (x_null: MatrixD, y_ : VectorD): Unit = - if notHR then - train0 (x_null, y_) - else - e.clear () - δ = 0.0 // intercept for y_ - resid (y_) // set the residuals using high order AR - val optimizer = new Optimizer (ss) // apply Quasi-Newton optimizer - val (fb, bb) = optimizer.solve3 (b, 0.2) // optimal solution for loss function and parameters - b = bb // recover parameters for z - δ = y.mean * (1 - b(0 until p).sum) // determine intercept - debug ("train", s"optimized: p = $p, q - $q, δ = $δ, b = $b") - println (s"train: error e = $e") - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use a higher order AR model to estimate the residuals (unobserved data). - * Set the residual/error vector e defined in `Forecaster`. - * @param y_ the training/full response vector (e.g., full y) - */ - def resid (y_ : VectorD): Unit = - val hp2 = new HyperParameter - hp2 += ("p", pnq + 3, pnq + 3) // Set the AR order to p + 1 + 3 - val ar = new AR (y, hh, tRng, hp2) // create an AR model - ar.train (null, y_) // train the AR model - e += ar.residual // use residuals from the AR model - end resid - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the sum of squared errors (loss function). - * @param b_ the combined parameters (δ, b) where b = (φ, θ). 
- */ - def ss (b_ : VectorD): Double = - b = b_.copy // copy parameters from b vector - val yy = yb(1 until yb.dim) // skip first (backcasted) value - val yyp = predictAll (yb) // predicted value for yb -// debug ("ss", s"yy.dim = ${yy.dim}, yyp.dim = ${yyp.dim}") - ssef (yy, yyp) // compute loss function - end ss - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the conditional sum of squared errors (loss function). - * @param b_ the working copy of parameters vector b = φ ++ θ. - */ - def css (b_ : VectorD): Double = - b = b_.copy // copy parameters from b vector - val z_ = z(1 until z.dim) // skip first (backcasted) value - val zp = predictAll (z) // predicted value for z -// debug ("css", s"z_.dim = ${z_.dim}, zp.dim = ${zp.dim}") - ssef (z_, zp) // compute loss function - end css - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t using the 1-step ahead forecast. - * - * y_t = δ + φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * θ_0 e_t-1 + θ_1 e_t-2 + ... + θ_q-1 e_t-q - * - * where φ = b(0 until p) and θ = b(p until p_q). - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time), - * but do not assume errors repeat. Note, column 1 of yf (yf(?, 1) holds yp. - * Must be executed in time order, so errors are properly recorded in vector e - * @see `predictAll` method in `Forecaster` trait. - * @see `rdot` in Forecaster.scala for reverse dot product implementation. 
- * @param t the time point being predicted - * @param y_ the actual values to use in making predictions - */ - override def predict (t: Int, y_ : VectorD): Double = - if t == 0 then e(0) = 0 // from backcast: assume no error - if t == 1 then e(1) = y_(1) - yf(0, 1) // first real point - - var sum = δ + rdot (b(0 until p), y_, t) // intercept + AR terms (use y); b(0 until p) = φ - for j <- 0 until q do // add MA terms (shocks) - if t-j >= 0 then sum += b(p+j) * e(t-j) // e(t-j = -1) does not exists; b(p+j) = 0(j) - -// if t < y_.dim-1 then e(t+1) = y_(t+1) - sum // update the error vector (uncomment for first train) - sum // prediction yp - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, - * i.e., forecast the following time points: t+1, ..., t+h. - * Intended to work with rolling validation (analog of predict method). - * Note, must include [ y_i, e_i ] before horizon and [ yp_i ] after horizon - * @param t the time point from which to make forecasts - * @param y_ the actual values to use in making predictions - */ - override def forecast (t: Int, y_ : VectorD = yb): VectorD = - val yh = new VectorD (hh) // hold forecasts for each horizon - for h <- 1 to hh do - var sum = δ + rdot (b(0 until p), yf, t, h-1) // intercept + AR terms (use y and yp); b(0 until p) = φ - for j <- h-1 until q do // add MA terms (shocks) from before hozizon - if t-j >= 0 then sum += b(p+j) * e(t-j) // e(t-j = -1) does not exists; b(p+j) = 0(j) - yf(t, h) = sum // record in forecast matrix - yh(h-1) = sum // record forecasts for each horizon - yh // return forecasts for all horizons - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign into FORECAST MATRIX and return the h-steps ahead forecast. 
- * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. - * Note, must include [ y_i, e_i ] before horizon and [ yp_i ] after horizon - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - */ - override def forecastAt (h: Int, y_ : VectorD = yb): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - - for t <- y_.indices do // make forecasts over all time points for horizon h - var sum = δ + rdot (b(0 until p), yf, t, h-1) // intercept + AR terms (use y and yp); b(0 until p) = φ - for j <- h-1 until q do // add MA terms (shocks) from before hozizon - if t-j >= 0 then sum += b(p+j) * e(t-j) // e(t-j = -1) does not exists; b(p+j) = 0(j) - yf(t, h) = sum // record in forecast matrix - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end ARMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARMA` companion object provides factory methods for the `ARMA` class. - */ -object ARMA: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `ARMA` object. - * @param y the response vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, hh: Int, tRng: Range = null, hparam: HyperParameter = AR.hp): ARMA = - new ARMA (y, hh, tRng, hparam) - end apply - -end ARMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest` main function tests the `ARMA` class on real data: - * Forecasting Lake Levels using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting2.aRMATest - */ -@main def aRMATest (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = new ARMA (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (y, mod.getYf) - Forecaster.evalForecasts (mod, mod.getYb, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - -end aRMATest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest2` main function tests the `ARMA` class on real data: - * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting2.aRMATest2 - */ -@main def aRMATest2 (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = new ARMA (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest ()() // train and test on full dataset - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - -end aRMATest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest3` main function tests the `ARMA` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * Comparison of sMAPE for AR(p), ARMA(p, 0), ARY(p), and ARY_D(p). 
- * - * 20.3547, 29.9510, 38.0585, 45.3859, 51.7718, 56.6248 AR(1) - * 20.2191, 29.9108, 38.1525, 45.5858, 52.2918, 57.3670 ARMA(1, 0) - * 20.1794, 29.8589, 38.1450, 45.5634, 52.3478, 57.4474 ARY(1) - * 19.9912, 30.1349, 38.7483, 45.1096, 49.5424, 52.5320 ARY_D(1) - * - * 18.1684, 27.0930, 35.1466, 41.4030, 46.7556, 51.7624 AR(2) - * 17.7900, 25.3293, 33.3283, 39.5055, 44.9095, 50.6043 ARMA(2, 0) - * 17.7728, 25.1705, 33.1900, 39.4218, 44.8621, 50.5991 ARY(2) - * 17.7245, 24.2871, 31.1716, 35.9357, 40.5132, 46.4806 ARY_D(2) - * - * 17.7411, 24.9003, 31.8377, 37.3797, 42.1010, 47.4027 AR(3) - * 17.4057, 23.9135, 30.5357, 35.5950, 40.6434, 46.4122 ARMA(3, 0) - * 17.3594, 23.7550, 30.3838, 35.4514, 40.5868, 46.4292 ARY(3) - * 17.2367, 23.2007, 29.4120, 33.5757, 38.8647, 44.1707 ARY_D(3) - * - * 17.5993, 24.4001, 30.4701, 35.1507, 39.6652, 44.3065 AR(4) - * 17.2928, 23.6678, 29.5574, 34.0383, 38.9062, 44.1568 ARMA(4, 0) - * 17.2457, 23.5122, 29.4110, 33.9350, 38.8422, 44.2303 ARY(4) - * 17.1336, 23.1984, 29.1758, 33.5773, 38.6493, 43.8045 ARY_D(4) - * - * 17.7440, 24.3782, 30.2910, 34.6374, 39.2784, 43.5109 AR(5) - * 17.2850, 23.6708, 29.5699, 34.0520, 38.9330, 44.2125 ARMA(5, 0) - * 17.2314, 23.5178, 29.4345, 33.9602, 38.9022, 44.3249 ARY(5) - * 17.1196, 23.1224, 29.1769, 33.6120, 38.7839, 43.9346 ARY_D(5) - * - * > runMain scalation.modeling.forecasting2.aRMATest3 - */ -@main def aRMATest3 (): Unit = - - val yy = loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - AR.hp("q") = 1 // number of MA terms - for p <- 1 to 1 do - AR.hp("p") = p // number of AR terms - val mod = new ARMA (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (y, mod.getYf) - Forecaster.evalForecasts (mod, 
mod.getYb, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - end for - -end aRMATest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest4` main function tests the `ARMA` class on real data: - * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). - * Comparison of sMAPE for AR(p), ARMA(p, 0), ARY(p), and ARY_D(p). - * - * 19.1334, 31.1906, 44.3787, 55.1576, 65.1810, 74.0524 AR(1) - * 19.0397, 30.4570, 43.9113, 54.9642, 65.3163, 74.2124 ARMA(1, 0) - * 19.0397, 30.4570, 43.9113, 54.9642, 65.3163, 74.2124 ARY(1) - * - * 16.6447, 26.9109, 39.8106, 50.8595, 60.2176, 68.6317 AR(2) - * 16.9459, 26.6301, 39.3354, 50.8941, 61.0822, 70.3496 ARMA(2, 0) - * 16.8833, 26.4824, 39.2329, 50.8677, 61.0624, 70.3218 ARY(2) - * - * 15.9232, 23.5929, 34.3577, 44.1784, 53.6513, 62.0129 AR(3) - * 15.9052, 21.8056, 31.7951, 41.3657, 51.8574, 61.7836 ARMA(3, 0) - * 15.7190, 21.7959, 32.1395, 42.0074, 52.6874, 62.7276 ARY(3) - * - * 15.3256, 22.6893, 30.7558, 39.6274, 48.6646, 56.7375 AR(4) - * 15.1026, 20.3376, 27.3229, 35.9123, 40.6935, 50.1041 ARMA(4, 0) - * 14.6791, 19.9940, 26.5644, 35.4590, 41.4955, 50.8660 ARY(4) - * - * 15.9166, 21.5246, 28.0675, 36.8669, 43.3785, 51.1786 AR(5) - * 15.5742, 20.2465, 28.8253, 36.8582, 43.1053, 48.5483 ARMA(5, 0) - * 15.0232, 19.4222, 27.1981, 35.4744, 40.3466, 48.4066 ARY(5) - * - * > runMain scalation.modeling.forecasting2.aRMATest4 - */ -@main def aRMATest4 (): Unit = - - val yy = loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - AR.hp("q") = 0 // number of MA terms - for p <- 1 to 5 do - AR.hp("p") = p // number of AR terms - val mod = new ARMA (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest ()() - - mod.setSkip (0) - mod.rollValidate () 
// TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - end for - -end aRMATest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest5` main function tests the `ARMA` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * Comparison of sMAPE for ARMA(p, 1) (i.e., q = 1) for different p orders. - * > runMain scalation.modeling.forecasting2.aRMATest5 - */ -@main def aRMATest5 (): Unit = - - val yy = loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - AR.hp("q") = 1 // number of MA terms - for p <- 1 to 5 do - AR.hp("p") = p // number of AR terms - val mod = new ARMA (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, mod.getYb, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - end for - -end aRMATest5 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest6` main function tests the `ARMA` class on small dataset. - * Test forecasts (h = 1 step ahead forecasts). 
- * > runMain scalation.modeling.forecasting2.aRMATest6 - */ -@main def aRMATest6 (): Unit = - - val y = VectorD (1, 3, 4, 2, 5, 7, 9, 8, 6, 3) - - AR.hp ("q") = 0 - var mod = new ARMA (y, 1) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on a Small Dataset") - mod.trainNtest ()() // train and test on full dataset - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - new Baseline (y, "AR1") - - AR.hp ("p") = 2 - mod = new ARMA (y, 1) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on a Small Dataset") - mod.trainNtest ()() // train and test on full dataset - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - new Baseline (y, "AR2") - -end aRMATest6 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX.scala.bak deleted file mode 100644 index 5cffdb88f..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX.scala.bak +++ /dev/null @@ -1,412 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Jun 30 13:27:00 EDT 2024 - * @see LICENSE (MIT style license file). 
- * - * @note Model: Auto-Regressive on lagged y and xe (ARX) using OLS - * - * @see `scalation.modeling.Regression` - * @see `scalation.modeling.forecasting2.ARY` when no exogenous variable are needed - - */ - -package scalation -package modeling -package forecasting2 - -import scala.math.{max, min} - -import scalation.mathstat._ -import scalation.modeling.{Regression => REGRESSION} -//import scalation.modeling.{RidgeRegression => REGRESSION} -//import scalation.modeling.{LassoRegression => REGRESSION} - -import Example_Covid.{loadData, response} -//import Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX` class provides basic time series analysis capabilities for ARX models. - * ARX models build on `ARY` by including one or more exogenous (xe) variables. - * Given time series data stored in vector y, its next value y_t = combination of - * last p values of y and the last q values of each exogenous variable xe_j. - * - * y_t = b dot x_t + e_t - * - * where y_t is the value of y at time t and e_t is the residual/error term. 
- * @param x the data/input matrix (lagged columns of y and xe) @see `ARX.apply` - * @param y the response/output vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param fname the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults to `ARX.hp`) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ -class ARX (x: MatrixD, y: VectorD, hh: Int, n_exo: Int, fname: Array [String] = null, - tRng: Range = null, hparam: HyperParameter = ARY.hp, - bakcast: Boolean = false) // backcasted values only used in `buildMatrix4TS` - extends Forecaster (y, hh, tRng, hparam, bakcast): // no automatic backcasting, @see `ARX.appky` - - private val debug = debugf ("ARX", true) // debug function - private val flaw = flawf ("ARX") // flaw function - private val p = hparam("p").toInt // use the last p endogenous values (p lags) - private val q = hparam("q").toInt // use the last q exogenous values (q lags) - private val spec = hparam("spec").toInt // additional terms: 0 => none, 1 => constant, 2 => linear - private val reg = new REGRESSION (x, y, fname, hparam) // delegate training to regression - - modelName = s"ARX($p, $q)" - - debug ("init", s"$modelName with $n_exo exogenous variables and additional term spec = $spec") - debug ("init", s"[ x | y ] = ${x :^+ y}") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the data/input matrix built from lagged y values. - */ - override def getX: MatrixD = x - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARX` model to the times-series data in vector y_. - * Estimate the coefficient vector b for a p-th order Auto-Regressive ARX(p) model. - * Uses OLS Matrix Fatorization to determine the coefficients, i.e., the b (φ) vector. 
- * @param x_ the data/input matrix (e.g., full x) - * @param y_ the training/full response vector (e.g., full y) - */ - override def train (x_ : MatrixD, y_ : VectorD): Unit = - debug ("train", s"$modelName, x_.dims = ${x_.dims}, y_.dim = ${y_.dim}") - reg.train (x_, y_) // train the regression model - b = reg.parameter // coefficients from regression - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train and test the forecasting model y_ = f(y-past) + e and report its QoF - * and plot its predictions. Return the predictions and QoF. - * NOTE: must use `trainNtest_x` when an x matrix is used, such as in `ARX`. - * @param x_ the training/full data/input matrix (defaults to full x) - * @param y_ the training/full response/output vector (defaults to full y) - * @param xx the testing/full data/input matrix (defaults to full x) - * @param yy the testing/full response/output vector (defaults to full y) - */ - def trainNtest_x (x_ : MatrixD = x, y_ : VectorD = y)(xx: MatrixD = x, yy: VectorD = y): (VectorD, VectorD) = - train (x_, y_) // train the model on training set - val (yp, qof) = test (xx, yy) // test the model on testing set - println (report (qof)) // report on Quality of Fit (QoF) - (yp, qof) - end trainNtest_x - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a forecasting model y_ = f(lags (y_)) + e - * and RETURN (1) aligned actual values, (2) its forecasts and (3) QoF vector. - * Testing may be in-sample (on the training set) or out-of-sample (on the testing set) - * as determined by the parameters passed in. Note: must call train and forecastAll - * before testF. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - */ - override def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = - val h_ = h - 1 - val yy = y_(h_ until y_.dim) // align the actual values - val yfh = yf(?, h)(0 until y_.dim-h_) // column h of the forecast matrix - println (s"yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// Forecaster.differ (yy, yfh) // uncomment for debugging - assert (yy.dim == yfh.dim) // make sure the vector sizes agree - - new Plot (null, yy, yfh, s"testF: yy, yfh vs. t for $modelName @h = $h", lines = true) - mod_resetDF (yy.dim) // reset the degrees of freedom - (yy, yfh, diagnose (yy, yfh)) // return actual, forecasted and QoF vectors - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Models need to provide a means for updating the Degrees of Freedom (DF). - * @param size the size of dataset (full, train, or test) - */ - override def mod_resetDF (size: Int): Unit = - val dfm = max (1, parameter.size - 1) // degrees of freedom for model - debug ("mod_resetDF", s"dfm = $dfm, df = ${size-dfm}") - resetDF (dfm, size - dfm) - end mod_resetDF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t using the 1-step ahead forecast. - * - * y_t = b_0 + b_1 y_t-1 + b_2 y_t-2 + ... + b_p y_t-p = b dot x_t - * - * FIX - parameter order is in conflict with AR models. - * @param t the time point being predicted - * @param y_ the actual values to use in making predictions (ignored) - */ - override def predict (t: Int, y_ : VectorD): Double = - val yp = reg.predict (x(t)) -// debug ("predict", s"@t = $t, b = $b dot x(t) = ${x(t)} = yp = $yp vs. 
y_ = ${y_(t)}") - yp - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, - * i.e., forecast the following time points: t+1, ..., t+h. - * Intended to work with rolling validation (analog of predict method). - * @param t the time point from which to make forecasts - * @param y_ the actual values to use in making predictions - */ - override def forecast (t: Int, y_ : VectorD = y): VectorD = - val yh = new VectorD (hh) // hold forecasts for each horizon - for h <- 1 to hh do - val xy = forge (x(min (t+1, x.dim-1)), yf(t), h) // FIX - why t+1 - println ("forecast: xy = $xy") - val pred = reg.predict (xy) // slide in prior forecasted values -// debug ("forecast", s"h = $h, @t = $t, xy = $xy, yp = $pred, y_ = ${y_(t)}") - yf(t, h) = pred // record in forecast matrix - yh(h-1) = pred // record forecasts for each horizon - yh // return forecasts for all horizons - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign into FORECAST MATRIX and return the h-steps ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - */ - override def forecastAt (h: Int, y_ : VectorD = y): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - - for t <- y_.indices do // make forecasts over all time points for horizon h - val xy = forge (x(t), yf(t), h) - val pred = reg.predict (xy) -// debug ("forecastAt", s"h = $h, @t = $t, xy = $xy, yp = $pred, y_ = ${y_(t)}") - yf(t, h) = pred // record in forecast matrix - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forge a new vector from the first spec values of x, the last p-h+1 values - * of x (past values) and values 1 to h-1 from the forecasts. - * FIX - will need to pull endogenous values and exogenous values - * @param xx the t-th row of the input matrix (lagged actual values) - * @param yy the t-th row of the forecast matrix (forecasted future values) - */ - def forge (xx: VectorD, yy: VectorD, h: Int): VectorD = - val n_endo = spec + p // number of trend + endogenous values - val x_trend = xx(0 until spec) // get trend values - val x_act = xx(n_endo-(p+1-h) until n_endo) // get actual lagged y-values (endogenous) - val nyy = n_endo - x_trend.dim - x_act.dim // number of forecasted values needed -// println (s"forge: h = $h, n_nedo = $n_endo, [ ${x_trend.dim}, ${x_act.dim} ], nyy = $nyy") - val x_fcast = yy(h-nyy until h) // get forecasted y-values - var xy = x_trend ++ x_act ++ x_fcast - - for j <- 0 until n_exo do // for the j-th exogenous variable - xy = xy ++ shift_l (xx(j*q until (j+1)*q), h) // get actual lagged xe-values for exogenous variable j - xy - end forge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Shift vector z, h places to the left and fill on right with zeroes. 
- * Ex: [ 1, 2, 3, 4, 5] shift 1 yields [2, 3, 4, 5, 0] - * @param z the vector to shift - * @param h the number of places to shift - */ - def shift_l (z: VectorD, h: Int): VectorD = - val _z = new VectorD (z.dim) - for k <- h until z.dim do _z(k-h) = z(k) - _z - end shift_l - -end ARX - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX` companion object provides factory methods for the `ARX` class. - */ -object ARX: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX` object by building an input matrix xy and then calling the constructor. - * @param xe the matrix of exogenous variable values - * @param y the response vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param fname the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (xe: MatrixD, y: VectorD, hh: Int, fname: Array [String] = null, - tRng: Range = null, hparam: HyperParameter = ARY.hp): ARX = - val p = hparam("p").toInt // use the last p endogenous values - val q = hparam("q").toInt // use the last q exogenous values - val spec = hparam("spec").toInt // 0 => none, 1 => constant, 2 => linear trend - val xy = buildMatrix4TS (xe, y, p, q, spec) - new ARX (xy, y, hh, xe.dim2, fname, tRng, hparam) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build the input matrix by combining the p + spec columns for the trend and - * endogenous variable with the q * xe.dim2 columns for the exogenous variables. 
- * @param xe the matrix of exogenous variable values - * @param y the response vector (time series data) - * @param p the number of lags for the endogenous variable (lags 1 to p) - * @param q the number of lags for each exogenous variable (lags 1 to q) - * @param spec the number of trend terms (0 => none, 1 => constant, 2 => linear trend) - */ - def buildMatrix4TS (xe: MatrixD, y: VectorD, p: Int, q: Int, spec: Int = 1): MatrixD = - ARY.buildMatrix4TS (y, p, spec) ++^ buildMatrix_exo (xe, q) - end buildMatrix4TS - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build the second part of the input matrix consisting of the q * xe.dim2 columns - * for the exogenous variables. - * @param xe the matrix of exogenous variable values - * @param q the number of lags for each exogenous variable (lags 1 to q) - */ - def buildMatrix_exo (xe: MatrixD, q: Int): MatrixD = - var xx: MatrixD = buildMatrix_exo_col (xe(?, 0), q) - for j <- 1 until xe.dim2 do - xx = xx ++^ buildMatrix_exo_col (xe(?, j), q) - xx - end buildMatrix_exo - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given the j-th exogenous variable vector xe_j, build and return an input/predictor - * MATRIX xx. - * The first lag responses can't be predicted due to missing past values. - * Therefore the number of rows in xx is reduced to xe.dim - 1. - * @param xej the j-th exogenous variable vector - * @param q the maximum lag included (inclusive) for the exogenous variable (1 to q) - */ - def buildMatrix_exo_col (xej: VectorD, q: Int): MatrixD = - val xe_j = backfill (xej) - val xx = new MatrixD (xe_j.dim, q) - for i <- xx.indices do - for k <- xx.indices2 do xx(i, q - 1 - k) = xe_j(max0(i - 1 - k)) - println (s"xx = $xx") - xx - end buildMatrix_exo_col - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backfill the zero prefix of exogenous variable j (xej) by backcasting. 
The zero - * prefix will be at least of size 1 as 0.0 is initially prepended. - * @param xej the j-th exogenous variable vector - */ - def backfill (xej: VectorD): VectorD = - val xe_j = xej :+ 0.0 // prepend with zero - val ii = xej.indexWhere (_ != 0.0) // find the first non-zero value - println (s"ii = $ii") - for i <- ii-1 to 0 by -1 do // replace zero prefix with backcasted values - xe_j(i) = WeightedMovingAverage.backcast (xe_j, i) // backcast from index i - xe_j - end backfill - -end ARX - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest` main function tests the `ARX` class on real data: - * Forecasting Lake Levels using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting2.aRXTest - * -@main def aRXTest (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = ARX (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, mod.getYb, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - -end aRXTest - */ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest2` main function tests the `ARX` class on real data: - * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting2.aRXTest2 - * -@main def aRXTest2 (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = ARX (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.rollValidate () // TnT with Rolling Validation - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - -end aRXTest2 - */ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest3` main function tests the `ARX` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * > runMain scalation.modeling.forecasting2.aRXTest3 - */ -@main def aRXTest3 (): Unit = - -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val exo_vars = Array ("icu_patients", "hosp_patients") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - new Plot (null, y, null, s"y (new_deaths) vs. t", lines = true) - for j <- exo_vars.indices do - new Plot (null, xe(?, j), null, s"x_$j (${exo_vars(j)}) vs. 
t", lines = true) - - for p <- 1 to 5 do // number of lags - ARY.hp("p") = p - ARY.hp("q") = p - val mod = ARX (xe, y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, mod.getYb, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - end for - -end aRXTest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest4` main function tests the `ARX` class on real data: - * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). - * > runMain scalation.modeling.forecasting2.aRXTest4 - * -@main def aRXTest4 (): Unit = - - val yy = loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - for p <- 2 to 2 do // number of lags - ARX.hp("p") = p - val mod = ARX (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // use customized trainNtest_x - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - end for - -end aRXTest4 - */ - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX_D.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX_D.scala.bak deleted file mode 100644 index 4552e1f12..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX_D.scala.bak +++ /dev/null @@ -1,377 +0,0 @@ - 
-//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Jun 30 13:27:00 EDT 2024 - * @see LICENSE (MIT style license file). - * - * @note Model: Auto-Regressive on lagged y and xe (ARX_D) using OLS - Direct Forecasting - */ - -package scalation -package modeling -package forecasting - -import scalation.mathstat._ -import scalation.modeling.neuralnet.{RegressionMV => REGRESSION} - -import Example_Covid.{loadData, response} -import MakeMatrix4TS._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_D` class provides basic time series analysis capabilities for - * ARX_D models. ARX_D models are often used for forecasting. - * `ARX_D` uses DIRECT (as opposed to RECURSIVE) multi-horizon forecasting. - * Given time series data stored in vector y, its next value y_t = combination of last p values. - * - * y_t = b dot x_t + e_t - * - * where y_t is the value of y at time t and e_t is the residual/error term. 
- * @param x the data/input matrix (lagged columns of y) @see `ARX_D.apply` - * @param y the response/output matrix (column per horizon) (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param n_exo the number of exogenous variables - * @param fname the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForms the map of transformation applied - */ -class ARX_D (x: MatrixD, y: MatrixD, hh: Int, n_exo: Int, fname: Array [String] = null, - tRng: Range = null, hparam: HyperParameter = hp, bakcast: Boolean = false, - tForms: TransformMap = Map ("tForm_y" -> null)) - extends Forecaster_D (x, y, hh, tRng, hparam, bakcast): // no automatic backcasting, @see `ARX_D.apply` - - private val debug = debugf ("ARX_D", true) // debug function -// private val flaw = flawf ("ARX_D") // flaw function - private val p = hparam("p").toInt // use the last p endogenous values (p lags) - private val q = hparam("q").toInt // use the last q exogenous values (q lags) - private val spec = hparam("spec").toInt // trend terms: 0 - none, 1 - constant, 2 - linear, 3 - quadratic - // 4 - sine, 5 cosine - private val nneg = hparam("nneg").toInt == 1 // 0 => unrestricted, 1 => predictions must be non-negative - private val reg = new REGRESSION (x, y, fname, hparam) // delegate training to multi-variate regression - - modelName = s"ARX_D($p, $q, $n_exo)" - println (s"Forms = $tForms") // FIX - to be removed - - debug ("init", s"$modelName with $n_exo exogenous variables and additional term spec = $spec") -// debug ("init", s"[ x | y ] = ${x ++^ y}") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARX_D` model to the times-series data in vector y_. 
- * Estimate the coefficient mattrix bb for a p-th order Auto-Regressive ARX_D(p) model. - * Uses OLS Matrix Fatorization to determine the coefficients, i.e., the bb matrix. - * @param x_ the data/input matrix (e.g., full x) - * @param y_ the training/full response vector (e.g., full y) - */ - def train_x (x_ : MatrixD, y_ : MatrixD): Unit = - debug ("train_x", s"$modelName, x_.dim = ${x_.dim}, y_.dim = ${y_.dim}") - reg.train (x_, y_) // train the multi-variate regression model - bb = reg.parameter // coefficients from regression - end train_x - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a QoF summary for a model with diagnostics for each predictor 'x_j' - * and the overall Quality of Fit (QoF). - * @param x_ the testing/full data/input matrix - * @param fname_ the array of feature/variable names - * @param b_ the parameters/coefficients for the model - * @param vifs the Variance Inflation Factors (VIFs) - */ - override def summary (x_ : MatrixD = getX, fname_ : Array [String] = reg.getFname, - b_ : VectorD = b, vifs: VectorD = reg.vif ()): String = - super.summary (x_, fname_, b_, vifs) // summary from `Fit` - end summary - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t using the 1-step ahead forecast. - * - * y_t = b_0 + b_1 y_t-1 + b_2 y_t-2 + ... + b_p y_t-p = b dot x_t - * - * @param t the time point being predicted - * @param y_ the actual values to use in making predictions (ignored) - */ - def predict (t: Int, y_ : MatrixD): VectorD = - val yp = rectify (reg.predict (x(t)), nneg) - if t < y_.dim then - debug ("predict", s"@t = $t, x(t) = ${x(t)}, yp = $yp vs. y_ = ${y_(t)}") - yp - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, - * i.e., forecast the following time points: t+1, ..., t+h. 
- * Intended to work with rolling validation (analog of predict method). - * @param t the time point from which to make forecasts - * @param y_ the actual values to use in making predictions - */ - override def forecast (t: Int, y_ : VectorD): VectorD = -// val pred = reg.predict (x(min (t+1, x.dim-1))) // FIX - why t+1 - val pred = predict (t, MatrixD (y_).transpose) - for h <- 1 to hh do yf(t, h) = pred(h-1) - pred // yh is pred - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points all horizons h (h-steps ahead). - * Assign into FORECAST MATRIX and return the forecast matrix. - * @param y_ the matrix of actual response values - */ - override def forecastAll (y_ : MatrixD): MatrixD = - for t <- y_.indices do - val pred = predict (t, y_) - for h <- 1 to hh do yf(t, h) = pred(h-1) -// for h <- 1 to hh do yf(max0 (t-1), h) = pred(h-1) // FIX - why -1 - yf - end forecastAll - -end ARX_D - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_D` companion object provides factory methods for the `ARX_D` class. - */ -object ARX_D extends MakeMatrix4TS: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX_D` object by building an input matrix x and then calling the constructor. 
- * @param xe the matrix of exogenous variable values - * @param y the response vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param fname_ the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults for `MakeMatrix4TS.hp`) - * @param fEndo the array of functions used to transform endogenous variables - * @param fExo the array of functions used to transform exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ - def apply (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, - tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array [Transform] = null, fExo: Array [Transform] = null, - bakcast: Boolean = false): ARX_D = - val xy = ARX.buildMatrix (xe, y, hparam, bakcast) - val yy = buildMatrixYY (xe, y, p, q, spec, lwave, hh) - val n_exo = xe.dim2 - val fname = if fname_ == null then ARX.formNames (n_exo, hparam) - else fname_ - new ARX_D (xy, yy, hh, n_exo, fname, tRng, hparam) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX` object by building an input matrix xy and then calling the - * `ARX` constructor. Also rescale the input data. 
- * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForm the z-transform (rescale to standard normal) - */ - def rescale (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, - tRng: Range = null, hparam: HyperParameter = hp, bakcast: Boolean = false, - tForm: VectorD | MatrixD => Transform = x => zForm(x)): ARX = ??? - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build the outpt matrix lagging each column of matrix based on horizon h. - * @param y the response vector (time series data) - * @param hh the maximum forecasting horizon (h = 1, 2, ... hh) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ - def buildMatrixYY (y: VectorD, hh: Int, bakcast: Boolean = false): MatrixD = - val yb = if bakcast then WeightedMovingAverage.backcast (y) +: y // y prepended with one backcast - else y - val m = y.dim - val yy = new MatrixD (m, hh) // yy = [ y_h ] for h = 1 to hh - for t <- yy.indices do - for h <- yy.indices2 do - yy(t, h) = if t+h >= m then -0.0 else yb(t+h) // yy -> actual and horizons - yy - end buildMatrixYY - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Evaluate the quality of point and optionally interval forecast for horizon (h = 1 to hh). 
- * @param mod the forecasting model to be evaluated - * @param yy the complete shifted per horizon actual time series values - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param ints whether to evaluate prediction interval forecasts as well as point forecasts - */ - def evalForecasts (mod: Forecaster, yy: MatrixD, hh: Int, ints: Boolean = false): Unit = - val ftMat = new MatrixD (hh, Fit.N_QoF) - banner (s"Evaluate ${mod.modelName}'s QoF for horizons 1 to $hh:") - val m = yy.dim - - for h <- 1 to hh do - val yh = yy(0 until m-h, h-1) // h-steps ahead actual values - val yfh = mod.getYf(0 until m-h, h) // h-steps ahead forecast - val qof = mod.diagnose (yh, yfh) - ftMat(h-1) = qof -// println (FitM.fitMap (qof, qoF_names)) // evaluate h-steps ahead forecasts - new Plot (null, yh, yfh, s"evalForecast: Plot of yh, yfh for ${mod.modelName} vs. t @h = $h", true) - -/* - if ints then - val (low, up) = mod.forecastAtI (yy, yfh, h) // prediction interval forecasts - val qof_all = mod.diagnose_ (yy, yfh, low, up) // fully evaluate h-steps ahead forecasts - mod.show_interval_forecasts (yy, yfh, low, up, qof_all, h) -*/ - end for - - println ("fitMap qof = ") - println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) - end evalForecasts - -end ARX_D - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_DTest` main function tests the `ARX_D` class on real data: - * Forecasting Lake Levels using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRX_DTest - * -@main def aRX_DTest (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = ARX_D (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - ARX_D.evalForecasts (mod, mod.getYy, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - -end aRX_DTest - */ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_DTest2` main function tests the `ARX_D` class on real data: - * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRX_DTest2 - * -@main def aRX_DTest2 (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = ARX_D (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.rollValidate () // TnT with Rolling Validation - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - -end aRX_DTest2 - */ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_DTest3` main function tests the `ARX_D` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * > runMain scalation.modeling.forecasting.aRX_DTest3 - */ -@main def aRX_DTest3 (): Unit = - - val exo_vars = Array ("icu_patients") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - hp("lwave") = 20 // wavelength (distance between peaks) - - for p <- 1 to 10; s <- 1 to 5 do // number of lags; trend - hp("p") = p // mumber of endo lags - hp("q") = p // mumber of exo lags - hp("spec") = s // trend specification: 0, 1, 2, 3, 5 - val mod = ARX_D (xe, y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // train and test on full dataset -// println (mod.summary ()) // statistical summary of fit FIX - crashes - - mod.forecastAll (mod.getYy) // forecast h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (y, mod.getYf) // FIX - diagnoseAll and evalForecasts should agree - ARX_D.evalForecasts (mod, mod.getYy, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - end for - -end aRX_DTest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_DTest4` main function tests the `ARX_D` class on real data: - * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * > runMain scalation.modeling.forecasting.aRX_DTest4 - */ -@main def aRX_DTest4 (): Unit = - -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val exo_vars = Array ("icu_patients", "hosp_patients") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - hp("lwave") = 20 // wavelength (distance between peaks) - - for p <- 1 to 10; s <- 1 to 5 do // number of lags; trend - hp("p") = p // number of endo lags - hp("q") = p // number of exo lags - hp("spec") = s // trend specification: 0, 1, 2, 3, 5 - val mod = ARX_D (xe, y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // use customized trainNtest_x - - mod.setSkip (0) - mod.rollValidate () -// println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set -// println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - end for - -end aRX_DTest4 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_DTest5` main function tests the `ARX_D` object's ability to build input - * matrices. Build an input/predictor data matrix for the COVID-19 dataset. 
- * > runMain scalation.modeling.forecasting.aRX_DTest5 - */ -@main def aRX_DTest5 (): Unit = - -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val exo_vars = Array ("icu_patients", "hosp_patients") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val p = 3 // the number of endo lags - val q = 2 // the number of exo lags - val spec = 1 // additional terms - val lwave = 20 // wavelength (distance between peaks) - val hh = 2 // maximum forecasting horizon - - println (s"y = $y") - - val (x, y_) = ARX_D.buildMatrix4TS (xe, y, p, q, hh, spec, lwave) - - println (s"y.dim = ${y.dim}, x.dims = ${x.dims}, y_.dims = ${y_.dims}") - -end aRX_DTest5 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX_Symb.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX_Symb.scala.bak deleted file mode 100644 index 326c07a0b..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX_Symb.scala.bak +++ /dev/null @@ -1,500 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Tue Jan 14 15:47:45 EST 2025 - * @see LICENSE (MIT style license file). - * - * @note Model: Auto-Regressive on lagged y and xe with SR terms (ARX_Symb) using OLS - * - * @see `scalation.modeling.Regression` - */ - -package scalation -package modeling -package forecasting - -import scala.math.min - -import scalation.mathstat._ - -import MakeMatrix4TS._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Symb` class provides time series analysis capabilities for ARX Symbolic - * Regression (SR) models. 
These models include trend, linear, power, root, and cross terms - * for the single endogenous (y) variable and zero or more exogenous (xe) variables. - * Given time series data stored in vector y and matrix xe, its next value y_t = combination - * of last p values of y, y^p, y^r and the last q values of each exogenous variable xe_j, - * again in linear, power and root forms (as well as ENDO-EXO cross terms). - * - * y_t = b dot x_t + e_t - * - * where y_t is the value of y at time t, x_t is a vector of inputs, and e_t is the - * residual/error term. - * @see `MakeMatrix4TS` for hyper-parameter specifications. - * @param x the data/input matrix (lagged columns of y and xe) @see `ARX_Symb.apply` - * @param y the response/output vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param n_exo the number of exogenous variables - * @param fname the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) - * @param itran the inverse transformation to return to the original scale (defaults to null) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ -class ARX_Symb (x: MatrixD, y: VectorD, hh: Int, n_exo: Int, fname: Array [String] = null, - tRng: Range = null, hparam: HyperParameter = hp, - val itran: FunctionS2S = null, bakcast: Boolean = false) // backcast value used only `MakeMatrix4TS` - extends Forecaster_Reg (x, y, hh, fname, tRng, hparam, bakcast): // no automatic backcasting - - private val debug = debugf ("ARX_Symb", true) // debug function - private val flaw = flawf ("ARX_Symb") // flaw function - private val p = hparam("p").toInt // use the last p values (p lags) - private val pp = hparam("pp").toDouble // power to raise the endogenous lags to (defaults to quadratic) - private val pr = hparam("pr").toDouble // root to take the endogenous lags to (defaults to 
square root) - - private val q = hparam("q").toInt // use the last q exogenous values (q lags) - private val spec = hparam("spec").toInt // trend terms: 0 - none, 1 - constant, 2 - linear, 3 - quadratic - // 4 - sine, 5 cosine - modelName = s"ARX_Symb($p, $q, $n_exo)" - - debug ("init", s"$modelName with with $n_exo exogenous variables and additional term spec = $spec") -// debug ("init", s"[ x | y ] = ${x :^+ y}") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARX_Symb` model to the times-series data in vector y_. - * Estimate the coefficient vector b for a p-th order Auto-Regressive ARX_Symb(p) model. - * Uses OLS Matrix Fatorization to determine the coefficients, i.e., the b (φ) vector. - * @param x_ the data/input matrix (e.g., full x) - * @param y_ the training/full response vector (e.g., full y) - */ - override def train (x_ : MatrixD, y_ : VectorD): Unit = - debug ("train", s"$modelName, x_.dims = ${x_.dims}, y_.dim = ${y_.dim}") - reg.train (x_, y_) // train the regression model - b = reg.parameter // coefficients from regression - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train and test the forecasting model y_ = f(y-past) + e and report its QoF - * and plot its predictions. Return the predictions and QoF. - * NOTE: must use `trainNtest_x` when an x matrix is used, such as in `ARX_Symb`. 
- * @param x_ the training/full data/input matrix (defaults to full x) - * @param y_ the training/full response/output vector (defaults to full y) - * @param xx the testing/full data/input matrix (defaults to full x) - * @param yy the testing/full response/output vector (defaults to full y) - */ - def trainNtest_x (x_ : MatrixD = x, y_ : VectorD = y)(xx: MatrixD = x, yy: VectorD = y): (VectorD, VectorD) = - val y_tr = y_.drop () :+ -0.0 // skip the first, add placeholder past end - train (x_, y_tr) // train the model on training set - val (yp, qof) = test (xx, yy) // test the model on testing set - println (report (qof)) // report on Quality of Fit (QoF) - (yp, qof) - end trainNtest_x - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a forecasting model y_ = f(lags (y_)) + e - * and RETURN (1) aligned actual values, (2) its forecasts and (3) QoF vector. - * Testing may be in-sample (on the training set) or out-of-sample (on the testing set) - * as determined by the parameters passed in. Note: must call train and forecastAll - * before testF. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - */ - override def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = - val h_ = h - 1 - val yy = y_(h_ until y_.dim) // align the actual values - val yfh = yf(?, h)(0 until y_.dim-h_) // column h of the forecast matrix - println (s"yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// Forecaster.differ (yy, yfh) // uncomment for debugging - assert (yy.dim == yfh.dim) // make sure the vector sizes agree - - new Plot (null, yy, yfh, s"testF: yy, yfh vs. 
t for $modelName @h = $h", lines = true) - mod_resetDF (yy.dim) // reset the degrees of freedom - (yy, yfh, diagnose (yy, yfh)) // return actual, forecasted and QoF vectors - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, - * i.e., forecast the following time points: t+1, ..., t+h. - * Intended to work with rolling validation (analog of predict method). - * @param t the time point from which to make forecasts - * @param y_ the actual values to use in making predictions - */ - override def forecast (t: Int, y_ : VectorD = y): VectorD = - val yh = new VectorD (hh) // hold forecasts for each horizon - for h <- 1 to hh do - - val xy = forge (x(min (t+1, x.dim-1)), yf(t), h) // FIX - why t+1 - - val pred = rectify (reg.predict (xy), nneg) // slide in prior forecasted values -// debug ("forecast", s"h = $h, @t = $t, xy = $xy, yp = $pred, y_ = ${y_(t)}") - yf(t, h) = pred // record in forecast matrix - yh(h-1) = pred // record forecasts for each horizon - yh // return forecasts for all horizons - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign into FORECAST MATRIX and return the h-steps ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - */ - override def forecastAt (h: Int, y_ : VectorD = y): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - - for t <- y_.indices do // make forecasts over all time points for horizon h - val xy = forge (x(t), yf(t), h) - val pred = rectify (reg.predict (xy), nneg) -// debug ("forecastAt", s"h = $h, @t = $t, xy = $xy, yp = $pred, y_ = ${y_(t)}") - yf(t, h) = pred // record in forecast matrix - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forge a new vector from the first spec values of x, the last p-h+1 values - * of x (past values) and recent values 1 to h-1 from the forecasts. - * @param xx the t-th row of the input matrix (lagged actual values) - * @param yy the t-th row of the forecast matrix (forecasted future values) - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forge (xx: VectorD, yy: VectorD, h: Int): VectorD = - val n_endo = spec + p // number of trend + endogenous values - val x_trend = xx(0 until spec) // get trend values - val x_act = xx(n_endo-(p+1-h) until n_endo) // get actual lagged y-values (endogenous) - val nyy = n_endo - x_trend.dim - x_act.dim // number of forecasted values needed -// println (s"forge: h = $h, n_nedo = $n_endo, [ ${x_trend.dim}, ${x_act.dim} ], nyy = $nyy") - val x_fcast = yy(h-nyy until h) // get forecasted y-values - val xpp_act = x_act ~^ pp // get actual y^p-values - val xpp_fcast = x_fcast ~^ pp // get forecasted y^p-values - val xpr_act = x_act ~^ pr // get actual y_root-values - val xpr_fcast = x_fcast ~^ pr // get forecasted y_root-values - - var xy = x_trend ++ x_act ++ x_fcast ++ xpp_act ++ xpp_fcast ++ xpr_act ++ xpr_fcast - - var exo = hide (xx(n_endo+2*p until n_endo+2*p + q), h) - - 
for j <- 1 until n_exo do // for the j-th exogenous variable - exo = exo ++ hide (xx(n_endo+2*p + j*q until n_endo+2*p + (j+1)*q), h) // get actual lagged xe-values for exogenous variable j - end for - val exo_pp = exo ~^ pp // get exogenous y^p-values - val exo_pr = exo ~^ pr // get exogenous y_root-values - xy = xy ++ exo ++ exo_pp ++ exo_pr - - xy - end forge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Hide values at the end of vector z (last h-1 values) as the increasing horizon - * turns them in future values (hence unavailable). Set these values to either - * zero (the default) or the last available value. - * @param z the vector to shift - * @param h the current horizon (number of steps ahead to forecast) - * @param fill whether to backfill with the rightmost value (true) or with 0 (false) - */ - def hide(z: VectorD, h: Int, fill: Boolean = false): VectorD = - val z_ = new VectorD(z.dim) - val z_lst = if h <= z.dim then z(z.dim - 1) else 0 - for k <- z.indices do - z_(k) = if k <= z.dim - h then z(k + 1) else z_lst // Shift values left and replace the last with zero - z_ - end hide - - -end ARX_Symb - -import Example_Covid._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Symb` companion object provides factory methods for the `ARX_Symb` class. - */ -object ARX_Symb: - - private val bounds = (1.0, 5.0) // (lower, upper) bounds for rescaling - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX_Symb` object by building an input matrix xy and then calling the - * `ARX_Symb` constructor. 
- * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param tran the transformation function (defaults to null, could use log1p) - * @param itran the inverse transformation function to rescale predictions to original y scale - * (defaults to null, could use expm1) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ - def apply (xe: MatrixD, y: VectorD, hh: Int, fname: Array [String] = null, - tRng: Range = null, hparam: HyperParameter = hp, - tran: FunctionS2S = null, itran: FunctionS2S = null, - bakcast: Boolean = false): ARX_Symb = - val p = hparam("p").toInt // use the last p values - val pp = hparam("pp").toDouble // use the last p values raised to pp power - val pr = hparam("pr").toDouble // use the last p values taken to the pr root - val q = hparam("q").toInt // use the last q exogenous values - val qp = hparam("qp").toDouble // use the last q exogenous values raised to pp power - val qr = hparam("qr").toDouble // use the last q exogenous values taken to the pr root - val spec = hparam("spec").toInt // 0 - none, 1 - constant, 2 - linear, 3 -quadratic, 4 - sin, 5 = cos - val lwave = hparam("lwave").toDouble // wavelength (distance between peaks) - val cross = hparam("cross").toInt == 1 // whether to include ENDO-EXO cross terms - val yt = if tran != null then y.map (tran) // y transformed - else y - val xy = buildMatrix4TS (xe, yt, p, pp, pr, q, qp, qr, spec, lwave, cross) - new ARX_Symb (xy, y, hh, xe.dim2, fname, tRng, hparam, itran, bakcast) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX_Symb` object by building an input matrix xy and then calling the - * `ARX_Symb` constructor, with rescaling of 
endogneous and exogenous variable values. - * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param tran the transformation function (defaults to null, could use log1p) - * @param itran the inverse transformation function to rescale predictions to original y scale - * (defaults to null, could use expm1) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ - def rescale (xe: MatrixD, y: VectorD, hh: Int, fname: Array [String] = null, - tRng: Range = null, hparam: HyperParameter = hp, - tran: FunctionS2S = null, itran: FunctionS2S = null, - bakcast: Boolean = false): ARX_Symb = - val p = hparam("p").toInt // use the last p values - val pp = hparam("pp").toDouble // use the last p values raised to pp power - val pr = hparam("pr").toDouble // use the last p values taken to the pr root - val q = hparam("q").toInt // use the last q exogenous values - val qp = hparam("qp").toDouble // use the last q exogenous values raised to pp power - val qr = hparam("qr").toDouble // use the last q exogenous values taken to the pr root - val spec = hparam("spec").toInt // 0 - none, 1 - constant, 2 - linear, 3 -quadratic, 4 - sin, 5 = cos - val lwave = hparam("lwave").toDouble // wavelength (distance between peaks) - val cross = hparam("cross").toInt == 1 // whether to include ENDO-EXO cross terms - val xet = scale (extreme (xe), bounds)(xe) // rescale x matrix to bounds - val yt = if tran != null then y.map (tran) // y transformed - else y - val xy = buildMatrix4TS (xet, yt, p, pp, pr, q, qp, qr, spec, lwave, cross) - new ARX_Symb (xy, y, hh, xe.dim2, fname, tRng, hparam, itran, bakcast) - end rescale - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build 
an input matrix by combining up 5 trend terms, endogenous terms from - * endogenous variable y, exogenous terms from n = xe.dim2 exogenous variables xe, - * and endogenous-exogenous cross terms. Specifically, - * trend terms spec (0 to 5) trend terms - * endogenous terms: p lagged linear terms, p lagged power terms, p lagged root terms, - * exogenous terms: q * n lagged linear terms, q * n lagged power terms, q * n lagged root terms, - * cross terms: q * n lagged linear cross terms. - * @param xe the matrix of exogenous variable values - * @param y the response vector (time series data) - * @param p the number of lags for the endogenous variable (lags 1 to p) - * @param pp the power (defaults to quadratic) to raise the lags of the endogenous variable to - * @param pr the root (defaults to sqrt) to take of the lags of the endogenous variable - * @param q the number of lags for each exogenous variable (lags 1 to q) - * @param qp the power (defaults to quadratic) to raise the lags of the exogenous variables to - * @param qr the root (defaults to sqrt) to take of the lags of the exogenous variables - * @param spec the number of trend terms (added columns) - * 0 - none, 1 - constant 2 - linear, 3 - quadratic, 4 - sine, 5 - cosine - * @param lwave the wavelength (distance between peaks) - * @param cross whether to include cross terms between endogenous and exogenous variables - */ - def buildMatrix4TS (xe: MatrixD, y: VectorD, - p: Int, pp: Double, pr: Double, - q: Int, qp: Double, qr: Double, - spec: Int, lwave: Double, cross: Boolean): MatrixD = - - // add trend terms and terms for the endogenous variable - var xy = makeMatrix (y, p, spec, lwave) ++^ // lagged linear terms and trend terms - makeMatrix4XP (y, p, pp) ++^ // lagged power/quadratic terms - makeMatrix4XP (y, p, pr) // lagged root/square-root terms - - // add terms for the exogenous variables - if xe.dim2 > 0 then xy = xy ++^ makeMatrix4EXO (xe, q) ++^ // lagged linear terms - makeMatrix4EXO (xe, q, qp) ++^ 
// lagged power/quadratic terms - makeMatrix4EXO (xe, q, qr) // lagged root/square-root terms - - // add cross terms of the endogenous and exogenous variables - if cross then - val yxe = y *~: xe // element-wise multiplication of vector y and matrix xe - xy = xy ++^ makeMatrix4EXO (yxe, q) // lagged linear cross terms - end if - println(s"End xy.dims: ${xy.dims}") - xy - end buildMatrix4TS - -end ARX_Symb - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest` main function tests the `ARX_Symb` class on real data: - * Forecasting Lake Levels using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRX_SymbTest - * -@main def aRX_SymbTest (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = ARX_Symb (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, mod.getYb, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - -end aRX_SymbTest - */ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest2` main function tests the `ARX_Symb` class on real data: - * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRX_SymbTest2 - * -@main def aRX_SymbTest2 (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = ARX_Symb (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.rollValidate () // TnT with Rolling Validation - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - -end aRX_SymbTest2 - */ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest3` main function tests the `ARX_Symb` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * > runMain scalation.modeling.forecasting.aRX_SymbTest3 - */ -@main def aRX_SymbTest3 (): Unit = - -// val exo_vars = NO_EXO - val exo_vars = Array ("icu_patients", "hosp_patients") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - hp("lwave") = 20 // wavelength (distance between peaks) - - for p <- 3 to 3; s <- 1 to 1; q <- 2 to 2 do // number of lags; trend; number of exo lags - hp("p") = p // endo lags - hp("q") = q // exo lags - hp("spec") = s // trend specification: 0, 1, 2, 3, 5 - val mod = ARX_Symb (xe, y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // train and test on full dataset - println (mod.summary ()) // statistical summary of fit - -// mod.setSkip (p) // full AR-formula available when t >= p - mod.forecastAll () // forecast 
h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (y, mod.getYf) -// Forecaster.evalForecasts (mod, mod.getYb, hh) -// println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") -// println (s"Final In-ST Forecast Matrix yf = ${mod.getYf.shiftDiag}") - end for - -end aRX_SymbTest3 - - - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest4` main function tests the `ARX_Symb` class on real data: - * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). - * > runMain scalation.modeling.forecasting.aRX_SymbTest4 - */ -@main def aRX_SymbTest4 (): Unit = - -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val exo_vars = Array ("icu_patients", "hosp_patients") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - hp("lwave") = 20 // wavelength (distance between peaks) - - for p <- 1 to 5; s <- 1 to 1 do // number of lags; trend - hp("p") = p // endo lags -// hp("q") = 1 // exo lags - hp("q") = min (2, p) // try various rules - hp("spec") = s // trend specification: 0, 1, 2, 3, 5 - val mod = ARX_Symb (xe, y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // use customized trainNtest_x - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set -// println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - end for - -end aRX_SymbTest4 - - 
-//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest5` main function tests the `ARX_Symb` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * This version performs feature selection. - * > runMain scalation.modeling.forecasting.aRX_SymbTest5 - */ -@main def aRX_SymbTest5 (): Unit = - -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val exo_vars = Array ("icu_patients", "hosp_patients") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - hp("p") = 10 // endo lags - hp("q") = 10 // exo lags - hp("spec") = 5 // trend specification: 0, 1, 2, 3, 5 - hp("lwave") = 20 // wavelength (distance between peaks) - - val mod = ARX_Symb (xe, y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // train and test on full dataset - println (mod.summary ()) // statistical summary of fit - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, mod.getYb, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - - banner ("Feature Selection Technique: Forward") - val (cols, rSq) = mod.forwardSelAll () // R^2, R^2 bar, sMAPE, R^2 cv -// val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, sMAPE, R^2 cv - val k = cols.size - println (s"k = $k") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "R^2 cv"), - s"R^2 vs n for ${mod.modelName}", lines = true) - println (s"rSq = $rSq") - -end aRX_SymbTest5 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX_Symb.scala.bak2 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX_Symb.scala.bak2 deleted file mode 100644 index c5d1948ad..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX_Symb.scala.bak2 +++ /dev/null @@ -1,528 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Tue Jan 14 15:47:45 EST 2025 - * @see LICENSE (MIT style license file). - * - * @note Model: Auto-Regressive on lagged y and xe with SR terms (ARX_Symb) using OLS - * - * @see `scalation.modeling.Regression` - */ - -package scalation -package modeling -package forecasting - -import scala.collection.mutable.ArrayBuffer -import scala.math.min - -import scalation.mathstat._ - -import MakeMatrix4TS._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Symb` class provides time series analysis capabilities for ARX Symbolic - * Regression (SR) models. These models include trend, linear, power, root, and cross terms - * for the single endogenous (y) variable and zero or more exogenous (xe) variables. - * Given time series data stored in vector y and matrix xe, its next value y_t = combination - * of last p values of y, y^p, y^r and the last q values of each exogenous variable xe_j, - * again in linear, power and root forms (as well as ENDO-EXO cross terms). - * - * y_t = b dot x_t + e_t - * - * where y_t is the value of y at time t, x_t is a vector of inputs, and e_t is the - * residual/error term. - * @see `MakeMatrix4TS` for hyper-parameter specifications. 
- * @param x the data/input matrix (lagged columns of y and xe) @see `ARX_Symb.apply` - * @param y the response/output vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param n_exo the number of exogenous variables - * @param fname the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) - * @param itran the inverse transformation to return to the original scale (defaults to null) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ -class ARX_Symb (x: MatrixD, y: VectorD, hh: Int, n_exo: Int, fname: Array [String], - tRng: Range = null, hparam: HyperParameter = hp, - val itran: FunctionS2S = null, bakcast: Boolean = false) // backcast value used only `MakeMatrix4TS` - extends Forecaster_Reg (x, y, hh, fname, tRng, hparam, bakcast): // no automatic backcasting - - private val debug = debugf ("ARX_Symb", true) // debug function - private val p = hparam("p").toInt // use the last p values (p lags) - private val pp = hparam("pp").toDouble // power to raise the endogenous lags to (defaults to quadratic) - private val pr = hparam("pr").toDouble // root to take the endogenous lags to (defaults to square root) - private val q = hparam("q").toInt // use the last q exogenous values (q lags) - private val qp = hparam("qp").toDouble // power to raise the exogenous lags to (defaults to quadratic) - private val qr = hparam("qr").toDouble // root to take the exogenous lags to (defaults to square root) - private val spec = hparam("spec").toInt // trend terms: 0 - none, 1 - constant, 2 - linear, 3 - quadratic - // 4 - sine, 5 cosine - modelName = s"ARX_Symb($p, $q, $n_exo)" - - debug ("init", s"$modelName with with $n_exo exogenous variables and additional term spec = $spec") -// debug ("init", s"[ x | y ] = ${x :^+ y}") - - 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARX_Symb` model to the times-series data in vector y_. - * Estimate the coefficient vector b for a p-th order Auto-Regressive ARX_Symb(p) model. - * Uses OLS Matrix Fatorization to determine the coefficients, i.e., the b (φ) vector. - * @param x_ the data/input matrix (e.g., full x) - * @param y_ the training/full response vector (e.g., full y) - override def train (x_ : MatrixD, y_ : VectorD): Unit = - debug ("train", s"$modelName, x_.dims = ${x_.dims}, y_.dim = ${y_.dim}") - reg.train (x_, y_) // train the regression model - b = reg.parameter // coefficients from regression - end train - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train and test the forecasting model y_ = f(y-past) + e and report its QoF - * and plot its predictions. Return the predictions and QoF. - * NOTE: must use `trainNtest_x` when an x matrix is used, such as in `ARX_Symb`. - * @param x_ the training/full data/input matrix (defaults to full x) - * @param y_ the training/full response/output vector (defaults to full y) - * @param xx the testing/full data/input matrix (defaults to full x) - * @param yy the testing/full response/output vector (defaults to full y) - def trainNtest_x (x_ : MatrixD = x, y_ : VectorD = y)(xx: MatrixD = x, yy: VectorD = y): (VectorD, VectorD) = - val y_tr = y_.drop () :+ -0.0 // skip the first, add placeholder past end - train (x_, y_tr) // train the model on training set - val (yp, qof) = test (xx, yy) // test the model on testing set - println (report (qof)) // report on Quality of Fit (QoF) - (yp, qof) - end trainNtest_x - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a forecasting model y_ = f(lags (y_)) + e - * and RETURN (1) aligned actual values, (2) its forecasts and (3) QoF vector. 
- * Testing may be in-sample (on the training set) or out-of-sample (on the testing set) - * as determined by the parameters passed in. Note: must call train and forecastAll - * before testF. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - override def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = - val h_ = h - 1 - val yy = y_(h_ until y_.dim) // align the actual values - val yfh = yf(?, h)(0 until y_.dim-h_) // column h of the forecast matrix - println (s"yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// Forecaster.differ (yy, yfh) // uncomment for debugging - assert (yy.dim == yfh.dim) // make sure the vector sizes agree - - new Plot (null, yy, yfh, s"testF: yy, yfh vs. t for $modelName @h = $h", lines = true) - mod_resetDF (yy.dim) // reset the degrees of freedom - (yy, yfh, diagnose (yy, yfh)) // return actual, forecasted and QoF vectors - end testF - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, - * i.e., forecast the following time points: t+1, ..., t+h. - * Intended to work with rolling validation (analog of predict method). 
- * @param t the time point from which to make forecasts - * @param y_ the actual values to use in making predictions - override def forecast (t: Int, y_ : VectorD = y): VectorD = - val yh = new VectorD (hh) // hold forecasts for each horizon - for h <- 1 to hh do - val xy = forge (x(min (t+1, x.dim-1)), yf(t), h) // FIX - why t+1 - val pred = rectify (reg.predict (xy), nneg) // slide in prior forecasted values -// debug ("forecast", s"h = $h, @t = $t, xy = $xy, yp = $pred, y_ = ${y_(t)}") - yf(t, h) = pred // record in forecast matrix - yh(h-1) = pred // record forecasts for each horizon - yh // return forecasts for all horizons - end forecast - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign into FORECAST MATRIX and return the h-steps ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - override def forecastAt (h: Int, y_ : VectorD = y): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - - for t <- y_.indices do // make forecasts over all time points for horizon h - val xy = forge (x(t), yf(t), h) - val pred = rectify (reg.predict (xy), nneg) -// debug ("forecastAt", s"h = $h, @t = $t, xy = $xy, yp = $pred, y_ = ${y_(t)}") - yf(t, h) = pred // record in forecast matrix - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forge a new vector from the first spec values of x, the last p-h+1 values - * of x (past values) and recent values 1 to h-1 from the forecasts. 
- * @param xx the t-th row of the input matrix (lagged actual values) - * @param yy the t-th row of the forecast matrix (forecasted future values) - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forge (xx: VectorD, yy: VectorD, h: Int): VectorD = - val x_trend = xx(0 until spec) // get trend values - - // add terms for the endogenous variable - val n_endo = spec + p // number of trend + endogenous values - val x_act = xx(n_endo-(p+1-h) until n_endo) // get actual lagged y-values (endogenous) - val nyy = n_endo - x_trend.dim - x_act.dim // number of forecasted values needed -// println (s"forge: h = $h, n_nedo = $n_endo, [ ${x_trend.dim}, ${x_act.dim} ], nyy = $nyy") - val x_fcast = yy(h-nyy until h) // get forecasted y-values - val xpp_act = x_act ~^ pp // get actual y^pp-values - val xpp_fcast = x_fcast ~^ pp // get forecasted y^pp-values - val xpr_act = x_act ~^ pr // get actual y_root-values - val xpr_fcast = x_fcast ~^ pr // get forecasted y_root-values - - val xy = x_trend ++ x_act ++ x_fcast ++ xpp_act ++ xpp_fcast ++ xpr_act ++ xpr_fcast - - // add terms for the exogenous variables - var exo = hide (xx(n_endo+2*p until n_endo+2*p + q), h) - for j <- 1 until n_exo do // for the j-th exogenous variable - exo = exo ++ hide (xx(n_endo+2*p + j*q until n_endo+2*p + (j+1)*q), h) // get actual lagged xe-values for exogenous variable j - val exo_pp = exo ~^ qp // get exogenous y^qp-values - val exo_pr = exo ~^ qr // get exogenous y_root-values - - // add endogenous- exogenous cross terms - // FIX - to be implemented - - xy ++ exo ++ exo_pp ++ exo_pr - end forge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Hide values at the end of vector z (last h-1 values) as the increasing horizon - * turns them in future values (hence unavailable). Set these values to either - * zero (the default) or the last available value. 
- * @param z the vector to shift - * @param h the current horizon (number of steps ahead to forecast) - * @param fill whether to backfill with the rightmost value (true) or with 0 (false) - */ - def hide (z: VectorD, h: Int, fill: Boolean = false): VectorD = - val lst = z.dim - h // last available index position in z - val zl = if lst >= 0 then z(lst) else 0.0 // last available z value per horizon - val z_ = new VectorD (z.dim) - for k <- z.indices do - z_(k) = if k <= lst then z(k) else if fill then zl else 0.0 - z_ - end hide - -end ARX_Symb - -import Example_Covid._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Symb` companion object provides factory methods for the `ARX_Symb` class. - */ -object ARX_Symb: - - private val bounds = (1.0, 5.0) // (lower, upper) bounds for rescaling - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX_Symb` object by building an input matrix xy and then calling the - * `ARX_Symb` constructor. 
- * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param fname_ the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param tran the transformation function (defaults to null, could use log1p) - * @param itran the inverse transformation function to rescale predictions to original y scale - * (defaults to null, could use expm1) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ - def apply (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, - tRng: Range = null, hparam: HyperParameter = hp, - tran: FunctionS2S = null, itran: FunctionS2S = null, - bakcast: Boolean = false): ARX_Symb = - val p = hparam("p").toInt // use the last p values - val pp = hparam("pp").toDouble // use the last p values raised to pp power - val pr = hparam("pr").toDouble // use the last p values taken to the pr root - val q = hparam("q").toInt // use the last q exogenous values - val qp = hparam("qp").toDouble // use the last q exogenous values raised to pp power - val qr = hparam("qr").toDouble // use the last q exogenous values taken to the pr root - val spec = hparam("spec").toInt // 0 - none, 1 - constant, 2 - linear, 3 -quadratic, 4 - sin, 5 = cos - val lwave = hparam("lwave").toDouble // wavelength (distance between peaks) - val cross = hparam("cross").toInt == 1 // whether to include ENDO-EXO cross terms - val yt = if tran != null then y.map (tran) // y transformed - else y - val xy = buildMatrix4TS (xe, yt, p, pp, pr, q, qp, qr, spec, lwave, cross, bakcast) - val n_exo = xe.dim2 - val fname = if fname_ == null then formNames (spec, p, n_exo, q) - else fname_ - new ARX_Symb (xy, y, hh, n_exo, fname, tRng, hparam, itran, bakcast) - end apply - - 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX_Symb` object by building an input matrix xy and then calling the - * `ARX_Symb` constructor, with rescaling of endogneous and exogenous variable values. - * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param tran the transformation function (defaults to null, could use log1p) - * @param itran the inverse transformation function to rescale predictions to original y scale - * (defaults to null, could use expm1) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ - def rescale (xe: MatrixD, y: VectorD, hh: Int, fname: Array [String] = null, - tRng: Range = null, hparam: HyperParameter = hp, - tran: FunctionS2S = null, itran: FunctionS2S = null, - bakcast: Boolean = false): ARX_Symb = - val p = hparam("p").toInt // use the last p values - val pp = hparam("pp").toDouble // use the last p values raised to pp power - val pr = hparam("pr").toDouble // use the last p values taken to the pr root - val q = hparam("q").toInt // use the last q exogenous values - val qp = hparam("qp").toDouble // use the last q exogenous values raised to pp power - val qr = hparam("qr").toDouble // use the last q exogenous values taken to the pr root - val spec = hparam("spec").toInt // 0 - none, 1 - constant, 2 - linear, 3 -quadratic, 4 - sin, 5 = cos - val lwave = hparam("lwave").toDouble // wavelength (distance between peaks) - val cross = hparam("cross").toInt == 1 // whether to include ENDO-EXO cross terms - val xet = scale (extreme (xe), bounds)(xe) // rescale x matrix to bounds - val yt = if tran != null then y.map (tran) // y transformed - else y - val xy = buildMatrix4TS (xet, yt, p, pp, pr, q, 
qp, qr, spec, lwave, cross, bakcast) - new ARX_Symb (xy, y, hh, xe.dim2, fname, tRng, hparam, itran, bakcast) - end rescale - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build an input matrix by combining up 5 trend terms, endogenous terms from - * endogenous variable y, exogenous terms from n = xe.dim2 exogenous variables xe, - * and endogenous-exogenous cross terms. Specifically, - * trend terms spec (0 to 5) trend terms - * endogenous terms: p lagged linear terms, p lagged power terms, p lagged root terms, - * exogenous terms: q * n lagged linear terms, q * n lagged power terms, q * n lagged root terms, - * cross terms: q * n lagged linear cross terms. - * @param xe the matrix of exogenous variable values - * @param y the response vector (time series data) - * @param p the number of lags for the endogenous variable (lags 1 to p) - * @param pp the power (defaults to quadratic) to raise the lags of the endogenous variable to - * @param pr the root (defaults to sqrt) to take of the lags of the endogenous variable - * @param q the number of lags for each exogenous variable (lags 1 to q) - * @param qp the power (defaults to quadratic) to raise the lags of the exogenous variables to - * @param qr the root (defaults to sqrt) to take of the lags of the exogenous variables - * @param spec the number of trend terms (added columns) - * 0 - none, 1 - constant 2 - linear, 3 - quadratic, 4 - sine, 5 - cosine - * @param lwave the wavelength (distance between peaks) - * @param cross whether to include cross terms between endogenous and exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ - def buildMatrix4TS (xe: MatrixD, y: VectorD, - p: Int, pp: Double, pr: Double, - q: Int, qp: Double, qr: Double, - spec: Int, lwave: Double, cross: Boolean, - bakcast: Boolean): MatrixD = - - // add trend terms and terms for the endogenous variable - var xy = makeMatrix4T (y, 
spec, lwave, bakcast) ++^ // lagged linear terms and trend terms - makeMatrix4L (y, p, bakcast) ++^ // lagged linear terms and trend terms - makeMatrix4P (y, p, pp, bakcast) ++^ // lagged power/quadratic terms - makeMatrix4P (y, p, pr, bakcast) // lagged root/square-root terms - - // add terms for the exogenous variables - if xe.dim2 > 0 then xy = xy ++^ makeMatrix4EXO (xe, q, 1, bakcast) ++^ // lagged linear terms - makeMatrix4EXO (xe, q, qp, bakcast) ++^ // lagged power/quadratic terms - makeMatrix4EXO (xe, q, qr, bakcast) // lagged root/square-root terms - - // add cross terms of the endogenous and exogenous variables - if cross then - val yxe = y *~: xe // element-wise multiplication of vector y and matrix xe - xy = xy ++^ makeMatrix4EXO (yxe, q, 1, bakcast) // lagged linear cross terms - end if - xy - end buildMatrix4TS - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Form an array of names for the features included in the model. - * @param spec the number of trend terms - * @param p the number of lags for the endogenous variable (lags 1 to p) - * @param n_exo the number of exogenous variable - * @param q the number of lags for each exogenous variable (lags 1 to q) - */ - def formNames (spec: Int, p: Int, n_exo: Int, q: Int): Array [String] = - val names = ArrayBuffer [String] () - for i <- p to 1 by -1 do names += s"yl$i~" - for j <- 0 until n_exo; k <- q to 1 by -1 do names += s"xe${j}l$k" - for j <- 0 until n_exo; k <- q to 1 by -1 do names += s"xe${j}l$k^" - for j <- 0 until n_exo; k <- q to 1 by -1 do names += s"xe${j}l$k~" - MakeMatrix4TS.formNames (spec, p, true) ++ names.toArray - end formNames - -end ARX_Symb - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest` main function tests the `ARX_Symb` class on real data: - * Forecasting Lake Levels using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRX_SymbTest - * -@main def aRX_SymbTest (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = ARX_Symb (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, mod.getYb, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - -end aRX_SymbTest - */ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest2` main function tests the `ARX_Symb` class on real data: - * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRX_SymbTest2 - * -@main def aRX_SymbTest2 (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = ARX_Symb (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.rollValidate () // TnT with Rolling Validation - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - -end aRX_SymbTest2 - */ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest3` main function tests the `ARX_Symb` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * > runMain scalation.modeling.forecasting.aRX_SymbTest3 - */ -@main def aRX_SymbTest3 (): Unit = - -// val exo_vars = NO_EXO - val exo_vars = Array ("hosp_patients") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - hp("lwave") = 20 // wavelength (distance between peaks) - - for p <- 1 to 5; s <- 1 to 2; q <- 1 to 3 do // number of lags; trend; number of exo lags - hp("p") = p // endo lags - hp("q") = q // exo lags - hp("spec") = s // trend specification: 0, 1, 2, 3, 5 - val mod = ARX_Symb (xe, y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // train and test on full dataset - println (mod.summary ()) // statistical summary of fit - -// mod.setSkip (p) // full AR-formula available when t >= p - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (y, mod.getYf) // QoF for each horizon -// Forecaster.evalForecasts (mod, mod.getYb, hh) -// println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") -// println (s"Final In-ST Forecast Matrix yf = ${mod.getYf.shiftDiag}") - end for - -end aRX_SymbTest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest4` main function tests the `ARX_Symb` class on real data: - * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * > runMain scalation.modeling.forecasting.aRX_SymbTest4 - */ -@main def aRX_SymbTest4 (): Unit = - -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val exo_vars = Array ("icu_patients", "hosp_patients") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - hp("lwave") = 20 // wavelength (distance between peaks) - - for p <- 1 to 5; s <- 1 to 1 do // number of lags; trend - hp("p") = p // endo lags -// hp("q") = 1 // exo lags - hp("q") = min (2, p) // try various rules - hp("spec") = s // trend specification: 0, 1, 2, 3, 5 - val mod = ARX_Symb (xe, y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // use customized trainNtest_x - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set -// println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - end for - -end aRX_SymbTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest5` main function tests the `ARX_Symb` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * This version performs feature selection. 
- * > runMain scalation.modeling.forecasting.aRX_SymbTest5 - */ -@main def aRX_SymbTest5 (): Unit = - - val exo_vars = Array ("hosp_patients") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - val p = 6 - val q = 4 - hp("p") = p // endo lags - hp("pp") = 2.0 // use 1.5 for the power/exponent (default is 2) - hp("q") = q // exo lags - hp("qp") = 2.0 // use 1.5 for the power/exponent (default is 2) - hp("spec") = 5 // trend specification: 0, 1, 2, 3, 5 - hp("lwave") = 20 // wavelength (distance between peaks) - - val mod = ARX_Symb (xe, y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // train and test on full dataset - println (mod.summary ()) // statistical summary of fit - -// mod.setSkip (p) // full AR-formula available when t >= p - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (y, mod.getYf) // QoF for each horizon -// Forecaster.evalForecasts (mod, mod.getYb, hh) -// println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - - banner ("Feature Selection Technique: Forward") - val (cols, rSq) = mod.forwardSelAll () // R^2, R^2 bar, sMAPE, R^2 cv -// val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, sMAPE, R^2 cv - val k = cols.size - println (s"k = $k") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "R^2 cv"), - s"R^2 vs n for ${mod.modelName}", lines = true) - println (s"rSq = $rSq") - -end aRX_SymbTest5 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX_Symb.scala.bak3 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX_Symb.scala.bak3 deleted file mode 100644 index 659c2157b..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX_Symb.scala.bak3 +++ /dev/null @@ -1,466 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Yousef Fekri Dabanloo - * @version 2.0 - * @date Tue Jan 14 15:47:45 EST 2025 - * @see LICENSE (MIT style license file). - * - * @note Model: Auto-Regressive on lagged y and xe with SR terms (ARX_Symb) using OLS - * - * @see `scalation.modeling.Regression` - */ - -package scalation -package modeling -package forecasting - -import scala.collection.mutable.ArrayBuffer -import scala.math._ - -import scalation.mathstat._ - -import MakeMatrix4TS._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Symb` class provides time series analysis capabilities for ARX Symbolic - * Regression (SR) models. These models include trend, linear, power, root, and cross terms - * for the single endogenous (y) variable and zero or more exogenous (xe) variables. - * Given time series data stored in vector y and matrix xe, its next value y_t = combination - * of last p values of y, y^p, y^r and the last q values of each exogenous variable xe_j, - * again in linear, power and root forms (as well as ENDO-EXO cross terms). - * - * y_t = b dot x_t + e_t - * - * where y_t is the value of y at time t, x_t is a vector of inputs, and e_t is the - * residual/error term. - * @see `MakeMatrix4TS` for hyper-parameter specifications. 
- * @param x the data/input matrix (lagged columns of y and xe) @see `ARX_Symb.apply` - * @param y the response/output vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param n_exo the number of exogenous variables - * @param fname the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) - * @param fEndo the array of functions used to transform endogenous variables - * @param fExo the array of functions used to transform exogenous variables - * @param itran the inverse transformation to return to the original scale (defaults to null) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ -class ARX_Symb (x: MatrixD, y: VectorD, hh: Int, n_exo: Int, fname: Array [String], - tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array [FunctionS2S] = Array (powTo(1.5), log1p), - fExo: Array [FunctionS2S] = Array (powTo(1.5), log1p), - val itran: FunctionS2S = null, bakcast: Boolean = false) // backcast value used only `MakeMatrix4TS` - extends Forecaster_Reg (x, y, hh, fname, tRng, hparam, bakcast): // no automatic backcasting - - private val debug = debugf ("ARX_Symb", true) // debug function - private val n_fEn = fEndo.length // number of functions used to map endogenous variables - private val n_fEx = fExo.length // number of functions used to map exogenous variables - private val p = hparam("p").toInt // use the last p values (p lags) - private val q = hparam("q").toInt // use the last q exogenous values (q lags) - private val cross = hparam("cross").toInt == 1 // whether to include ENDO-EXO cross terms - private val spec = hparam("spec").toInt // trend terms: 0 - none, 1 - constant, 2 - linear, 3 - quadratic - // 4 - sine, 5 cosine - modelName = s"ARX_Symb($p, $q, $n_exo)" - - debug ("init", s"$modelName with with $n_exo exogenous variables and additional 
term spec = $spec") - debug ("init", s"[ x | y ] = ${x :^+ y}") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forge a new vector from the first spec values of x, the last p-h+1 values - * of x (past values) and recent values 1 to h-1 from the forecasts. - * @param xx the t-th row of the input matrix (lagged actual values) - * @param yy the t-th row of the forecast matrix (forecasted future values) - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forge (xx: VectorD, yy: VectorD, h: Int): VectorD = - val x_trend = xx(0 until spec) // get trend values - - // add terms for the endogenous variable - val n_endo = spec + p // number of trend + endogenous values - val x_act = xx(n_endo-(p+1-h) until n_endo) // get actual lagged y-values (endogenous) - val nyy = p - x_act.dim // number of forecasted values needed - val x_fcast = yy(h-nyy until h) // get forecasted y-values - - val xy_org = x_act ++ x_fcast // original values before any mapping - var xy = xy_org - for i <- fEndo.indices do xy = xy ++ xy_org.map (fEndo(i)) - - // add terms for the exogenous variables - var exo_org = hide (xx(n_endo+n_fEn*p until n_endo+n_fEn*p + q), h) - for j <- 1 until n_exo do // for the j-th exogenous variable - exo_org = exo_org ++ hide (xx(n_endo+n_fEn*p + j*q until n_endo+n_fEn*p + (j+1)*q), h) // get actual lagged xe-values for exogenous variable j - - var exo = exo_org - for i <- fExo.indices do exo = exo ++ exo_org.map (fExo(i)) - - // add endogenous-exogenous cross terms, if any - if cross then - x_trend ++ xy ++ exo ++ hide (xx(spec + (1 + n_fEn)*p + (1 + n_fEx)*n_exo*q until xx.dim), h) - else - x_trend ++ xy ++ exo - end forge - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Hide values at the end of vector z (last h-1 values) as the increasing horizon - * turns them in future values (hence unavailable). 
Set these values to either - * zero (the default) or the last available value. - * @param z the vector to shift - * @param h the current horizon (number of steps ahead to forecast) - * @param fill whether to backfill with the rightmost value (true) or with 0 (false) - */ - def hide (z: VectorD, h: Int, fill: Boolean = true): VectorD = - val lst = z.dim - h // last available index position in z - val zl = if lst >= 0 then z(lst) else 0.0 // last available z value per horizon - val z_ = new VectorD (z.dim) - for k <- z.indices do - z_(k) = if k <= lst then z(k) else if fill then zl else 0.0 - z_ - end hide - -end ARX_Symb - -import Example_Covid._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Symb` companion object provides factory methods for the `ARX_Symb` class. - */ -object ARX_Symb: - - private val bounds = (1.0, 2.0) // (lower, upper) bounds for rescaling - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX_Symb` object by building an input matrix xy and then calling the - * `ARX_Symb` constructor. 
- * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param fname_ the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param fEndo the array of functions used to transform endogenous variables - * @param fExo the array of functions used to transform exogenous variables - * @param tran the transformation function (defaults to null, could use log1p) - * @param itran the inverse transformation function to rescale predictions to original y scale - * (defaults to null, could use expm1) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ - def apply (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, - tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array [FunctionS2S] = Array (powTo(1.5), log1p), - fExo: Array [FunctionS2S] = Array (powTo(1.5), log1p), - tran: FunctionS2S = null, itran: FunctionS2S = null, - bakcast: Boolean = false): ARX_Symb = - val n_fEn = fEndo.length // number of functions used to map endogenous variables - val n_fEx = fExo.length // number of functions used to map exogenous variables - val p = hparam("p").toInt // use the last p values - val q = hparam("q").toInt // use the last q exogenous values - val spec = hparam("spec").toInt // 0 - none, 1 - constant, 2 - linear, 3 -quadratic, 4 - sin, 5 = cos - val lwave = hparam("lwave").toDouble // wavelength (distance between peaks) - val cross = hparam("cross").toInt == 1 // whether to include ENDO-EXO cross terms - val yt = if tran != null then y.map (tran) // y transformed - else y - val xy = buildMatrix4TS (xe, yt, p, q, spec, lwave, fEndo, fExo, cross, bakcast) - val n_exo = xe.dim2 - val fname = if fname_ == null then formNames (spec, p, n_exo, q, n_fEn, n_fEx, cross) - else fname_ - new ARX_Symb (xy, y, 
hh, n_exo, fname, tRng, hparam, fEndo, fExo, itran, bakcast) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX_Symb` object by building an input matrix xy and then calling the - * `ARX_Symb` constructor, with rescaling of endogneous and exogenous variable values. - * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param fname_ the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param fEndo the array of functions used to transform endogenous variables - * @param fExo the array of functions used to transform exogenous variables - * @param tran the transformation function (defaults to null, could use log1p) - * @param itran the inverse transformation function to rescale predictions to original y scale - * (defaults to null, could use expm1) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ - def rescale (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, - tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array[FunctionS2S] = Array(powTo(1.5), log1p), - fExo: Array[FunctionS2S] = Array(powTo(1.5), log1p), - tran: FunctionS2S = null, itran: FunctionS2S = null, - bakcast: Boolean = false): ARX_Symb = - val n_fEn = fEndo.length // number of functions used to map endogenous variables - val n_fEx = fExo.length // number of functions used to map exogenous variables - val p = hparam("p").toInt // use the last p values - val q = hparam("q").toInt // use the last q exogenous values - val spec = hparam("spec").toInt // 0 - none, 1 - constant, 2 - linear, 3 -quadratic, 4 - sin, 5 = cos - val lwave = hparam("lwave").toDouble // wavelength (distance between peaks) - val cross = hparam("cross").toInt == 1 // whether to 
include ENDO-EXO cross terms - val xet = scale (extreme (xe), bounds)(xe) // rescale x matrix to bounds - val yt = if tran != null then y.map (tran) // y transformed - else y - val xy = buildMatrix4TS (xet, yt, p, q, spec, lwave, fEndo, fExo, cross, bakcast) - val n_exo = xe.dim2 - val fname = if fname_ == null then formNames(spec, p, n_exo, q, n_fEn, n_fEx, cross) - else fname_ - new ARX_Symb (xy, yt, hh, n_exo, fname, tRng, hparam, fEndo, fExo, itran, bakcast) - end rescale - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build an input matrix by combining up 5 trend terms, endogenous terms from - * endogenous variable y, exogenous terms from n = xe.dim2 exogenous variables xe, - * and endogenous-exogenous cross terms. Specifically, - * trend terms spec (0 to 5) trend terms - * endogenous terms: p lagged linear terms, p lagged power terms, p lagged root terms, - * exogenous terms: q * n lagged linear terms, q * n lagged power terms, q * n lagged root terms, - * cross terms: q * n lagged linear cross terms. 
- * @param xe the matrix of exogenous variable values - * @param y the response vector (time series data) - * @param p the number of lags for the endogenous variable (lags 1 to p) - * @param q the number of lags for each exogenous variable (lags 1 to q) - * @param spec the number of trend terms (added columns) - * 0 - none, 1 - constant 2 - linear, 3 - quadratic, 4 - sine, 5 - cosine - * @param lwave the wavelength (distance between peaks) - * @param fEndo the array of functions used to transform endogenous variables - * @param fExo the array of functions used to transform exogenous variables - * @param cross whether to include cross terms between endogenous and exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ - def buildMatrix4TS (xe: MatrixD, y: VectorD, - p: Int, q: Int, spec: Int, lwave: Double, - fEndo: Array [FunctionS2S], fExo: Array [FunctionS2S], - cross: Boolean, bakcast: Boolean): MatrixD = - // add trend terms and terms for the endogenous variable - val xt = makeMatrix4T (y, spec, lwave, bakcast) // trend terms - val xl = makeMatrix4L (y, p, bakcast) // lagged linear terms - - var xy = xt ++^ xl - for i <- fEndo.indices do xy = xy ++^ xl.map_(fEndo(i)) - - // add terms for the exogenous variables - if xe.dim2 > 0 then - val exo = makeMatrix4EXO (xe, q, 1, bakcast) - xy = xy ++^ exo - for i <- fExo.indices do xy = xy ++^ exo.map_(fExo(i)) - - // add cross terms of the endogenous and exogenous variables - if cross then - val yxe = y *~: xe // element-wise multiplication of vector y and matrix xe - xy = xy ++^ makeMatrix4EXO (yxe, q, 1, bakcast) // lagged linear cross terms - - xy - end buildMatrix4TS - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Form an array of names for the features included in the model. 
- * @param spec the number of trend terms - * @param p the number of lags for the endogenous variable (lags 1 to p) - * @param n_exo the number of exogenous variable - * @param q the number of lags for each exogenous variable (lags 1 to q) - * @param n_fEn the number of functions used to map endogenous variables - * @param n_fEx the number of functions used to map exogenous variables - * @param cross whether to include cross terms between endogenous and exogenous variables - */ - def formNames (spec: Int, p: Int, n_exo: Int, q: Int, n_fEn: Int, n_fEx: Int, cross: Boolean): - Array [String] = - val names = ArrayBuffer [String] () - for i <- 0 until n_fEn; j <- p to 1 by -1 do names += s"f$i(yl$j)" // function lags endo terms - - for j <- 0 until n_exo; k <- q to 1 by -1 do names += s"xe${j}l$k" // exo lag terms - for i <- 0 until n_fEx do - for j <- 0 until n_exo; k <- q to 1 by -1 do names += s"g$i(xe${j}l$k)" // function lags exo terms - - if cross then - for j <- 0 until n_exo; k <- q to 1 by -1 do names += s"xe${j}l$k*yl$k" // lagged cross terms - - MakeMatrix4TS.formNames (spec, p) ++ names.toArray - end formNames - -end ARX_Symb - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest` main function tests the `ARX_Symb` class on real data: - * Forecasting Lake Levels using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRX_SymbTest - * -@main def aRX_SymbTest (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = ARX_Symb (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - Forecaster.evalForecasts (mod, mod.getYb, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - -end aRX_SymbTest - */ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest2` main function tests the `ARX_Symb` class on real data: - * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRX_SymbTest2 - * -@main def aRX_SymbTest2 (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = ARX_Symb (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.rollValidate () // TnT with Rolling Validation - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - -end aRX_SymbTest2 - */ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest3` main function tests the `ARX_Symb` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * > runMain scalation.modeling.forecasting.aRX_SymbTest3 - */ -@main def aRX_SymbTest3 (): Unit = - -// val exo_vars = NO_EXO - val exo_vars = Array ("hosp_patients") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - hp("lwave") = 20 // wavelength (distance between peaks) -// hp("cross") = 1 // 1 => add cross terms - - val ff = Array (powTo (1.5), powTo (0.5), log1p, sin, cos) // functions to apply to endo lags - val gg = Array (powTo (1.5), powTo (0.5), log1p, sin, cos) // functions to apply to exo lags - - for p <- 6 to 6; s <- 1 to 1; q <- 6 to 6 do // number of lags; trend; number of exo lags - hp("p") = p // endo lags - hp("q") = q // exo lags - hp("spec") = s // trend specification: 0, 1, 2, 3, 5 - - val mod = ARX_Symb (xe, y, hh, fEndo = ff, fExo = gg) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // train and test on full dataset - println (mod.summary ()) // statistical summary of fit - -// mod.setSkip (p) // full AR-formula available when t >= p - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (y, mod.getYf) // QoF for each horizon -// Forecaster.evalForecasts (mod, mod.getYb, hh) -// println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") -// println (s"Final In-ST Forecast Matrix yf = ${mod.getYf.shiftDiag}") - end for - -end aRX_SymbTest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest4` main function tests the `ARX_Symb` class on real data: - * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. 
- * Test forecasts (h = 1 to hh steps ahead forecasts). - * > runMain scalation.modeling.forecasting.aRX_SymbTest4 - */ -@main def aRX_SymbTest4 (): Unit = - - val exo_vars = Array ("hosp_patients") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - hp("lwave") = 20 // wavelength (distance between peaks) -// hp("cross") = 1 // 1 => add cross terms - - val ff = Array (powTo (1.5), powTo (0.5), log1p, sin, cos) // functions to apply to endo lags - val gg = Array (powTo (1.5), powTo (0.5), log1p, sin, cos) // functions to apply to exo lags - - for p <- 6 to 6; s <- 1 to 1; q <- 6 to 6 do // number of lags; trend - hp("p") = p // endo lags - hp("q") = q // exo lags - hp("spec") = s // trend specification: 0, 1, 2, 3, 5 - - val mod = ARX_Symb (xe, y, hh, fEndo = ff, fExo = gg) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // use customized trainNtest_x - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set -// println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - end for - -end aRX_SymbTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRX_SymbTest5` main function tests the `ARX_Symb` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * This version performs feature selection. 
- * > runMain scalation.modeling.forecasting.aRX_SymbTest5 - */ -@main def aRX_SymbTest5 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - val p = 6 - val q = 6 - hp("p") = p // endo lags - hp("q") = q // exo lags - hp("spec") = 5 // trend specification: 0, 1, 2, 3, 5 - hp("lwave") = 20 // wavelength (distance between peaks) - hp("cross") = 1 // 1 => add cross terms - hp("lambda") = 1.0 // regularization/shrinkage parameter - - val ff = Array (powTo (1.5), powTo (0.5), log1p, sin, cos) // functions to apply to endo lags - val gg = Array (powTo (1.5), powTo (0.5), log1p, sin, cos) // functions to apply to exo lags - - val mod = ARX_Symb (xe, y, hh, fEndo = ff, fExo = gg) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // train and test on full dataset - println (mod.summary ()) // statistical summary of fit - - mod.setSkip(0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng(y.dim), 0) - - banner ("Feature Selection Technique: Stepwise") - val (cols, rSq) = mod.stepwiseSelAll () // R^2, R^2 bar, sMAPE, R^2 cv -// val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, sMAPE, R^2 cv - val k = cols.size - println (s"k = $k") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "R^2 cv"), - s"R^2 vs n for ${mod.modelName}", lines = true) - println (s"rSq = $rSq") - -end aRX_SymbTest5 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX_Symb_D.scala.bak 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX_Symb_D.scala.bak deleted file mode 100644 index 7aa9d5023..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARX_Symb_D.scala.bak +++ /dev/null @@ -1,276 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Yousef Fekri Dabanloo - * @version 2.0 - * @date Thu Jan 30 21:15:45 EST 2025 - * @see LICENSE (MIT style license file). - * - * @note Model: Auto-Regressive on lagged y and xe with SR terms (ARX_Symb_D) using OLS - Direct Forecasting - * - * @see `scalation.modeling.Regression` - */ - - -package scalation -package modeling -package forecasting - -import scalation.mathstat._ - -import MakeMatrix4TS._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Symb_D` class provides time series analysis capabilities for ARX_D Symbolic - * Regression (SR) models. These models include trend, linear, power, root, and cross terms - * for the single endogenous (y) variable and zero or more exogenous (xe) variables. - * Given time series data stored in vector y and matrix xe, its next value y_t = combination - * of last p values of y, y^p, y^r and the last q values of each exogenous variable xe_j, - * again in linear, power and root forms (as well as ENDO-EXO cross terms). - * - * y_t = b dot x_t + e_t - * - * where y_t is the value of y at time t, x_t is a vector of inputs, and e_t is the - * residual/error term. - * @see `MakeMatrix4TS` for hyper-parameter specifications. 
- * @param x the data/input matrix (lagged columns of y and xe) @see `ARX_Symb_D.apply` - * @param y the response/output vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param n_exo the number of exogenous variables - * @param fname the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults to `MakeMatrix4TS.hp`) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForms the map of transformations applied - */ -class ARX_Symb_D (x: MatrixD, y: MatrixD, hh: Int, n_exo: Int, fname: Array [String], - tRng: Range = null, hparam: HyperParameter = hp, - bakcast: Boolean = false, // backcast value used only `MakeMatrix4TS` - tForms: TransformMap = Map ("tForm_y" -> null)) - extends ARX_D (x, y, hh, n_exo, fname, tRng, hparam, bakcast, tForms): // no automatic backcasting - - private val debug = debugf ("ARX_Symb_D", true) // debug function - - modelName = s"ARX_Symb_D($p, $q, $n_exo)" - - debug ("init", s"$modelName with with $n_exo exogenous variables and additional term spec = $spec") - debug ("init", s"[ x | y ] = ${x ++^ y}") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARX_Symb_D` model to the times-series data in vector y_. - * Estimate the coefficient mattrix bb for a p-th order Auto-Regressive ARX_Symb_D(p) model. - * Uses OLS Matrix Fatorization to determine the coefficients, i.e., the bb matrix. 
- * @param x_ the data/input matrix (e.g., full x) - * @param y_ the training/full response vector (e.g., full y) - */ - def train_x (x_ : MatrixD, y_ : MatrixD): Unit = - debug ("train_x", s"$modelName, x_.dim = ${x_.dim}, y_.dim = ${y_.dim}") - reg.train (x_, y_) // train the multi-variate regression model - bb = reg.parameter // coefficients from regression - end train_x - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a QoF summary for a model with diagnostics for each predictor 'x_j' - * and the overall Quality of Fit (QoF). - * @param x_ the testing/full data/input matrix - * @param fname_ the array of feature/variable names - * @param b_ the parameters/coefficients for the model - * @param vifs the Variance Inflation Factors (VIFs) - */ - override def summary (x_ : MatrixD = getX, fname_ : Array[String] = reg.getFname, - b_ : VectorD = b, vifs: VectorD = reg.vif()): String = - super.summary (x_, fname_, b_, vifs) // summary from `Fit` - end summary - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t using the 1-step ahead forecast. - * - * y_t = b_0 + b_1 y_t-1 + b_2 y_t-2 + ... + b_p y_t-p = b dot x_t - * - * @param t the time point being predicted - * @param y_ the actual values to use in making predictions (ignored) - */ - def predict (t: Int, y_ : MatrixD): VectorD = - val yp = rectify (reg.predict (x(t)), nneg) - if t < y_.dim then - debug ("predict", s"@t = $t, x(t) = ${x(t)}, yp = $yp vs. y_ = ${y_(t)}") - yp - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, - * i.e., forecast the following time points: t+1, ..., t+h. - * Intended to work with rolling validation (analog of predict method). 
- * @param t the time point from which to make forecasts - * @param y_ the actual values to use in making predictions - */ - override def forecast (t: Int, y_ : VectorD): VectorD = - val pred = predict (t, MatrixD (y_).transpose) - for h <- 1 to hh do yf(t, h) = pred(h-1) - pred // yh is pred - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points all horizons h (h-steps ahead). - * Assign into FORECAST MATRIX and return the forecast matrix. - * @param y_ the matrix of actual response values - */ - override def forecastAll (y_ : MatrixD): MatrixD = - for t <- y_.indices do - val pred = predict (t, y_) - for h <- 1 to hh do yf(t, h) = pred(h - 1) - yf - end forecastAll - -end ARX_Symb_D - -import Example_Covid._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Symb_D` companion object provides factory methods for the `ARX_Symb_D` class. - */ -object ARX_Symb_D: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX_Symb_D` object by building an input matrix xy and then calling the - * `ARX_Symb_D` constructor. 
- * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param fname_ the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param fEndo the array of transforms used to transform endogenous variables - * @param fExo the array of transforms used to transform exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ - def apply (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, - tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array [Transform] = Array (log1pForm), - fExo: Array [Transform] = Array (log1pForm), - bakcast: Boolean = false): ARX_Symb_D = - val (n_fEndo, n_fExo) = (fEndo.length, fExo.length) - val (xy, tForms) = ARX_Symb.buildMatrix (xe, y, hparam, fEndo, fExo, bakcast) - val yy = makeMatrix4Y (y, hh, bakcast) - val fname = if fname_ == null then ARX_Symb.formNames (xe.dim2, hparam, n_fEndo, n_fExo) else fname_ - new ARX_Symb_D (xy, yy, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX_Symb_D` object by building an input matrix xy and then calling the - * `ARX_Symb_D` constructor, with rescaling of endogneous and exogenous variable values. 
- * @param xe the matrix of exogenous variable values - * @param y the endogenous/response vector (main time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param fname_ the feature/variable names - * @param tRng the time range, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param fEndo the array of transforms used to transform endogenous variables - * @param fExo the array of transforms used to transform exogenous variables - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - * @param tForm the transform for y - */ - def rescale (xe: MatrixD, y: VectorD, hh: Int, fname_ : Array [String] = null, - tRng: Range = null, hparam: HyperParameter = hp, - fEndo: Array [Transform] = Array (log1pForm), - fExo: Array [Transform] = Array (log1pForm), - bakcast: Boolean = false, - tForm: VectorD | MatrixD => Transform = x => zForm(x)): ARX_Symb_D = - val (n_fEndo, n_fExo) = (fEndo.length, fExo.length) - val (xy, tForms) = ARX_Symb.buildMatrix (xe, y, hparam, fEndo, fExo, bakcast, tForm) - val fname = if fname_ == null then ARX_Symb.formNames (xe.dim2, hparam, n_fEndo, n_fExo) else fname_ - val y_scl = tForms("tForm_y").f(y) - if tForms("tForm_y").getClass.getSimpleName == "zForm" then hp("nneg") = 0 - val yy = makeMatrix4Y (y_scl, hh, bakcast) - new ARX_Symb_D (xy, yy, hh, xe.dim2, fname, tRng, hparam, bakcast, tForms) - end rescale - -end ARX_Symb_D - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Symb_DTest3` main function tests the `ARX_Symb_D` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * > runMain scalation.modeling.forecasting.aRX_Symb_DTest3 - */ -@main def aRX_Symb_DTest3 (): Unit = - -// val exo_vars = NO_EXO - val exo_vars = Array ("icu_patients") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - hp("lwave") = 20 // wavelength (distance between peaks) - hp("cross") = 1 // 1 => add cross terms - - for p <- 1 to 6; s <- 1 to 5 do // number of lags; trend; number of exo lags - hp("p") = p // endo lags - hp("q") = 2 // exo lags - hp("spec") = s // trend specification: 0, 1, 2, 3, 5 - - val mod = ARX_Symb_D (xe, y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // train and test on full dataset - println (mod.summary ()) // statistical summary of fit - println (s"Before forecastAll Matrix yf = ${mod.getYf}") - -// mod.setSkip (p) // full AR-formula available when t >= p - mod.forecastAll (mod.getYy) // forecast h-steps ahead (h = 1 to hh) for all y - mod.diagnoseAll (y, mod.getYf) // QoF for each horizon - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") -// println (s"Final In-ST Forecast Matrix yf = ${mod.getYf.shiftDiag}") - end for - -end aRX_Symb_DTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX_Symb_DTest4` main function tests the `ARX_Symb_D` class on real data: - * Forecasting COVID-19 using Train and Test (TnT). - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * > runMain scalation.modeling.forecasting.aRX_Symb_DTest4 - */ -@main def aRX_Symb_DTest4 (): Unit = - - val exo_vars = Array ("icu_patients") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - val pp = 1.5 - hp("lwave") = 20 // wavelength (distance between peaks) -// hp("cross") = 1 // 1 => add cross terms - - val ff = Array [Transform] (powForm (VectorD (pp))) - val gg = Array [Transform] () - - for p <- 6 to 6; q <- 4 to 4; s <- 1 to 1 do // number of lags (endo, exo); trend - hp("p") = p // endo lags - hp("q") = q // exo lags - hp("spec") = s // trend specification: 0, 1, 2, 3, 5 - - val mod = ARX_Symb_D (xe, y, hh, fEndo = ff, fExo = gg) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // use customized trainNtest_x - println (mod.summary ()) // statistical summary of fit - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set -// println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - end for - -end aRX_Symb_DTest4 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARY_D.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARY_D.scala.bak deleted file mode 100644 index d974997a1..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/ARY_D.scala.bak +++ /dev/null @@ -1,373 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * 
@version 2.0 - * @date Sun Jun 30 13:27:00 EDT 2024 - * @see LICENSE (MIT style license file). - * - * @note Model: Auto-Regressive on lagged y (ARY_D) using OLS - Direct Forecasting - */ - -package scalation -package modeling -package forecasting2 - -import scala.math.{max, min} -//import scala.math.max - -import scalation.mathstat._ -import scalation.modeling.neuralnet.{RegressionMV => REGRESSION} - -import Example_Covid.loadData_y -import Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARY_D` class provides basic time series analysis capabilities for - * ARY_D models. ARY_D models are often used for forecasting. - * `ARY_D` uses DIRECT (as opposed to RECURSIVE) multi-horizon forecasting. - * Given time series data stored in vector y, its next value y_t = combination of last p values. - * - * y_t = b dot x_t + e_t - * - * where y_t is the value of y at time t and e_t is the residual/error term. - * @param x the data/input matrix (lagged columns of y) @see `ARY_D.apply` - * @param y the response/output matrix (column per horizon) (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters (defaults to `ARY.hp`) - * @param bakcast whether a backcasted value is prepended to the time series (defaults to false) - */ -class ARY_D (x: MatrixD, y: MatrixD, hh: Int, tt: VectorD = null, - hparam: HyperParameter = ARY.hp, - bakcast: Boolean = false) - extends Forecaster (y(?, 0), hh, tt, hparam, bakcast): // no automatic backcasting, @see `ARY_D.appky` - - private val debug = debugf ("ARY_D", true) // flaw function -// private val flaw = flawf ("ARY_D") // flaw function - private val p = hparam("p").toInt // use the last p values (p lags) - private val spec = hparam("spec").toInt // additional terms: 0 => none, 1 => constant, 2 => linear - private val reg = new REGRESSION (x, y, 
null, hparam) // delegate training to multi-variate regression - private var bb: MatrixD = null // use parameter matrix bb instead of vector b - - modelName = s"ARY_D($p)" - - debug ("init", s"$modelName with additional term spec = $spec") - debug ("init", s"[ x | y ] = ${x ++^ y}") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the data/input matrix built from lagged y values. - */ - override def getX: MatrixD = x - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the used response/output matrix y. - */ - def getYy: MatrixD = y - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARY_D` model to the times-series data in vector y_. - * Estimate the coefficient vector b for a p-th order Auto-Regressive ARY_D(p) model. - * Uses OLS Matrix Fatorization to determine the coefficients, i.e., the b (φ) vector. - * @param x_ the data/input matrix (e.g., full x) - * @param y_ the training/full response vector (e.g., full y) - */ - def train (x_ : MatrixD, y_ : MatrixD): Unit = - debug ("train", s"$modelName, x_.dim = ${x_.dim}, y_.dim = ${y_.dim}") - reg.train (x_, y_) // train the multi-variate regression model - bb = reg.parameter // coefficients from regression - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train and test the forecasting model y_ = f(y-past) + e and report its QoF - * and plot its predictions. Return the predictions and QoF. - * NOTE: must use `trainNtest_x` when an x matrix is used, such as in `ARY_D`. 
- * @param x_ the training/full data/input matrix (defaults to full x) - * @param y_ the training/full response/output vector (defaults to full y) - * @param xx the testing/full data/input matrix (defaults to full x) - * @param yy the testing/full response/output vector (defaults to full y) - */ - def trainNtest_x (x_ : MatrixD = x, y_ : MatrixD = y)(xx: MatrixD = x, yy: MatrixD = y): - (VectorD, VectorD) = - train (x_, y_) // train the model on training set - val (yp, qof) = test (xx, yy) // test the model on testing set - println (report (qof)) // report on Quality of Fit (QoF) - (yp, qof) - end trainNtest_x - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of a forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. - * Must override to get Quality of Fit (QoF). - * @param x_null the data/input matrix - * @param y_ the actual testing/full response/output matrix - */ - def test (x_ : MatrixD, y_ : MatrixD): (VectorD, VectorD) = - val m = y_.dim - 1 - predictAll (y_) // make all predictions - saved in yf - debug ("test", s"x_.dims = ${x_.dims}, y_.dims, ${y_.dims}, yf.dims = ${yf.dims}") - - val y0 = y_(0 until m, 0) // actual values (except last) for h = 1 - val yf1 = yf(0 until m, 1) // forecasted values for h = 1 - new Plot (null, y0, yf1, s"test: Plot of y0, yf1 for $modelName vs. t", true) - val df = max (1, parameter.size - 1) // degrees of freedom for model - resetDF (df, y0.dim - df) // reset the degrees of freedom - (yf1, diagnose (y0, yf1)) // return predicted and QoF vectors - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t using the 1-step ahead forecast. - * - * y_t = b_0 + b_1 y_t-1 + b_2 y_t-2 + ... 
+ b_p y_t-p = b dot x_t - * - * FIX - parameter order is in conflict with AR models. - * @param t the time point being predicted - * @param y_ the actual values to use in making predictions (ignored) - */ - def predict (t: Int, y_ : MatrixD): VectorD = - val yp = reg.predict (x(t)) -// debug ("predict", s"@t = $t, x(t) = ${x(t)}, yp = $yp vs. y_ = ${y_(t)}") - yp - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict all values (for all horizons) corresponding to the given time series vector y_. - * Create FORECAST MATRIX yf and return it. - * Note `forecastAll` simply returns the values produced by `predictAll`. - * @param y_ the actual time series values to use in making predictions - */ - def predictAll (y_ : MatrixD): MatrixD = - for t <- 0 until y_.dim do yf(t, 1 until hh+1) = predict (t, y_) - yf - end predictAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size hh, h = 1 to hh-steps ahead forecasts for the model, - * i.e., forecast the following time points: t+1, ..., t+h. - * Intended to work with rolling validation (analog of predict method). - * @param t the time point from which to make forecasts - * @param y_ the actual values to use in making predictions - */ -// def forecast (t: Int, y_ : MatrixD): VectorD = reg.predict (x(t)) - override def forecast (t: Int, y_ : VectorD): VectorD = - println ("IN FORECAST") - val pred = reg.predict (x(min (t+2, x.dim-1))) // FIX - why t+1 -// val pred = reg.predict (x(t)) - for h <- 1 to hh do yf(t, h) = pred(h-1) - pred // yh is pred - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through hh-steps ahead). 
- * Simply return the FORECAST MATRIX yf created by `predictAll`, where - * - * yf(t, h) = h-steps ahead forecast for y_t - * - * @param y_ the actual values to use in making forecasts - */ - override def forecastAll (y_ : VectorD = yb): MatrixD = yf - -end ARY_D - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARY_D` companion object provides factory methods for the - * `ARY_D` class. - */ -object ARY_D: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARY_D` object by building an input matrix x and then calling the constructor. - * @param y the response vector (time series data) - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, hh: Int, tt: VectorD = null, hparam: HyperParameter = ARY.hp): ARY_D = - val p = hparam("p").toInt // use the last p values - val spec = hparam("spec").toInt // 0 => none, 1 => constant, 2 => linear trend - val (x, yy) = buildMatrix4TS (y, p, hh, spec) - new ARY_D (x, yy, hh, tt, hparam) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given a response vector (time series) y, build and return an input/predictor MATRIX x - * and a forecasting target MATRIX yy for today and all future horizons up to hh. - * @param y the given output/response vector - * @param p the maximum lag included (inclusive) - * @param hh the maximum forecasting horizon (h = 1, 2, ... 
hh) - * @param spec the specification for adding columns (0 => none, 1 => constant 2 => linear) - */ - def buildMatrix4TS (y: VectorD, p: Int, hh: Int, spec: Int = 1): (MatrixD, MatrixD) = - val yb = WeightedMovingAverage.backcast (y) +: y // y prepended with one backcast - val m = yb.dim - val x = new MatrixD (m, spec + p) // columns for spec + each lag - val yy = new MatrixD (m, hh) // yy = [ y_h ] for h = 1 to hh - if spec >= 1 then x(?, 0) = VectorD.one (m) // intercept/constant term - if spec == 2 then x(?, 1) = VectorD.range (0, m) // time trend - - for t <- x.indices do - for j <- 1 to p do x(t, spec + p - j) = yb(max0 (t + 1 - j)) // x -> lags - for h <- yy.indices2 do yy(t, h) = if t+h+1 >= m then -0.0 else yb(t+h+1) // yy -> actual and horizons - - println (s"buildMatrix4TS: x.dims = ${x.dims}, yy.dims = ${yy.dims}") -// println (x ++^ yy) - (x, yy) - end buildMatrix4TS - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Evaluate the quality of point and optionally interval forecast for horizon (h = 1 to hh). - * @param mod the forecasting model to be evaluated - * @param yy the complete shifted per horizon actual time series values - * @param hh the maximum forecasting horizon (h = 1 to hh) - * @param ints whether to evaluate prediction interval forecasts as well as point forecasts - */ - def evalForecasts (mod: Forecaster, yy: MatrixD, hh: Int, ints: Boolean = false): Unit = - val ftMat = new MatrixD (hh, Fit.N_QoF) - banner (s"Evaluate ${mod.modelName}'s QoF for horizons 1 to $hh:") - val m = yy.dim - - for h <- 1 to hh do - val yh = yy(0 until m-h, h-1) // h-steps ahead actual values - val yfh = mod.getYf(0 until m-h, h) // h-steps ahead forecast - val qof = mod.diagnose (yh, yfh) - ftMat(h-1) = qof -// println (FitM.fitMap (qof, qoF_names)) // evaluate h-steps ahead forecasts - new Plot (null, yh, yfh, s"evalForecast: Plot of yh, yfh for ${mod.modelName} vs. 
t @h = $h", true) - -/* - if ints then - val (low, up) = mod.forecastAtI (yy, yfh, h) // prediction interval forecasts - val qof_all = mod.diagnose_ (yy, yfh, low, up) // fully evaluate h-steps ahead forecasts - mod.show_interval_forecasts (yy, yfh, low, up, qof_all, h) -*/ - end for - - println ("fitMap qof = ") - println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) - end evalForecasts - -end ARY_D - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRY_DTest` main function tests the `ARY_D` class on real data: - * Forecasting Lake Levels using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting2.aRY_DTest - */ -@main def aRY_DTest (): Unit = - - val hh = 3 // maximum forecasting horizon - - val mod = ARY_D (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - ARY_D.evalForecasts (mod, mod.getYy, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - -end aRY_DTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRY_DTest2` main function tests the `ARY_D` class on real data: - * Forecasting Lake Levels using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting2.aRY_DTest2 - */ -@main def aRY_DTest2 (): Unit = - - val hh = 3 // maximum forecasting horizon - val rc = 1 // retraining cycle - - val mod = ARY_D (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on LakeLevels Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.rollValidate (rc) // TnT with Rolling Validation - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - -end aRY_DTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRY_DTest3` main function tests the `ARY_D` class on real data: - * Forecasting COVID-19 using In-Sample Testing (In-ST). - * Test forecasts (h = 1 to hh steps ahead forecasts). - * > runMain scalation.modeling.forecasting2.aRY_DTest3 - */ -@main def aRY_DTest3 (): Unit = - - val yy = loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - for p <- 1 to 5 do // number of lags - ARY.hp("p") = p - val mod = ARY_D (y, hh) // create model for time series data - banner (s"In-ST Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // train and test on full dataset - - mod.forecastAll () // forecast h-steps ahead (h = 1 to hh) for all y - ARY_D.evalForecasts (mod, mod.getYy, hh) - println (s"Final In-ST Forecast Matrix yf = ${mod.getYf}") - end for - -end aRY_DTest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRY_DTest4` main function tests the `ARY_D` class on real data: - * Forecasting COVID-19 using Train-n-Test Split (TnT) with Rolling Validation. - * Test forecasts (h = 1 to hh steps ahead forecasts). 
- * > runMain scalation.modeling.forecasting2.aRY_DTest4 - */ -@main def aRY_DTest4 (): Unit = - - val yy = loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val rc = 1 // retraining cycle - val hh = 6 // maximum forecasting horizon - - for p <- 1 to 5 do // number of lags - ARY.hp("p") = p - val mod = ARY_D (y, hh) // create model for time series data - banner (s"TnT Forecasts: ${mod.modelName} on COVID-19 Dataset") - mod.trainNtest_x ()() // use customized trainNtest_ - - mod.rollValidate (rc) // TnT with Rolling Validation - println (s"Final TnT Forecast Matrix yf = ${mod.getYf}") - end for - -end aRY_DTest4 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRY_DTest5` main function tests the `ARY_D` object's ability to build input - * matrices. Build an input/predictor data matrix for the COVID-19 dataset. - * > runMain scalation.modeling.forecasting2.aRY_DTest5 - */ -@main def aRY_DTest5 (): Unit = - - val yy = loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val p = 3 // the number of lags - val hh = 2 // the number of horizons - val spec = 1 // additional terms - - println (s"y = $y") - - val (x, y_) = ARY_D.buildMatrix4TS (y, p, hh, spec) - - println (s"y.dim = ${y.dim}, x.dims = ${x.dims}, y_.dims = ${y_.dims}") - -end aRY_DTest5 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/Example_Covid.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/Example_Covid.scala.bak deleted file mode 100644 index 8b489c71a..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting/old/Example_Covid.scala.bak +++ /dev/null @@ -1,942 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sat Jul 29 11:30:42 EDT 2023 - * @see LICENSE (MIT style license file). 
- * - * @note Example Time Series Data: Covid-19 Weekly Data - */ - -package scalation -package modeling -package forecasting2 - -import scala.math.min -import scala.runtime.ScalaRunTime.stringOf - -import scalation.mathstat._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Example_Covid` object provides a convenient way to load Covid-19 weekly data. - * See test cases (odd In-ST, even TnT Split) below for - * Loss/Equations Optimizer - * (a: 1, 2) Plot and EDA - - - * Univariate: - * (b: 3, 4) Baseline Models none or CSSE none or various - * (c: 5, 6) AR(p) Models Yule-Walker Durbin-Levinson - * (d: 7, 8) ARMA(p, q=0) Models CSSE BFGS - * (e: 9, 10) ARY(p) Models CSSE QR Factorization - * (f: 11, 12) ARY_D(p) Models CSSE + Direct QR Factorization - * (g: 13, 14) ARMA(p, q=1) Models CSSE BFGS - * Multivariate: - * (h: 15, 16) ARX(p, 2, 2) Models CSSE QR Factorization - * (i: 17, 18) ARX_D Models CSSE + Direct QR Factorization - * (j: 19, 20) ARX_Quad_D Models CSSE QR Factorization - * - * Known Bugs: 13, 14 - */ -object Example_Covid: - - import scala.collection.mutable.HashMap - - val fileName = "covid_19_weekly.csv" - - val header = Array ("new_cases", - "new_deaths", - "reproduction_rate", - "icu_patients", - "hosp_patients", - "new_tests", - "positive_rate", - "tests_per_case", - "people_vaccinated", - "people_fully_vaccinated", - "total_boosters", - "new_vaccinations", - "excess_mortality_cumulative_absolute", - "excess_mortality_cumulative", - "excess_mortality", - "excess_mortality_cumulative_per_million") - - val response = "new_deaths" // main response/output variable - val NO_EXO = Array.ofDim [String] (0) // empty array => no exogenous variables - - val yy = Example_Covid.loadData_y () -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Load the Covid-19 weekly data into a matrix for the exogenous 
variables x - * and a vector for the response/endogenous variable y. - * @param x_strs the column names for the exogenous variables x - * @param y_str the column name for the endogenous variable y - * @param trim the number of initial rows to trim away (e.g., they are all 0) - */ - def loadData (x_strs: Array [String], y_str: String = response, trim: Int = 0): (MatrixD, VectorD) = - val col = HashMap [String, Int] () - for i <- header.indices do col += header(i) -> i - - val data = MatrixD.load (fileName, 1+trim, 1) // skip first row (header) + trim first column - val x_cols = for s <- x_strs yield col(s) - (data(?, x_cols), data(?, col(y_str))) - end loadData - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Load the Covid-19 weekly data into a vector for the response/endogenous variable y. - * @param y_str the column name for the endogenous variable y - * @param trim the number of initial rows to trim away (e.g., they are all 0) - */ - def loadData_y (y_str: String = response, trim: Int = 0): VectorD = - val col = HashMap [String, Int] () - for i <- header.indices do col += header(i) -> i - - val data = MatrixD.load (fileName, 1+trim, 1) // skip first row (header) + trim first column - data(?, col(y_str)) - end loadData_y - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Load the Covid-19 weekly data into a matrix for the variables y. 
- * @param y_str the column names for the variables y (e.g., used in a VAR model) - * @param trim the number of initial rows to trim away (e.g., they are all 0) - */ - def loadData_yy (y_strs: Array [String], trim: Int = 0): MatrixD = - val col = HashMap [String, Int] () - for i <- header.indices do col += header(i) -> i - - val data = MatrixD.load (fileName, 1+trim, 1) // skip first row (header) + trim first column - val y_cols = for s <- y_strs yield col(s) - data(?, y_cols) - end loadData_yy - -end Example_Covid - -import Example_Covid._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest` main function tests the `Example_Covid` object. - * Prints and plots the response column ("new_deaths"). - * > runMain scalation.modeling.forecasting2.example_CovidTest - */ -@main def example_CovidTest (): Unit = - - banner (s"Print the response = $response column for the Covid-19 dataset (${y.dim} points") - for i <- y.indices do println (s"$i \t ${y(i)}") - - banner (s"Plot the response = $response column for the Covid-19 dataset (${y.dim} points") - new Plot (null, y, null, s"y ($response)", lines = true) - -end example_CovidTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest2` main function tests the `Example_Covid` object. - * Performs Exploratory Data Analysis (EDA) to find relationships between - * contemporaneous variables. - * > runMain scalation.modeling.forecasting2.example_CovidTest2 - */ -@main def example_CovidTest2 (): Unit = - - import scala.collection.mutable.Set - - val (xx, yy) = loadData (header, response) -// val (x, y) = (xx, yy) // full - val (x, y) = (xx(0 until 116), yy(0 until 116)) // clip the flat end - - new Plot (null, y, null, s"y ($response)", lines = true) - - for j <- x.indices2 do - banner (s"EDA for response = $response vs. 
${header(j)}") - var xj = x(?, j) // get column j - xj = scaleV (extreme (xj), (0.0, 2.0))(xj) // rescale vector xj to [0, 2] - val xxj = MatrixD.fromVector (xj) -// val mod = SymbolicRegression.quadratic (xxj, y) -// val mod = SymbolicRegression.rescale (xxj, y, null, Set (1.0, 2.0, 3.0), cross = false) - val mod = SymbolicRegression (xxj, y, null, Set (0.5, 1.0, 2.0, 3.0), cross = false) - mod.trainNtest ()() - val yp = mod.predict (mod.getX) - println (mod.summary ()) - new Plot (xj, y, yp, s"y, yp ($response) vs. x_$j (${header(j)})") - end for - -end example_CovidTest2 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest3` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. - * Runs several baseline models for horizons 1 to 6, see sMAPE metrics below: - * - * 56.1873, 54.9320, 53.6656, 52.4506, 51.6106, 51.7282 Null - * 55.6298, 54.4006, 53.1310, 51.9187, 51.1081, 51.2892 Trend - * 25.1598, 33.1616, 43.7944, 51.4347, 58.7755, 64.7805 SMA - * 27.1668, 32.9085, 45.0652, 51.5055, 59.6247, 64.9615 WMA - * 20.0526, 30.5979, 40.0663, 48.4903, 56.1782, 63.2487 SES - * 20.3933, 30.9930, 40.4824, 48.8231, 56.4826, 63.4345 RW - * 19.7869, 30.1938, 39.6200, 48.1468, 55.8660, 63.0492 RWS - * 20.3547, 29.9510, 38.0585, 45.3859, 51.7718, 56.6248 AR(1) - * - * > runMain scalation.modeling.forecasting2.example_CovidTest3 - */ -@main def example_CovidTest3 (): Unit = - - val hh = 6 // max forecasting horizon - var mod: Forecaster = null - - new Plot (null, y, null, s"y ($response)", lines = true) - - banner ("In-ST Test: Null Model") - mod = new NullModel (y, hh) - mod.trainNtest ()() - mod.forecastAll (y) - mod.diagnoseAll (y, mod.getYf) // should agree with evalForecasts - Forecaster.evalForecasts (mod, mod.getYb, hh) - - banner ("In-ST Test: Trend Model") - mod = new TrendModel (y, hh) - mod.trainNtest ()() - mod.forecastAll (y) - 
mod.diagnoseAll (y, mod.getYf) // should agree with evalForecasts - Forecaster.evalForecasts (mod, mod.getYb, hh) - - banner ("In-ST Test: Simple Moving Average Model") - mod = new SimpleMovingAverage (y, hh) - mod.trainNtest ()() - mod.forecastAll (y) - mod.diagnoseAll (y, mod.getYf) // should agree with evalForecasts - Forecaster.evalForecasts (mod, mod.getYb, hh) - - banner ("In-ST Test: Weighted Moving Average Model") - mod = new WeightedMovingAverage (y, hh) - mod.trainNtest ()() - mod.forecastAll (y) - mod.diagnoseAll (y, mod.getYf) // should agree with evalForecasts - Forecaster.evalForecasts (mod, mod.getYb, hh) - - banner ("In-ST Test: Simple Exponential Smoothing Model") - mod = new SimpleExpSmoothing (y, hh) - mod.trainNtest ()() - mod.forecastAll (y) - mod.diagnoseAll (y, mod.getYf) // should agree with evalForecasts - Forecaster.evalForecasts (mod, mod.getYb, hh) - - banner ("In-ST Test: Random Walk Model") - mod = new RandomWalk (y, hh) - mod.trainNtest ()() - mod.forecastAll (y) - mod.diagnoseAll (y, mod.getYf) // should agree with evalForecasts - Forecaster.evalForecasts (mod, mod.getYb, hh) - - banner ("In-ST Test: Random Walk Slope Adjusted Model") - mod = new RandomWalkS (y, hh) - mod.trainNtest ()() - mod.forecastAll (y) - mod.diagnoseAll (y, mod.getYf) // should agree with evalForecasts - Forecaster.evalForecasts (mod, mod.getYb, hh) - - banner ("In-ST Test: Auto-Regressive AR(1) Model") - mod = new AR (y, hh) - mod.trainNtest ()() - mod.forecastAll (y) - mod.diagnoseAll (y, mod.getYf) // should agree with evalForecasts - Forecaster.evalForecasts (mod, mod.getYb, hh) - -end example_CovidTest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest4` main function tests the `Example_Covid` object. - * Uses Train-n-Test Split (TnT) with Rolling Validation. 
- * Runs several baseline models for horizons 1 to 6, see sMAPE metrics below: - * - * 57.1057, 60.0825, 62.9136, 64.7453, 67.9247, 70.6674 Null - * 61.9077, 65.1881, 68.7187, 71.4655, 73.9327, 75.9584 Trend - * 22.3044, 30.4325, 45.3661, 55.7217, 67.6973, 77.4038 SMA - * 23.8526, 30.0945, 46.9748, 55.8104, 68.7352, 77.7010 WMA - * 18.3769, 27.1712, 40.3425, 51.8124, 63.7356, 75.0046 SES - * 18.6713, 27.5720, 40.9387, 52.3496, 64.2481, 75.3015 RW - * 18.0855, 26.7084, 39.6941, 51.2218, 63.1873, 74.6834 RWS - * 19.1590, 31.1975, 44.4850, 55.3120, 65.5536, 74.4969 AR(1) - * - * > runMain scalation.modeling.forecasting2.example_CovidTest4 - */ -@main def example_CovidTest4 (): Unit = - - val hh = 6 // max forecasting horizon - - new Plot (null, y, null, s"y ($response)", lines = true) - - var mod: Forecaster = null - - banner ("TnT Test: Null Model") - mod = new NullModel (y, hh) - mod.trainNtest ()() - mod.setSkip (0) // start at beginning of test-set - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - - banner ("TnT Test: Trend Model") - mod = new TrendModel (y, hh) - mod.trainNtest ()() - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - - banner ("TnT Test: Simple Moving Average Model") - mod = new SimpleMovingAverage (y, hh) - mod.trainNtest ()() - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - - banner ("TnT Test: Weighted Moving Average Model") - mod = new WeightedMovingAverage (y, hh) - mod.trainNtest ()() - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - - banner ("TnT Test: Simple Exponential Smoothing Model") - mod 
= new SimpleExpSmoothing (y, hh) - mod.trainNtest ()() - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - - banner ("TnT Test: Random Walk Model") - mod = new RandomWalk (y, hh) - mod.trainNtest ()() - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - - banner ("TnT Test: Random Walk Slope Adjusted Model") - mod = new RandomWalkS (y, hh) - mod.trainNtest ()() - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - - banner ("TnT Test: Auto-Regressive AR(1) Model") - mod = new AR (y, hh) - mod.trainNtest ()() - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - -end example_CovidTest4 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest5` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. 
- * Runs Auto-Regressive AR(p) models for several p values and horizons 1 to 6, - * see sMAPE metrics below: - * - * 20.3547, 29.9510, 38.0585, 45.3859, 51.7718, 56.6248 AR(1) - * 18.1684, 27.0930, 35.1466, 41.4030, 46.7556, 51.7624 AR(2) - * 17.7411, 24.9003, 31.8377, 37.3797, 42.1010, 47.4027 AR(3) - * 17.5993, 24.4001, 30.4701, 35.1507, 39.6652, 44.3065 AR(4) - * 17.7440, 24.3782, 30.2910, 34.6374, 39.2784, 43.5109 AR(5) - * 18.0122, 24.6661, 30.5325, 34.8721, 39.2814, 43.0225 AR(6) - * 17.7321, 24.7313, 30.3868, 34.7377, 39.0677, 44.1712 AR(7) - * 17.7501, 25.1060, 30.9891, 35.6894, 40.2345, 45.3789 AR(8) - * 17.5918, 24.7878, 30.2912, 34.6213, 39.2452, 44.6205 AR(9) - * 17.6843, 24.6729, 29.9977, 34.6003, 38.6488, 43.8855 AR(10) - * - * > runMain scalation.modeling.forecasting2.example_CovidTest5 - */ -@main def example_CovidTest5 (): Unit = - - val hh = 6 // max forecasting horizon - val hp = AR.hp // hyper-parameters for AR family of models - - new Plot (null, y, null, s"y ($response)", lines = true) - - for p <- 1 to 10 do // AR hyper-parameter settings - hp("p") = p - val mod = new AR (y, hh) // create an AR model - banner (s"In-ST Test: ${mod.modelName} Model") - mod.trainNtest ()() - - mod.forecastAll (y) - mod.diagnoseAll (y, mod.getYf) - Forecaster.evalForecasts (mod, mod.getYb, hh) - end for - -end example_CovidTest5 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest6` main function tests the `Example_Covid` object. - * Uses Train-n-Test Split (TnT) with Rolling Validation. 
- * Runs Auto-Regressive AR(p) models for several p values and horizons 1 to 6, - * see sMAPE metrics below: - * - * 19.1590, 31.1975, 44.4850, 55.3120, 65.5536, 74.4969 AR(1) - * 17.1764, 27.8131, 41.0173, 52.3883, 62.4018, 71.3206 AR(2) - * 16.1569, 24.1092, 35.0634, 45.3502, 56.0450, 65.4998 AR(3) - * 15.2413, 23.2293, 30.1320, 40.3648, 48.8558, 57.8766 AR(4) - * 15.4399, 23.3058, 30.4161, 40.4655, 49.3913, 58.6573 AR(5) - * 15.7443, 22.8374, 29.7678, 38.5566, 45.5084, 50.8096 AR(6) - * 15.8906, 24.2516, 31.1198, 40.2877, 47.4982, 56.6783 AR(7) - * 15.8394, 24.8442, 31.2414, 40.4416, 47.5974, 56.3880 AR(8) - * 15.2112, 23.6265, 30.7560, 40.1489, 49.4426, 58.3781 AR(9) - * 15.7954, 23.7332, 32.8467, 42.5300, 52.3179, 60.5518 AR(10) - * - * > runMain scalation.modeling.forecasting2.example_CovidTest6 - */ -@main def example_CovidTest6 (): Unit = - - val hh = 6 // max forecasting horizon - val hp = AR.hp // hyper-parameters for AR family of models - - new Plot (null, y, null, s"y ($response)", lines = true) - - for p <- 1 to 10 do // AR hyper-parameter settings - hp("p") = p - val mod = new AR (y, hh) // create an AR model - banner (s"TnT Test: ${mod.modelName} Model") - mod.trainNtest ()() - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - end for - -end example_CovidTest6 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest7` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. 
- * Runs Auto-Regressive, Moving Average ARMA(p, 0) models for several p and - * horizons 1 to 6, see sMAPE metrics below: - * - * 20.2191, 29.9108, 38.1525, 45.5858, 52.2918, 57.3670 ARMA(1, 0) - * 17.7900, 25.3293, 33.3283, 39.5055, 44.9095, 50.6043 ARMA(2, 0) - * 17.4057, 23.9135, 30.5357, 35.5950, 40.6434, 46.4122 ARMA(3, 0) - * 17.2928, 23.6678, 29.5574, 34.0383, 38.9062, 44.1568 ARMA(4, 0) - * 17.2850, 23.6708, 29.5699, 34.0520, 38.9330, 44.2125 ARMA(5, 0) - * 17.3271, 23.9829, 29.9874, 34.6032, 39.0682, 43.6979 ARMA(6, 0) - * 17.2335, 24.0097, 29.9465, 34.3426, 38.9182, 44.4357 ARMA(7, 0) - * 17.2811, 23.7288, 29.5992, 34.0946, 38.6983, 44.1365 ARMA(8, 0) - * 17.2044, 23.6396, 29.5609, 34.2834, 38.9406, 44.1984 ARMA(9, 0) - * 17.2588, 23.6012, 29.4737, 34.3447, 39.0981, 44.1297 ARMA(10, 0) - * - * > runMain scalation.modeling.forecasting2.example_CovidTest7 - */ -@main def example_CovidTest7 (): Unit = - - val hh = 6 // max forecasting horizon - val hp = AR.hp // hyper-parameters for AR family of models - hp("q") = 0 // no MA terms => AR with different optimizer - - new Plot (null, y, null, s"y ($response)", lines = true) - - for p <- 1 to 10 do // ARMA hyper-parameter settings - hp("p") = p - val mod = new ARMA (y, hh) // create an ARMA model - banner (s"In-ST Test: ${mod.modelName} Model") - mod.trainNtest ()() - - mod.forecastAll () - mod.diagnoseAll (y, mod.getYf) - Forecaster.evalForecasts (mod, mod.getYb, hh) - end for - -end example_CovidTest7 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest8` main function tests the `Example_Covid` object. - * Uses Train-n-Test Split (TnT) with Rolling Validation. 
- * Runs Auto-Regressive, Moving Average ARMA(p, 0) models for several p values - * and horizons 1 to 6, see sMAPE metrics below: - * - * 19.0003, 30.3936, 43.8008, 54.8254, 65.3736, 74.5465 ARMA(1, 0) - * 17.0385, 26.7633, 39.4985, 51.0132, 61.2488, 70.4454 ARMA(2, 0) - * 16.0454, 22.1844, 31.7033, 41.1297, 51.6017, 61.3707 ARMA(3, 0) - * 15.2966, 20.7829, 27.7076, 36.3322, 41.5452, 49.0153 ARMA(4, 0) - * 15.6244, 20.6003, 29.0435, 36.8354, 43.1722, 48.1613 ARMA(5, 0) - * 15.6619, 23.1335, 32.0946, 41.3166, 50.0557, 60.0608 ARMA(6, 0) - * 16.0957, 22.2142, 32.4196, 39.8389, 47.6075, 51.5675 ARMA(7, 0) - * 15.8659, 25.6319, 36.0707, 45.6189, 54.9417, 58.8670 ARMA(8, 0) - * 15.5716, 24.2525, 34.1386, 44.2350, 55.1113, 60.8057 ARMA(9, 0) - * 14.9008, 22.6571, 30.4335, 41.6601, 50.1669, 61.2246 ARMA(10, 0) - * - * > runMain scalation.modeling.forecasting2.example_CovidTest8 - */ -@main def example_CovidTest8 (): Unit = - - val hh = 6 // max forecasting horizon - val hp = AR.hp // hyper-parameters for AR family of models - hp("q") = 0 // no MA terms => AR with different optimizer - - new Plot (null, y, null, s"y ($response)", lines = true) - - for p <- 1 to 10 do // ARMA hyper-parameter settings - hp("p") = p - val mod = new ARMA (y, hh) // create an ARMA model - banner (s"TnT Test: ${mod.modelName} Model") - mod.trainNtest ()() - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - end for - -end example_CovidTest8 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest9` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. 
- * Runs Auto-Regressive, Lagged Regression ARY(p) models for several p values and - * horizons 1 to 6, see sMAPE metrics below: - * - * 20.1794, 29.8589, 38.1450, 45.5634, 52.3478, 57.4474 ARY(1) - * 17.7728, 25.1705, 33.1900, 39.4218, 44.8621, 50.5991 ARY(2) - * 17.3594, 23.7550, 30.3838, 35.4514, 40.5868, 46.4292 ARY(3) - * 17.2457, 23.5122, 29.4110, 33.9350, 38.8422, 44.2303 ARY(4) - * 17.2314, 23.5178, 29.4345, 33.9602, 38.9022, 44.3249 ARY(5) - * 17.2503, 23.8232, 29.8341, 34.4885, 39.0138, 43.8011 ARY(6) - * 17.1625, 23.8385, 29.8227, 34.2751, 38.9853, 44.6092 ARY(7) - * 17.2067, 23.5579, 29.4741, 34.0077, 38.6431, 44.3218 ARY(8) - * 17.1326, 23.4530, 29.4149, 34.1103, 38.8254, 44.3564 ARY(9) - * 17.1791, 23.4175, 29.3213, 34.1509, 38.8917, 44.2659 ARY(10) - * - * > runMain scalation.modeling.forecasting2.example_CovidTest9 - */ -@main def example_CovidTest9 (): Unit = - - val hh = 6 // max forecasting horizon - val hp = ARY.hp - hp("lambda") = 1.0 // regularization parameter - - new Plot (null, y, null, s"y ($response)", lines = true) - - for p <- 1 to 10 do // ARY hyper-parameter settings - hp("p") = p - val mod = ARY (y, hh) // create an ARY model - banner (s"In-ST Test: ${mod.modelName} Model") - mod.trainNtest_x ()() // needs x matrix => use _x version - - mod.forecastAll () - mod.diagnoseAll (y, mod.getYf) - Forecaster.evalForecasts (mod, mod.getYb, hh) - end for - -end example_CovidTest9 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest10` main function tests the `Example_Covid` object. - * Uses Train-n-Test Split (TnT) with Rolling Validation. 
- * Runs Auto-Regressive, Lagged Regression ARY(p) models for several p values, - * and horizons 1 to 6, see sMAPE metrics below: - * - * 19.0003, 30.3936, 43.8008, 54.8254, 65.3736, 74.5465 ARY(1) - * 16.8486, 26.3959, 39.1085, 50.6966, 61.0053, 70.3446 ARY(2) - * 15.7448, 21.8608, 31.3677, 40.9140, 51.5319, 61.5140 ARY(3) - * 14.7953, 20.1791, 26.5422, 35.2717, 40.7200, 48.6407 ARY(4) - * 14.9856, 19.5241, 27.1485, 35.1070, 40.1716, 47.1898 ARY(5) - * 15.0238, 21.1032, 28.4153, 36.6326, 42.5539, 49.8734 ARY(6) - * 15.5620, 20.7860, 29.8501, 37.1646, 43.7716, 48.4778 ARY(7) - * 15.1719, 23.2761, 32.2952, 40.3584, 46.1975, 51.3488 ARY(8) - * 14.9497, 22.5065, 31.3207, 39.5034, 45.5495, 51.4103 ARY(9) - * 14.4824, 21.5906, 29.9550, 37.9214, 43.3013, 52.2868 ARY(10) - * - * FIX - discrepancy between rollValidate and diagnoseAll handled by sft parameter - why needed? - * - * > runMain scalation.modeling.forecasting2.example_CovidTest10 - */ -@main def example_CovidTest10 (): Unit = - - val hh = 6 // max forecasting horizon - val hp = ARY.hp - hp("lambda") = 1.0 // regularization parameter - - new Plot (null, y, null, s"y ($response)", lines = true) - - for p <- 1 to 10 do // ARY hyper-parameter settings - hp("p") = p - val mod = ARY (y, hh) // create an ARY model - banner (s"TnT Test: ${mod.modelName} Model") - mod.trainNtest_x ()() // needs x matrix => use _x version - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set - end for - -end example_CovidTest10 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest11` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. 
- * Runs Auto-Regressive, Lagged Regression, Direct ARY_D(p) models for several p values, - * and horizons 1 to 6, see sMAPE metrics below: - * - * 19.9912, 30.1349, 38.7483, 45.1096, 49.5424, 52.5320 ARY_D(1) - * 17.7245, 24.2871, 31.1716, 35.9357, 40.5132, 46.4806 ARY_D(2) - * 17.2367, 23.2007, 29.4120, 33.5757, 38.8647, 44.1707 ARY_D(3) - * 17.1336, 23.1984, 29.1758, 33.5773, 38.6493, 43.8045 ARY_D(4) - * 17.1196, 23.1224, 29.1769, 33.6120, 38.7839, 43.9346 ARY_D(5) - * 17.1324, 23.1273, 29.2292, 33.8956, 39.1209, 44.0869 ARY_D(6) - * 16.9815, 23.2879, 29.2536, 33.9433, 39.1474, 44.2361 ARY_D(7) - * 17.0492, 23.1888, 29.2826, 34.0878, 39.2379, 44.7474 ARY_D(8) - * 16.9841, 23.1090, 29.2154, 34.1249, 39.2711, 44.7709 ARY_D(9) - * 17.0676, 23.1089, 28.9425, 33.9046, 38.9082, 44.0469 ARY_D(10) - * - * > runMain scalation.modeling.forecasting2.example_CovidTest11 - */ -@main def example_CovidTest11 (): Unit = - - val hh = 6 // max forecasting horizon - val hp = ARY.hp -// hp("lambda") = 1.0 // regularization parameter - - new Plot (null, y, null, s"y ($response)", lines = true) - - for p <- 1 to 10 do // ARY hyper-parameter settings - hp("p") = p - val mod = ARY_D (y, hh) // create an ARY_D model - banner (s"In-ST Test: ${mod.modelName} Model") - mod.trainNtest_x ()() // note: suffix "_x" currently required - - mod.forecastAll (mod.getYy) // forecast h-steps ahead (h = 1 to hh) for all y - ARY_D.evalForecasts (mod, mod.getYy, hh) - mod.setSkip (0) // so evalForecast and diagnoseAll start at same place - mod.diagnoseAll (y, mod.getYf) - end for - -end example_CovidTest11 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest12` main function tests the `Example_Covid` object. - * Uses Train-n-Test Split (TnT) with Rolling Validation. 
- * Runs Auto-Regressive, Lagged Regression, Direct ARY_D(p) models for several p values, - * and horizons 1 to 6, see sMAPE metrics below: - * - * 18.9312, 31.2905, 45.7578, 57.0037, 65.9690, 72.4626 ARY_D(1) - * 16.8059, 23.1653, 31.9736, 40.6603, 46.6809, 57.1835 ARY_D(2) - * 15.9031, 20.7335, 27.3975, 35.5557, 39.3269, 51.2769 ARY_D(3) - * 15.0132, 20.2209, 27.5774, 35.4134, 39.7899, 48.6745 ARY_D(4) - * 15.2338, 19.4826, 27.6054, 35.6699, 39.8746, 48.4355 ARY_D(5) - * 15.1603, 19.7425, 27.7367, 35.7799, 40.1055, 49.1122 ARY_D(6) - * 15.5484, 22.7247, 31.0076, 38.5501, 44.5176, 50.8537 ARY_D(7) - * 15.3248, 23.2628, 30.6794, 39.0621, 44.5661, 52.6579 ARY_D(8) - * 15.0875, 21.7912, 30.2152, 37.4165, 42.6637, 52.9831 ARY_D(9) - * 14.7569, 22.2172, 30.9435, 40.5641, 46.2016, 57.6445 ARY_D(10) - * - * > runMain scalation.modeling.forecasting2.example_CovidTest12 - */ -@main def example_CovidTest12 (): Unit = - - val hh = 6 // max forecasting horizon - val hp = ARY.hp -// hp("lambda") = 1.0 // regularization parameter - - new Plot (null, y, null, s"y ($response)", lines = true) - - for p <- 1 to 10 do // ARX hyper-parameter settings - hp("p") = p - val mod = ARY_D (y, hh) // create model ARY_D for time series data - banner (s"TnT Test: ${mod.modelName} Model") - mod.trainNtest_x ()() // note: suffix "_x" currently required - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set - end for - -end example_CovidTest12 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest13` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. 
- * Runs Auto-Regressive, Moving Average ARMA(p, q) models for several p values, - * and horizons 1 to 6, see sMAPE metrics below: - * - * FIX - good for h = 1, but then sMAPE scores explode - * - * > runMain scalation.modeling.forecasting2.example_CovidTest13 - */ -@main def example_CovidTest13 (): Unit = - - val hh = 6 // max forecasting horizon - val hp = AR.hp // hyper-parameters for AR family of models - hp("q") = 1 // one MA term - - new Plot (null, y, null, s"y ($response)", lines = true) - - for p <- 2 to 2 do // ARMA hyper-parameter settings - hp("p") = p - val mod = new ARMA (y, hh) // create an ARMA model - banner (s"In-ST Test: ${mod.modelName} Model") - mod.trainNtest ()() - - mod.forecastAll () - mod.diagnoseAll (y, mod.getYf) - Forecaster.evalForecasts (mod, mod.getYb, hh) - end for - -end example_CovidTest13 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest14` main function tests the `Example_Covid` object. - * Uses Train-n-Test Split (TnT) with Rolling Validation. - * Runs Auto-Regressive, Moving Average ARMA(p, q) models for several p values. 
- * and horizons 1 to 6, see sMAPE metrics below: - * - * FIX - for all h sMAPE scores have exploded - * - * > runMain scalation.modeling.forecasting2.example_CovidTest14 - */ -@main def example_CovidTest14 (): Unit = - - val hh = 6 // max forecasting horizon - val hp = AR.hp // hyper-parameters for AR family of models - hp("q") = 1 // one MA term - - new Plot (null, y, null, s"y ($response)", lines = true) - - for p <- 2 to 2 do // ARMA hyper-parameter settings - hp("p") = p - val mod = new ARMA (y, hh) // create an ARMA model - banner (s"TnT Test: ${mod.modelName} Model") - mod.trainNtest ()() - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim)) // only diagnose on the testing set - end for - -end example_CovidTest14 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest15` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. 
- * Runs Auto-Regressive, Exogenous ARX(p, q, n) models for several p values, - * and horizons 1 to 6, see sMAPE metrics below: - * - * 18.3346, 26.5990, 35.8624, 44.8289, 53.7512, 60.5086 ARX(1, 1, 2) - * 15.5184, 20.9192, 27.8176, 35.3589, 43.9210, 50.5047 ARX(2, 2, 2) - * 15.3592, 20.1736, 25.4967, 32.6258, 40.4916, 47.2481 ARX(3, 2, 2) - * 15.3224, 19.8423, 25.0511, 31.9170, 38.9812, 45.6829 ARX(4, 2, 2) - * 15.3200, 19.8433, 25.0510, 31.9146, 38.9858, 45.6849 ARX(5, 2, 2) - * 15.4286, 19.9065, 25.7220, 32.6493, 39.6406, 46.0115 ARX(6, 2, 2) - * 15.3576, 19.9718, 25.4068, 32.3474, 39.0521, 45.5616 ARX(7, 2, 2) - * 15.4913, 19.5610, 25.4153, 32.2240, 39.3885, 45.8530 ARX(8, 2, 2) - * 15.3410, 19.6328, 25.6180, 32.6323, 39.8298, 46.6052 ARX(9, 2, 2) - * 15.4446, 19.6831, 25.6035, 32.8968, 40.6220, 47.7878 ARX(10, 2, 2) - * - * > runMain scalation.modeling.forecasting2.example_CovidTest15 - */ -@main def example_CovidTest15 (): Unit = - -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val exo_vars = Array ("icu_patients", "hosp_patients") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - banner (s"exo_vars = ${stringOf (exo_vars)}, endo_var = $response") - println (s"xe.dims = ${xe.dims}, y.dim = ${y.dim}") - new Plot (null, y, null, s"y ($response)", lines = true) - - for p <- 1 to 10 do // ARX hyper-parameter settings - ARY.hp("p") = p - ARY.hp("q") = min (2, p) - val mod = ARX (xe, y, hh, nonneg = true) // create model for time series data - banner (s"In-ST Test: ${mod.modelName} Model") - mod.trainNtest_x ()() // train and test on full dataset - - mod.forecastAll () - Forecaster.evalForecasts (mod, mod.getYb, hh) - mod.setSkip (0) // so evalForecast and diagnoseAll 
start at same place - mod.diagnoseAll (y, mod.getYf) - end for - -end example_CovidTest15 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest16` main function tests the `Example_Covid` object. - * Uses Train-n-Test Split (TnT) with Rolling Validation. - * Runs Auto-Regressive, Exogenous ARX(p, q, n) models for several p values, - * and horizons 1 to 6, see sMAPE metrics below: - * - * 12.2356, 20.6830, 35.2603, 43.9974, 51.5944, 52.0301 ARX(1, 1, 2) - * 9.72391, 20.6254, 25.4950, 34.2458, 44.5078, 49.9804 ARX(2, 2, 2) - * 10.0738, 21.4470, 26.2178, 34.2212, 44.0982, 49.4524 ARX(3, 2, 2) - * 9.29391, 19.6487, 22.8980, 31.6528, 41.6049, 46.9430 ARX(4, 2, 2) - * 10.2806, 19.2649, 23.1211, 32.1942, 41.9189, 47.2119 ARX(5, 2, 2) - * 11.4258, 19.7370, 24.5103, 34.4673, 44.9873, 49.7458 ARX(6, 2, 2) - * 11.2501, 19.0128, 22.3547, 31.9938, 42.1729, 47.1063 ARX(7, 2, 2) - * 10.9763, 18.8067, 22.5181, 32.0960, 41.8394, 47.1825 ARX(8, 2, 2) - * 11.1796, 19.3087, 23.7479, 33.1067, 42.8283, 47.6904 ARX(9, 2, 2) - * 10.9499, 20.6255, 25.8116, 35.5139, 45.2163, 50.0280 ARX(10, 2, 2) - * - * > runMain scalation.modeling.forecasting2.example_CovidTest16 - */ -@main def example_CovidTest16 (): Unit = - -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val exo_vars = Array ("icu_patients", "hosp_patients") - val (xxe, yy) = loadData (exo_vars, response) - println (s"xxe.dims = ${xxe.dims}, yy.dim = ${yy.dim}") - -// val xe = xxe // full - val xe = xxe(0 until 116) // clip the flat end -// val y = yy // full - val y = yy(0 until 116) // clip the flat end - val hh = 6 // maximum forecasting horizon - - banner (s"exo_vars = ${stringOf (exo_vars)}, endo_var = $response") - println (s"xe.dims = ${xe.dims}, y.dim = ${y.dim}") - new Plot (null, y, null, s"y ($response)", lines = true) - - for p <- 1 to 10 do // ARX hyper-parameter settings - ARY.hp("p") = p - ARY.hp("q") = min (2, p) 
- val mod = ARX (xe, y, hh, nonneg = true) // create an ARX model - banner (s"TnT Test: ${mod.modelName} Model") - mod.trainNtest_x ()() // use customized trainNtest_x - - mod.setSkip (0) - mod.rollValidate () // TnT with Rolling Validation - println (s"After Roll TnT Forecast Matrix yf = ${mod.getYf}") - mod.diagnoseAll (y, mod.getYf, Forecaster.teRng (y.dim), 0) // only diagnose on the testing set - end for - -end example_CovidTest16 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest17` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. - * Runs Auto-Regressive, Exogenous ARX_D(p, q, n) models for several p values. - * > runMain scalation.modeling.forecasting2.example_CovidTest17 - * -@main def example_CovidTest17 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, response) -// val (x, y) = (xx, yy) // full - val (x, y) = (xx(0 until 116), yy(0 until 116)) // clip the flat end - val hh = 6 // max forecasting horizon - - banner (s"exo_vars = ${stringOf (exo_vars)}, endo_var = $response") - println (s"x.dims = ${x.dims}, y.dim = ${y.dim}") - new Plot (null, y, null, s"y ($response)", lines = true) - - for p <- 1 to 10 do // ARX hyper-parameter settings - val mod = ARX_MV.exo (y, p, x, hh)(1, p+1) // create an ARX_MV model - banner (s"In-ST Test: ${mod.modelName} Model") - mod.trainNtest ()() - end for - -end example_CovidTest17 - */ - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest18` main function tests the `Example_Covid` object. - * Uses Train-n-Test Split (TnT) with Rolling Validation. - * Runs Auto-Regressive, Exogenous ARX_D(p, q, n) models for several p values. 
- * > runMain scalation.modeling.forecasting2.example_CovidTest18 - * -@main def example_CovidTest18 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, response) -// val (x, y) = (xx, yy) // full - val (x, y) = (xx(0 until 116), yy(0 until 116)) // clip the flat end - val hh = 6 // max forecasting horizon - - banner (s"exo_vars = ${stringOf (exo_vars)}, endo_var = $response") - println (s"x.dims = ${x.dims}, y.dim = ${y.dim}") - new Plot (null, y, null, s"y ($response)", lines = true) - - for p <- 1 to 10 do // ARX hyper-parameter settings - val mod = ARX_MV.exo (y, p, x, hh)(1, p+1) // create an ARX_MV model - banner (s"TnT Test: ${mod.modelName} Model") - mod.trainNtest ()() - ARX_MV.rollValidate (mod) // direct does all horizon at once - end for - -end example_CovidTest18 - */ - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest19` main function tests the `Example_Covid` object. - * Uses In-Sample Testing (In-ST), i.e., train and test on the same data. - * Runs Auto-Regressive, Exogenous ARX_Quad_D(p, q, n) models for several p values. 
- * > runMain scalation.modeling.forecasting2.example_CovidTest19 - * -@main def example_CovidTest19 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "new_cases", "people_vaccinated", "people_fully_vaccinated") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, response) -// val (x, y) = (xx, yy) // full - val (x, y) = (xx(0 until 116), yy(0 until 116)) // clip the flat end - val hh = 6 // max forecasting horizon - - banner (s"exo_vars = ${stringOf (exo_vars)}, endo_var = $response") - println (s"x.dims = ${x.dims}, y.dim = ${y.dim}") - new Plot (null, y, null, s"y ($response)", lines = true) - - for p <- 1 to 10 do // ARX hyper-parameter settings - val mod = ARX_Quad_MV.exo (y, p, x, hh)(1, p+1) // create an ARX_Quad_MV model - banner (s"In-ST Test: ${mod.modelName} Model") - mod.trainNtest ()() - end for - -end example_CovidTest19 - */ - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `example_CovidTest20` main function tests the `Example_Covid` object. - * Uses Train-n-Test Split (TnT) with Rolling Validation. - * Runs Auto-Regressive, Exogenous ARX_Quad_D(p, q, n) models for several p values. 
- * > runMain scalation.modeling.forecasting2.example_CovidTest20 - * -@main def example_CovidTest20 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") -// val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "new_cases", "people_vaccinated", "people_fully_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, response) -// val (x, y) = (xx, yy) // full - val (x, y) = (xx(0 until 116), yy(0 until 116)) // clip the flat end - val hh = 6 // max forecasting horizon - val pw = 1.5 // power pw: tune with values around 2.0 - - banner (s"exo_vars = ${stringOf (exo_vars)}, endo_var = $response") - println (s"x.dims = ${x.dims}, y.dim = ${y.dim}") - new Plot (null, y, null, s"y ($response)", lines = true) - - for p <- 1 to 10 do // ARX hyper-parameter settings - val mod = ARX_Quad_MV.exo (y, p, x, hh, pw)(1, p+1) // create an ARX_Quad_MV model - banner (s"TnT Test: ${mod.modelName} Model") - mod.trainNtest ()() - ARX_MV.rollValidate (mod) // direct does all horizon at once - end for - -end example_CovidTest20 - */ - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/periodogramTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/periodogramTest.class deleted file mode 100644 index 555f80f6a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/periodogramTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/periodogramTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/periodogramTest.tasty deleted file mode 100644 index 67c3c86e5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/periodogramTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest.class deleted file mode 100644 index 
65294e4dc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest.tasty deleted file mode 100644 index 6468bf2f3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest2.class deleted file mode 100644 index 9d072a6ee..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest2.tasty deleted file mode 100644 index 6103c9fca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest3.class deleted file mode 100644 index 2759bccd1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest3.tasty deleted file mode 100644 index dcaede061..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest4.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest4.class deleted file mode 100644 index f4b15799a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest4.tasty deleted file mode 100644 index da9cd326a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkSTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest.class deleted file mode 100644 index 3c08b55c7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest.tasty deleted file mode 100644 index c736818dd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest2.class deleted file mode 100644 index e1a97d1e6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest2.tasty deleted file mode 100644 index 45a0332bc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest2.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest3.class deleted file mode 100644 index dc887f18c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest3.tasty deleted file mode 100644 index a80f03211..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest4.class deleted file mode 100644 index 4197169e0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest4.tasty deleted file mode 100644 index a4245466b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest5.class deleted file mode 100644 index bb0c01a50..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest5.tasty deleted file mode 100644 index 79fe2f9b0..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest6.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest6.class deleted file mode 100644 index 0158abacf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest6.tasty deleted file mode 100644 index 9041a711e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/randomWalkTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest.class deleted file mode 100644 index e13dd5142..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest.tasty deleted file mode 100644 index 003d69307..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest2.class deleted file mode 100644 index ffb715a95..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest2.tasty deleted file mode 100644 index 7953398ce..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest3.class deleted file mode 100644 index 81a4d1d59..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest3.tasty deleted file mode 100644 index 96ddee585..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest4.class deleted file mode 100644 index 094c380b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest4.tasty deleted file mode 100644 index 1c5655fe3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest5.class deleted file mode 100644 index 58bd0607c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest5.tasty deleted file mode 100644 index 051c32882..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/sARYTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest.class deleted file mode 100644 index 214d50954..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest.tasty deleted file mode 100644 index e817371d9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest2.class deleted file mode 100644 index 0f03587e4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest2.tasty deleted file mode 100644 index a0a9eff17..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest3.class deleted file mode 100644 index ad736b7af..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest3.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest3.tasty deleted file mode 100644 index 165662828..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest4.class deleted file mode 100644 index e9a395273..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest4.tasty deleted file mode 100644 index 7430c3d52..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleExpSmoothingTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest.class deleted file mode 100644 index bed95e4d4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest.tasty deleted file mode 100644 index 113fc161a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest2.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest2.class deleted file mode 100644 index 873c2bc7d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest2.tasty deleted file mode 100644 index 38532e428..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest3.class deleted file mode 100644 index 2240cd6e8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest3.tasty deleted file mode 100644 index 304aff6b6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest4.class deleted file mode 100644 index ab0d5ad4c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest4.tasty deleted file mode 100644 index 53826af59..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/simpleMovingAverageTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest.class deleted file mode 100644 index d8f274923..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest.tasty deleted file mode 100644 index ddd134d4e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest2.class deleted file mode 100644 index cecb16a60..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest2.tasty deleted file mode 100644 index b699c3a8a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest3.class deleted file mode 100644 index 995ff0f5d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest3.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest3.tasty deleted file mode 100644 index 6897cb974..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationarity_KPSSTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest.class deleted file mode 100644 index 8c0faba79..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest.tasty deleted file mode 100644 index 2a65914af..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest2.class deleted file mode 100644 index a521feb85..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest2.tasty deleted file mode 100644 index bda2c15b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest3.class deleted file mode 100644 index fcf7d5871..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest3.tasty deleted file mode 100644 index 090afda0d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/stationaryTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest.class deleted file mode 100644 index e6effe1a9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest.tasty deleted file mode 100644 index 7d9a4838a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest2.class deleted file mode 100644 index a76d8f410..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest2.tasty deleted file mode 100644 index 0328fe3ce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest3.class deleted file mode 100644 index 4d29098e2..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest3.tasty deleted file mode 100644 index 6fb278b58..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest4.class deleted file mode 100644 index 4d20ebfe8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest4.tasty deleted file mode 100644 index 67853cedc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/tranARYTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest.class deleted file mode 100644 index 60284a410..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest.tasty deleted file mode 100644 index 572b0c30b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest2.class deleted file mode 100644 
index af332b063..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest2.tasty deleted file mode 100644 index 6d2f625ae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest3.class deleted file mode 100644 index c0ffb69c2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest3.tasty deleted file mode 100644 index 2870b6904..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest4.class deleted file mode 100644 index e5b1166c2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest4.tasty deleted file mode 100644 index a65ac5db5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/trendModelTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest.class deleted file mode 100644 index 0dd52ecb7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest.tasty deleted file mode 100644 index db5e11f40..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest2.class deleted file mode 100644 index e0d4ff0c3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest2.tasty deleted file mode 100644 index b80058214..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest3.class deleted file mode 100644 index 4b6932348..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest3.tasty deleted file mode 100644 index 
e779f46d9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest4.class deleted file mode 100644 index 136280ac5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest4.tasty deleted file mode 100644 index 7f7ee33c9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting/weightedMovingAverageTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR$.class deleted file mode 100644 index 5dc9d8f97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR$package$.class deleted file mode 100644 index b5aa0a2f8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR$package.class deleted file mode 100644 index b11033a30..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR$package.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR$package.tasty deleted file mode 100644 index 83ed91ebd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR.class deleted file mode 100644 index 7d990228b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR.tasty deleted file mode 100644 index 7355d4dac..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA$.class deleted file mode 100644 index 430f4b0b2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA$package$.class deleted file mode 100644 index ad4a8a5c5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA$package.class deleted file mode 100644 index 29433d162..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA$package.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA$package.tasty deleted file mode 100644 index f52a7d73e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA.class deleted file mode 100644 index 774bdbf32..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA.tasty deleted file mode 100644 index e15f76718..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/AR1MA.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA$.class deleted file mode 100644 index 07a3f519e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA$package$.class deleted file mode 100644 index eb8960a5b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA$package.class deleted file mode 100644 index 13b54a0bf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA$package.class and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA$package.tasty deleted file mode 100644 index 91204d045..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA.class deleted file mode 100644 index 4ed9a3004..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA.tasty deleted file mode 100644 index dd140f264..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff$.class deleted file mode 100644 index 260decf90..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff$package$.class deleted file mode 100644 index f2ab22674..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff$package.class deleted file mode 100644 index 67174886e..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff$package.tasty deleted file mode 100644 index 3dbe7e53e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff.class deleted file mode 100644 index d4373987a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff.tasty deleted file mode 100644 index fab452a83..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARIMA_diff.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA$.class deleted file mode 100644 index af8dd01c7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA$package$.class deleted file mode 100644 index bfd453b07..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA$package.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA$package.class deleted file mode 100644 index b6b038950..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA$package.tasty deleted file mode 100644 index aa811dfc0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA.class deleted file mode 100644 index 8c2e3f2da..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA.tasty deleted file mode 100644 index 166bd4ec8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARMA.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX$.class deleted file mode 100644 index a12b22028..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX$package$.class deleted file mode 100644 index afe6eaf08..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX$package$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX$package.class deleted file mode 100644 index d357ef101..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX$package.tasty deleted file mode 100644 index 06d213260..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX.class deleted file mode 100644 index dd22e5028..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX.tasty deleted file mode 100644 index a8f5a773b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV$.class deleted file mode 100644 index 3826d2f1c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV$package$.class deleted file mode 100644 index ff0fdc99c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV$package$.class and /dev/null differ 
diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV$package.class deleted file mode 100644 index c8828dad3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV$package.tasty deleted file mode 100644 index e155f5fbc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV.class deleted file mode 100644 index d6cfe8ede..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV.tasty deleted file mode 100644 index 4c9ea769c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_MV.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad$.class deleted file mode 100644 index 475cfd340..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad$package$.class deleted file mode 100644 index b098ed858..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad$package.class deleted file mode 100644 index 1a97961c1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad$package.tasty deleted file mode 100644 index dda3fbb41..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad.class deleted file mode 100644 index 2778f99d5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad.tasty deleted file mode 100644 index 0dba7a08e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest.class deleted file mode 100644 index e18e71c16..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest.tasty deleted file mode 100644 index b9480ddf5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest2.class deleted file mode 100644 index 0516af71c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest2.tasty deleted file mode 100644 index b9d51bb2a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest3.class deleted file mode 100644 index 436c0993e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest3.tasty deleted file mode 100644 index ce63624f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest4.class deleted file mode 100644 index fda1943c8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest4.class and /dev/null differ 
diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest4.tasty deleted file mode 100644 index 7d1bf1cc6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest5.class deleted file mode 100644 index f073a1fb5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest5.tasty deleted file mode 100644 index ee6aec115..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest6.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest6.class deleted file mode 100644 index f4edc220b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest6.tasty deleted file mode 100644 index c29444a5e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_QuadTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV$.class deleted file mode 100644 index 1cb899d93..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV$package$.class deleted file mode 100644 index d1bd16a92..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV$package.class deleted file mode 100644 index 15f1e2033..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV$package.tasty deleted file mode 100644 index a90f6488a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV.class deleted file mode 100644 index 6779c3981..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV.tasty deleted file mode 100644 index aa22cbbf5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ARX_Quad_MV.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecastUtil$package$.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecastUtil$package$.class deleted file mode 100644 index 60e44f22f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecastUtil$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecastUtil$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecastUtil$package.class deleted file mode 100644 index f199dec79..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecastUtil$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecastUtil$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecastUtil$package.tasty deleted file mode 100644 index f0026b9f3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecastUtil$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster$.class deleted file mode 100644 index 8ffef83e6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster$package$.class deleted file mode 100644 index e51300376..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster$package.class deleted file mode 100644 index 954d0c3d8..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster$package.tasty deleted file mode 100644 index 7edbd85ed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster.class deleted file mode 100644 index f62470515..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster.tasty deleted file mode 100644 index fb6ba188a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Forecaster.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecasterX$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecasterX$.class deleted file mode 100644 index 48478117b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecasterX$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecasterX.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecasterX.class deleted file mode 100644 index 7994e1a19..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecasterX.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecasterX.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecasterX.tasty deleted file mode 100644 index 0a47eb96d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/ForecasterX.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter$.class deleted file mode 100644 index 9cc02c508..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter$package$.class deleted file mode 100644 index 49a7c4f16..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter$package.class deleted file mode 100644 index c728cd89b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter$package.tasty deleted file mode 100644 index 250c557e2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter.class deleted file mode 100644 index 79a1aeaf7..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter.tasty deleted file mode 100644 index 5e8d3126b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/KalmanFilter.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel$.class deleted file mode 100644 index fbcc61a48..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel$package$.class deleted file mode 100644 index 580c19336..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel$package.class deleted file mode 100644 index ca8de5292..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel$package.tasty deleted file mode 100644 index 99874d3d7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel.class deleted file mode 100644 index 230f32929..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel.tasty deleted file mode 100644 index b90874d8f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/NullModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline$.class deleted file mode 100644 index 12191bf34..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline$package$.class deleted file mode 100644 index cd7d3ab97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline$package.class deleted file mode 100644 index 6e4e015b6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline$package.tasty deleted file mode 100644 index 2ab3a2ed2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline$package.tasty 
and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline.class deleted file mode 100644 index 51b967b69..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline.tasty deleted file mode 100644 index 8c03df70c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/QuadSpline.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/README.txt b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/README.txt deleted file mode 100644 index b607bbb81..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/README.txt +++ /dev/null @@ -1,37 +0,0 @@ - -This package consists of older and experimental code. 
-End users should use the `forecasting` package - -Simple Univariate Time Series Models ------------------------------------- - -Utilities: - -Forecaster.scala -- Base Trait for Forecasters with Vector Input -RollingValidation.scala -- Rolling Validation for Forecasters -Stationarity.scala -- Unit Root Tests for Time Series Stationarity -Stationarity_KPSS.scala -- Kwiatkowski–Phillips–Schmidt–Shin (KPSS) Test for Stationarity - -Baseline: - -NullModel.scala -- Null/Mean Model: - y_t = mean -RandomWalk.scala -- Random Walk/Last-Value Model: - y_t = y_t-1 -MovingAverage.scala -- Simple Moving Average Model: - y_t = mean of last q values -TrendModel.scala -- Linear Trend Model: - y_t = a + b t - -Simple Models: - -SimpleExpSmoothing.scala -- Simple Exponential Smoothing (SES) Model: - s_t = α y_t-1 + (1-α)s_t-1; y_t = s_t -QuadSpline.scala -- Quadratic Spline Model: - y_t = y_t = a + b t + c t^2 -AR.scala -- Auto-Regressive (AR) Model: - y_t = δ + φ_1 y_t-1 + ... + φ_p y_t-p -ARMA.scala -- Auto-Regressive, Moving-Average (ARMA) Model: - y_t = δ + φ_1 y_t-1 + ... + φ_p y_t-p - + θ_1 e_t-1 + ... 
+ θ_q e_t-q - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk$.class deleted file mode 100644 index 0a4fd1543..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk$package$.class deleted file mode 100644 index 20a63e64f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk$package.class deleted file mode 100644 index 10f8ae6ce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk$package.tasty deleted file mode 100644 index a85b2270f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk.class deleted file mode 100644 index 8a430b88a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk.tasty deleted file mode 100644 index 
2f74ee645..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RandomWalk.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS$.class deleted file mode 100644 index f6e33577f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS$package$.class deleted file mode 100644 index 2fb8b93bb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS$package.class deleted file mode 100644 index 352749703..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS$package.tasty deleted file mode 100644 index 90a932e3e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS.class deleted file mode 100644 index 969da1024..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS.tasty deleted file mode 100644 index b3f44384c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2$.class deleted file mode 100644 index 9c6c30a11..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2$package$.class deleted file mode 100644 index ee3709efc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2$package.class deleted file mode 100644 index a47b7f347..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2$package.tasty deleted file mode 100644 index 1ac240dad..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2.class deleted file mode 100644 index 617767b26..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2.tasty deleted file mode 100644 index 481bb62d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeGB4TS2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS$.class deleted file mode 100644 index 12080e8c6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS$package$.class deleted file mode 100644 index f1bfbaf01..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS$package.class deleted file mode 100644 index aecec4b89..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS$package.class and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS$package.tasty deleted file mode 100644 index 8a2086e06..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS.class deleted file mode 100644 index b63d90502..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS.tasty deleted file mode 100644 index 6b7a4102c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeMT4TS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS$.class deleted file mode 100644 index 6146120d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS$package$.class deleted file mode 100644 index de1c11b68..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS$package$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS$package.class deleted file mode 100644 index 046466e8c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS$package.tasty deleted file mode 100644 index 06f0e6f20..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS.class deleted file mode 100644 index 52bec2e1e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS.tasty deleted file mode 100644 index a4151178b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF4TS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS$.class deleted file mode 100644 index 141355280..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS$package$.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS$package$.class deleted file mode 100644 index b526455d3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS$package.class deleted file mode 100644 index 94b316fb8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS$package.tasty deleted file mode 100644 index 382265a80..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS.class deleted file mode 100644 index a2c49cd57..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS.tasty deleted file mode 100644 index 2796f1a7f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation$.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation$.class deleted file mode 100644 index 5424b5bff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation$package$.class deleted file mode 100644 index 55b57b93c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation$package.class deleted file mode 100644 index 9af7e2a9e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation$package.tasty deleted file mode 100644 index d512cb714..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation.class deleted file mode 100644 index e842cab06..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation.tasty deleted file mode 100644 
index e2a29a5d3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/RollingValidation.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SARIMA$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SARIMA$.class deleted file mode 100644 index 8a8e0ea17..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SARIMA$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SARIMA.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SARIMA.class deleted file mode 100644 index 35a0b9272..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SARIMA.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SARIMA.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SARIMA.tasty deleted file mode 100644 index 7bf04bc04..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SARIMA.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SARIMAX.scalaa b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SARIMAX.scalaa deleted file mode 100644 index 26c78d694..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SARIMAX.scalaa +++ /dev/null @@ -1,489 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Fri Feb 9 20:23:15 EST 2024 - * @see LICENSE (MIT style license file). 
- * - * @note Model: Seasonal Auto-Regressive, Integrated, Moving Average, with eXogenous variables (SARIMAX) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -// U N D E R D E V E L O P M E N T - -package scalation -package modeling -package forecasting - -import scala.math.sqrt - -import scalation.mathstat._ -import scalation.optimization.quasi_newton.BFGS -import scalation.random.{Normal, Uniform} - -import SARIMA._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `SARIMAX` class provides basic time series analysis capabilities for Auto- - * Regressive AR Integrated I Moving-Average MA, with eXogenous variables X models. - * In a SARIMAX(p, d, q)x(P, D, Q)_s [a, b] model, - * p and q are the orders of the AR and MA components; d is the order of differencing. - * P and Q are the orders of the AR and MA seasonal components; d is the order of Seasonal differencing. - * s is the seasonal period, and [a, b] is the range of lags for the eXogenous variables. 
- *------------------------------------------------------------------------------ - * @param y the original endogenous input vector (time series data) - * @param x the exogenous time series data as an input matrix - * @param dd the order of seasonal differencing - * @param period the seasonal period (at least 2) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class SARIMAX (y: VectorD, x: MatrixD, dd: Int = 0, period: Int = 2, - tt: VectorD = null, hparam: HyperParameter = SARIMAX.hp) - extends SARIMA (y, dd, period, tt, hparam): - - private val flaw = flawf ("SARIMAX") // flaw function - - if period < 2 then flaw ("init", "the seasonal period must be at least 2") - - private var pp = 0 // seasonal AR order - private var qq = 0 // seasonal MA order, @see ARIMA for (p, d, q) - private var z = VectorD.nullv // time series after differencing - private var z_ = VectorD.nullv // intermediate results after simple differencing - private var zp = VectorD.nullv // predicted values prior to undifferencing/uncentering - private var φφ = VectorD.nullv // seasonal AR(pp) coefficients - private var θθ = VectorD.nullv // seasonal MA(qq) coefficients - - differenced = d > 0 || dd > 0 // flag indicating whether differencing will be applied - init (y) // initialize vectors and parameters - - println (s"x.dims = ${x.dims}") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including its current hyper-parameter. - */ - modelName = if dd > 0 then s"SARIMAX ($p, $d, $q) x ($pp, $dd, $qq)_${period}" - else s"SARIMAX ($p, $d, $q)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the maximum lag used by this model (its capacity to look into the past). 
- */ - override def cap: Int = Array (p, q, pp*period, qq*period).max - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Initialize variables based on the working time-series v. - * Set/change the working time series. May be used to set the time series - * to a different time window in order to produce newer forecast. - * @param v the working vector/time-series - */ - protected override def init (v: VectorD): Unit = - mu = v.mean // sample mean - val zz = difference (v, d, dd, period) // difference (simple/seasonal) the time series - z = zz._1 // time series after differencing - z_ = zz._2 // intermediate results after simple differencing - zp = new VectorD (z.dim) // predicted values prior to undifferencing/uncentering -// e = new VectorD (z.dim) // vector of errors/residuals - sig2 = z.variance // sample variance - end init - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set values for the models orders p, q, pp and qq. - * @param pq the vector of model orders - */ - override def setPQ (pq: VectorI): Unit = - val n = pq.dim -// if n > 0 then p = pq(0) - φ = new VectorD (p) // AR coefficients -// if n > 1 then q = pq(1) - θ = new VectorD (q) // MA coefficients - if n > 2 then pp = pq(2) - φφ = new VectorD (pp) // seasonal AR coefficients - if n > 3 then qq = pq(3) - θθ = new VectorD (qq) // seasonal MA coefficients - params = p + q + pp + qq + (if differenced then 0 else 1) // number of parameters - end setPQ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `SARIMAX` model to the times-series data in vector y_. Must call setPQ first. - * Estimate the coefficient vectors doer a SARIMAX(p, d, q, P, D, Q)_s model. - * It uses BFGS, a Quasi-Newton optimizer, to minimize the negative log-likelihood. 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector - */ - override def train (x_null: MatrixD, y_ : VectorD): Unit = - val solver = new BFGS (nll) // nonlinear optimizer - val b = new VectorD (params + 1) // parameter values - - if ! differenced then b(b.size-2) = mu // sample mean, initial est. for μ parameter - b(b.size-1) = sqrt (sig2) // sample standard deviation, initial est. for σ parameter - solver.solve (b) // find b that maximizes likelihood - - δ = μ * (1 - φ.sum) // update drift value -// δ = stats.mu * (1 - φ.sum) - - showParameterEstimates () // show ARMA parameters - println (s"θθ = $θθ") // seasonal moving average coefficients - println (s"φφ = $φφ") // seasonal auto-regressive coefficients - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The negative log-likelihood function to be minimized. - * @see spia.uga.edu/faculty_pages/monogan/teaching/ts/Barima.pdf - * @see stats.stackexchange.com/questions/77663/arima-estimation-by-hand - * @param b the input parameter vector - */ - protected override def nll (b: VectorD): Double = - if b.size != params + 1 then flaw ("nll", "input parameter vector size incorrect") - for i <- 0 until p do φ(i) = b(i) - for i <- p until p+pp do φφ(i-p) = b(i) - for i <- p+pp until p+pp+q do θ(i-p-pp) = b(i) - for i <- p+pp+q until p+pp+q+qq do θθ(i-p-pp-q) = b(i) - if ! differenced then μ = b(b.size-2) - σ2 = b.last~^2 - - updateFittedValues () - end nll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the vector of fitted values zp, the vector of errors e, and - * return the negative log-likelihood -ll. - * @see `Fit` for definition of ll. - */ - protected override def updateFittedValues (): Double = - if ! differenced then for i <- z.indices do z(i) = y(i) - μ // for undifferenced time series, center using est. 
μ - - zp(0) = z(0) // no past values or errors => copy actual - for t <- 1 until zp.dim do - e(t-1) = z(t-1) - zp(t-1) // error in previous forecast - var sum = 0.0 - for j <- 0 until p if t-j > 0 do sum += φ(j) * z(t-1-j) - for j <- 0 until pp if t-(1+j)*period >= 0 do sum += φφ(j) * z(t-(1+j)*period) - for j <- 0 until q if t-j > 0 do sum += θ(j) * e(t-1-j) - for j <- 0 until qq if t-(1+j)*period >= 0 do sum += θθ(j) * e(t-(1+j)*period) - zp(t) = sum - end for - - -ll (e.normSq / m, σ2, m) // return negative log likelihood - end updateFittedValues - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector (concatenation of φ, θ, φφ and θθ). - */ - override def parameter: VectorD = φ ++ θ ++ φφ ++ θθ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the vector of predicted/fitted values on the training/full data. - * Based on zp calculated in the updateFittedValues method. - * @param y_ the given time-series - */ - override def predictAll (y_ : VectorD): VectorD = - if differenced then transformBack (zp, z_, y, d, dd, period) else zp + μ - end predictAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce h-steps-ahead forecast for SARIMAX models. 
- * @see ams.sunysb.edu/~zhu/ams586/Forecasting.pdf - * @param t the time point from which to make forecasts (in the original scale) - * @param h the number of steps to forecast, must be at least one - */ -// override def forecast (t: Int = y.dim, h: Int = 1): VectorD = // FIX - adjust to new framework - override def forecast (t: Int, yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if t > y.dim then flaw ("forecast", s"t ($t) cannot be greater than y.dim (${y.dim})") - val tz = t - d - dd * period // scale t to match vector z and e - if tz < cap then flaw ("forecast", s"tz ($tz) must be at least cap ($cap)") - - val zf = new VectorD (cap + h) // forecasted centered values - val e_ = new VectorD (cap + h) // available observed errors - - for i <- 0 until cap if tz-cap+i >= 0 do // seed with first cap values - zf(i) = z(tz-cap+i) // copy first cap values - e_(i) = e(tz-cap+i) // unveil first cap errors (observed in training) - end for - - for i <- cap until zf.dim do // start at t = cap (enough for first value to forecast) - var sum = 0.0 - for j <- 0 until p do sum += φ(j) * zf(i-1-j) - for j <- 0 until pp do sum += φφ(j) * zf(i-(1+j)*period) - for j <- 0 until q do sum += θ(j) * e_(i-1-j) - for j <- 0 until qq do sum += θθ(j) * e_(i-(1+j)*period) - zf(i) = sum - end for - - val f = zf(cap until zf.dim) // dump first cap values - if differenced then transformBackF (f, z_, y, d, dd, period, t) - else f + μ // return the vector of forecasts - end forecast - -end SARIMAX - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `sARIMAXTest` main function is used to test the `SARIMAX` class. 
- * > runMain scalation.modeling.forecasting.sARIMAXTest - */ -@main def sARIMAXTest (): Unit = - - println ("SARIMAX") - val m = 100 - val noise = Uniform (-5, 5) - val y = VectorD (for i <- 0 until m yield i + noise.gen) - val x = MatrixD ((1, 1), 1) // FIX - make a useful matrix - - val mod = new SARIMAX (y, x, 1) // time series data: y, x vs. t, apply 1st order differencing - -// mod.plotFunc2 (mod.acf, "ACF") // must turn on DEBUG so that ACF is actually computed - - banner (s"Build a SARIMAX(1, 0, 0) model") - mod.setPQ (VectorI (1, 0)) // set p and q, train and evaluate the ARIMA model - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - banner (s"Build a SARIMAX(2, 0, 0) model") - mod.setPQ (VectorI (2, 0)) // set p and q, train and evaluate the ARIMA model - mod.train (null, y) // train the model on full dataset - val (yp2, qof2) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof2)) // report on Quality of Fit (QoF) - - banner (s"Build a SARIMAX(1, 0, 1) model") - mod.setPQ (VectorI (1, 1)) // set p and q, train and evaluate the ARIMA model - mod.train (null, y) // train the model on full dataset - val (yp3, qof3) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof3)) // report on Quality of Fit (QoF) - -end sARIMAXTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `sARIMAXTest2` main function is used to test the `SARIMAX` class. 
- * > runMain scalation.modeling.forecasting.sARIMAXTest2 - */ -@main def sARIMAXTest2 (): Unit = - - val m = 30 - val noise = Normal (0, 2) - val y = VectorD (for i <- 0 until m yield i + noise.gen) - val x = MatrixD ((1, 1), 1) // FIX - make a useful matrix - - println (s"y = $y") - - val (p, d, q) = (1, 1, 1) -// val steps = 2 // number of steps for the forecasts - - val mod = new SARIMAX (y, x, d) // time series data: y, x vs. t - - banner (s"Build a SARIMAX(p, 0, 0) model") - mod.setPQ (VectorI (p, 0)) // set p and q, train and evaluate the ARIMA model - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - -// val ar_f = ts.forecast (h = steps) -// println (s"$steps-step ahead forecasts using SARIMAX($p, 0, 0) model = $ar_f") - - banner (s"Build a SARIMAX(0, 0, q) model") - mod.setPQ (VectorI (0, q)) // set p and q, train and evaluate the ARIMA model - mod.train (null, y) // train the model on full dataset - val (yp2, qof2) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof2)) // report on Quality of Fit (QoF) - -// val ma_f = ts.forecast (h = steps) -// println (s"$steps-step ahead forecasts using SARIMAX(0, 0, $q) model = $ma_f") - - banner (s"Build a SARIMAX(p, 0, q) model") - mod.setPQ (VectorI (p, q)) // set p and q, train and evaluate the ARIMA model - mod.train (null, y) // train the model on full dataset - val (yp3, qof3) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof3)) // report on Quality of Fit (QoF) - -// val arma_f = ts.forecast (h = steps) -// println (s"$steps-step ahead forecasts using SARIMAX($p, 0, $q) model = $arma_f") - -end sARIMAXTest2 - -import Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `sARIMAXTest3` main function is used to test the `SARIMAX` class. 
- * Forecasting lake levels. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.sARIMAXTest3 - */ -@main def sARIMAXTest3 (): Unit = - - val x = MatrixD ((1, 1), 1) // FIX - make a useful matrix - -// val (pp, dd, qq) = (1, 1, 1) // seasonal hyper-parameters -// val period = 4 // seasonal period - -// val d = 0 // apply d-th order differencing - no differencing - val d = 1 // apply d-th order differencing - first differences - - for h <- 1 to 2 do // forecasting horizon - for p <- 1 to 3 do // auto-regressive hyper-parameter settings - for q <- 0 to 2 do // moving-average hyper-parameter settings - banner (s"Test3: SARIMAX ($p, $d, $q) with h = $h") - val mod = new SARIMAX (y, x, d) // create an SARIMAX model - mod.setPQ (VectorI (p, q)) // set p and q, train and evaluate the ARIMA model - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) -/* - val yfa = mod.forecastAll (h) - val yfb = mod.forecastAll2 (h) - assert (yfa == yfb) - val yf = yfa.col(h) // forecasted values - h steps ahead - val yf2 = yfb.col(h) // forecasted 2 values - h steps ahead - - new Plot (null, y, yp, s"Plot of y & yp, predicted SARIMAX ($p, $d, $q) vs. t", true) - new Plot (null, y, yf, s"Plot of y & yf, forecasted (h = $h) SARIMAX ($p, $d, $q) vs. t", true) - new Plot (null, y, yf2, s"Plot of y & yf2, forecasted2 (h = $h) SARIMAX ($p, $d, $q) vs. 
t", true) - - if h == 1 then differ (yp, yf, allow = true) - differ (yf, yf2, allow = true) - val skip = max (p, q) // skip the cap start-up - - banner (s"SARIMAXTest3: QoF (@h = $h) for yf = mod.forecastAll") - println (s"rSq (yf) for h = $h is ${rSqF (y, yf)}") - println (s"rSq (yf) for h = $h is ${rSqF (y, yf, max (p, q))}, skip = $skip") - println (s"sMAPE (yf) for h = $h is ${smapeF (y, yf)}") - println (s"sMAPE (yf) for h = $h is ${smapeF (y, yf, max (p, q))}, skip = $skip") - - banner (s"SARIMAXTest3: QoF (@h = $h) for yf2 = mod.forecastAll2") - println (s"rSq (yf2) for h = $h is ${rSqF (y, yf2)}") - println (s"rSq (yf2) for h = $h is ${rSqF (y, yf2, max (p, q))}, skip = $skip") - println (s"sMAPE (yf2) for h = $h is ${smapeF (y, yf2)}") - println (s"sMAPE (yf2) for h = $h is ${smapeF (y, yf2, max (p, q))}, skip = $skip") -*/ - end for - end for - end for - -end sARIMAXTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `sARIMAXTest4` main function is used to test the `SARIMAX` class. - * > runMain scalation.modeling.forecasting.sARIMAXTest4 - */ -@main def sARIMAXTest4 (): Unit = - - val nfile = "travelTime.csv" - val data = MatrixD.load (nfile) - -// val t = data(?, 0) - val y = data(?, 1) - val x = MatrixD ((1, 1), 1) // FIX - make a useful matrix - - val (p, d, q) = (1, 1, 1) -// val steps = 1 // number of steps for the forecasts - - val mod = new SARIMAX (y, x, d) // time series data: y, x vs. 
t - - println (s"y = $y") - - // Build SARIMAX(p, d, q) models - - mod.setPQ (VectorI (p, 0)) // set p and q, train and evaluate the ARIMA model - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - -// val ar_f = ts.forecast (h = steps) -// println (s"$steps-step ahead forecasts using SARIMAX($p, $d, $q) model = $ar_f") - - mod.setPQ (VectorI (0, q)) // set p and q, train and evaluate the ARIMA model - mod.train (null, y) // train the model on full dataset - val (yp2, qof2) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof2)) // report on Quality of Fit (QoF) - -// val ma_f = ts.forecast (h = steps) -// println (s"$steps-step ahead forecasts using SARIMAX($p, $d, $q) model = $ma_f") - - mod.setPQ (VectorI (p, q)) // set p and q, train and evaluate the ARIMA model - mod.train (null, y) // train the model on full dataset - val (yp3, qof3) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof3)) // report on Quality of Fit (QoF) - -// val arma_f = ts.forecast (h = steps) -// println (s"$steps-step ahead forecasts using SARIMAX($p, $d, $q) model = $arma_f") - -end sARIMAXTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `sARIMAXTest5` main function tests the `SARIMAX` class on real data: - * Forecasting COVID-19. 
- * > runMain scalation.modeling.forecasting.sARIMAXTest5 - */ -@main def sARIMAXTest5 (): Unit = - - import SARIMAX.hp - - val data = MatrixD.load ("covid_19.csv", 1, 1) // skip first row (header) and first column - val yy = data(?, 4) // column 5 is daily deaths -// val yy = data(?, 5) // column 5 is daily deaths smoothed - val is = yy.indexWhere (_ >= 2.0) // find day of first death with at least 2 deaths - println (s"is = $is is first day with at least 2 deaths") - val y = yy(is until yy.dim) // slice out days before is - val x = MatrixD ((1, 1), 1) // FIX - make a useful matrix - - val h = 2 // forecasting horizon - hp("d") = 0 // level of differencing, try 0 and 1 - val dd = 1 // level of seasonal differencing, try 0 and 1 - val s = 7 - for p <- 1 to 15; q <- 1 to 3 do // SARIMAX hyper-parameter settings - hp("p") = p; hp("q") = q - val mod = new SARIMAX (y, x, dd, s) // create an SARIMAX model - mod.setPQ (VectorI (p, q, 0, 0)) - val (yp, qof) = mod.trainNtest ()() // train and the model on full dataset - - val yfa = mod.forecastAll (y, h) - val yf = yfa(?, h) // forecasted values - h steps ahead - new Plot (null, y, yf, s"Plot of y & yf, forecasted (h = $h) ${mod.modelName} vs. t", true) - - end for - -end sARIMAXTest5 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `sARIMAXTest6` main function is used to test the `SARIMAX` class. 
- * > runMain scalation.modeling.forecasting.sARIMAXTest6 - */ -@main def sARIMAXTest6 (): Unit = - - val m = 50 - val d = 0 // levels of differencing -// val steps = 2 // number of steps for the forecasts - val sig2 = 10000.0 - val noise = Normal (0.0, sig2) - val y = VectorD (for i <- 0 until m yield 40 * (i-1) - (i-2) * (i-2) + noise.gen) - val x = MatrixD ((1, 1), 1) // FIX - make a useful matrix - - val mod = new SARIMAX (y, x, d) // time series model SARIMAX - - banner (s"Build SARIMAX(1, $d, 0) model") - mod.setPQ (VectorI (1, 0)) // train for SARIMAX(1, d, 0) model - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - for p <- 1 to 3 do - banner (s"Build SARIMAX($p, $d, $p) model") - mod.setPQ (VectorI (p, p)) // retrain for SARIMAX(p, d, p) model - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - -// banner ("Make Forecasts") -// val yf = mod.forecast (steps) -// println (s"$steps-step ahead forecasts using SARIMAX($p, $d, $p) model = $yf") - end for - -end sARIMAXTest6 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing$.class deleted file mode 100644 index 71ca8a19b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing$package$.class deleted file mode 100644 index 724c61392..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing$package.class deleted file mode 100644 index 4d3c0fa9e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing$package.tasty deleted file mode 100644 index f6bce79c7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing.class deleted file mode 100644 index f3b3ed145..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing.tasty deleted file mode 100644 index 47ab82718..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleExpSmoothing.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage$.class deleted file mode 100644 index 3b305b4f7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage$.class and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage$package$.class deleted file mode 100644 index 6ba4fea2d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage$package.class deleted file mode 100644 index 27c3372c5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage$package.tasty deleted file mode 100644 index 6be95c9d7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage.class deleted file mode 100644 index 88b42d622..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage.tasty deleted file mode 100644 index 654c83f9a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/SimpleMovingAverage.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Stationarity$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Stationarity$package$.class deleted file mode 100644 index 1805fcc6c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Stationarity$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Stationarity$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Stationarity$package.class deleted file mode 100644 index f148c1038..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Stationarity$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Stationarity$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Stationarity$package.tasty deleted file mode 100644 index 5bb138863..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/Stationarity$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel$.class deleted file mode 100644 index 8ac55e351..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel$package$.class deleted file mode 100644 index 792ee9cad..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel$package.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel$package.class deleted file mode 100644 index 8568f64ea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel$package.tasty deleted file mode 100644 index 6e868e7d6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel.class deleted file mode 100644 index 45d665fc4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel.tasty deleted file mode 100644 index bfb7a1228..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/TrendModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/UnitRoot.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/UnitRoot.class deleted file mode 100644 index c9963f9b3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/UnitRoot.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/UnitRoot.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/UnitRoot.tasty deleted file mode 100644 index 7d64e0884..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/UnitRoot.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR$.class deleted file mode 100644 index 26cd7dd3d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR$package$.class deleted file mode 100644 index 1e7a09c91..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR$package.class deleted file mode 100644 index f8bd8b213..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR$package.tasty deleted file mode 100644 index e3fd6c962..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR.class deleted file mode 100644 index 1c5fcb19d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR.tasty deleted file mode 100644 index 3b3a8f10d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/VAR.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage$.class deleted file mode 100644 index 026ab1ef1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage$package$.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage$package$.class deleted file mode 100644 index ac6a9c860..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage$package.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage$package.class deleted file mode 100644 index ec11bd256..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage$package.tasty deleted file mode 100644 index ebfed43d4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage.class deleted file mode 100644 index eebe07c4f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage.tasty deleted file mode 100644 index cf3c5b1d4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/WeightedMovingAverage.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest.class deleted file mode 100644 index a87d0a34b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest.tasty deleted file mode 100644 index 2f8f526c3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest2.class deleted file mode 100644 index 016f4224e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest2.tasty deleted file mode 100644 index b1682884f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest3.class deleted file mode 100644 index 47404832d..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest3.tasty deleted file mode 100644 index be00056ef..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest4.class deleted file mode 100644 index f513f6b2e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest4.tasty deleted file mode 100644 index 2f12c5710..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest5.class deleted file mode 100644 index ddae2125d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest5.tasty deleted file mode 100644 index 0116a56f1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aR1MATest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest.class deleted file mode 100644 
index 93ee3ff93..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest.tasty deleted file mode 100644 index ef2abcdfd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest2.class deleted file mode 100644 index 18fd74916..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest2.tasty deleted file mode 100644 index 6fafa31d7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest3.class deleted file mode 100644 index aef95fb21..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest3.tasty deleted file mode 100644 index 3166ae1cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest4.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest4.class deleted file mode 100644 index 6d851069f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest4.tasty deleted file mode 100644 index ab35f219f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMATest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMA_diffTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMA_diffTest.class deleted file mode 100644 index ca5085cd1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMA_diffTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMA_diffTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMA_diffTest.tasty deleted file mode 100644 index 3ef7e6ed5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRIMA_diffTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest.class deleted file mode 100644 index 74e4c853e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest.tasty deleted file mode 100644 index d0899132a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest2.class deleted file mode 100644 index 541c416b3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest2.tasty deleted file mode 100644 index c728d1ed7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest3.class deleted file mode 100644 index 0ef9c60f7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest3.tasty deleted file mode 100644 index e35053bec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest4.class deleted file mode 100644 index 37c520da0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest4.tasty deleted file mode 100644 index e55de0077..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest4.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest5.class deleted file mode 100644 index 26fcdafce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest5.tasty deleted file mode 100644 index 04364ab7e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest6.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest6.class deleted file mode 100644 index 9fde73d53..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest6.tasty deleted file mode 100644 index 1af6143f5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest7.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest7.class deleted file mode 100644 index cfeea0de3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest7.tasty deleted file mode 100644 index d63795475..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRMATest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest.class deleted file mode 100644 index 6167a662f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest.tasty deleted file mode 100644 index 1fb612e46..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest2.class deleted file mode 100644 index 2578ccff8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest2.tasty deleted file mode 100644 index 33448d0c7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest3.class deleted file mode 100644 index 69ebbb44f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest3.tasty deleted file mode 100644 index b187274ca..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest4.class deleted file mode 100644 index 72e4982e3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest4.tasty deleted file mode 100644 index 313fcc5b1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest5.class deleted file mode 100644 index 7c3523dc0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest5.tasty deleted file mode 100644 index 57bf2152b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest6.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest6.class deleted file mode 100644 index fc8ad388e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest6.tasty deleted file mode 100644 index 98dd4968f..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest7.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest7.class deleted file mode 100644 index 11acf2c16..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest7.tasty deleted file mode 100644 index 10b694633..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest.class deleted file mode 100644 index 376229884..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest.tasty deleted file mode 100644 index c8f9ba316..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest2.class deleted file mode 100644 index 6dde796a2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest2.tasty deleted file mode 100644 index be3d0bb5c..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest3.class deleted file mode 100644 index fd1401c04..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest3.tasty deleted file mode 100644 index 0ec238e37..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest4.class deleted file mode 100644 index b18a4f611..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest4.tasty deleted file mode 100644 index 15dde1f18..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest5.class deleted file mode 100644 index 69848fbac..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest5.tasty deleted file mode 100644 index c6b475d77..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest6.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest6.class deleted file mode 100644 index 290ff9d25..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest6.tasty deleted file mode 100644 index fd9ea410f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest7.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest7.class deleted file mode 100644 index 61a2e8d61..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest7.tasty deleted file mode 100644 index bded7d6b4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRXTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest.class deleted file mode 100644 index cfbf74ea1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest.tasty deleted file mode 100644 index 
3d1f8aec8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest2.class deleted file mode 100644 index 28103ccbb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest2.tasty deleted file mode 100644 index 635044043..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest3.class deleted file mode 100644 index 484be3905..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest3.tasty deleted file mode 100644 index ab4fe66c7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest4.class deleted file mode 100644 index 67faa9a03..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest4.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest4.tasty deleted file mode 100644 index 5759ac818..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest5.class deleted file mode 100644 index f02ebc099..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest5.tasty deleted file mode 100644 index c5457b0b3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest6.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest6.class deleted file mode 100644 index b4de63aff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest6.tasty deleted file mode 100644 index ad01f70e3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_MVTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest.class deleted file mode 100644 index 58f6a89eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest.tasty deleted file mode 100644 index b906f65e9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest2.class deleted file mode 100644 index 605dc45eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest2.tasty deleted file mode 100644 index 355e8a5c4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest3.class deleted file mode 100644 index dfc4b323f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest3.tasty deleted file mode 100644 index a62de965f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest4.class deleted file mode 100644 index 
bcbc08ee3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest4.tasty deleted file mode 100644 index 6ad59a950..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest5.class deleted file mode 100644 index 62ef3e0c5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest5.tasty deleted file mode 100644 index dec83eea7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest6.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest6.class deleted file mode 100644 index 829a42473..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest6.tasty deleted file mode 100644 index 527095781..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/aRX_Quad_MVTest6.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/buildTensor4TSTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/buildTensor4TSTest.class deleted file mode 100644 index f2f528ddc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/buildTensor4TSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/buildTensor4TSTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/buildTensor4TSTest.tasty deleted file mode 100644 index a228ff35c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/buildTensor4TSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/index.html b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/index.html deleted file mode 100644 index 36cee371a..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/index.html +++ /dev/null @@ -1,41 +0,0 @@ - - -

    Source files in forecasting Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/kalmanFilterTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/kalmanFilterTest.class deleted file mode 100644 index 481509eec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/kalmanFilterTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/kalmanFilterTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/kalmanFilterTest.tasty deleted file mode 100644 index efccd4d1e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/kalmanFilterTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest.class deleted file mode 100644 index e3d64c5d3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest.tasty deleted file mode 100644 index f92d2834b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest2.class deleted file mode 100644 index 17e6302db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest2.tasty deleted file 
mode 100644 index 8ef9b26f9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest3.class deleted file mode 100644 index d2e898d4c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest3.tasty deleted file mode 100644 index 978d84ec0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/nullModelTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/AR.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/AR.scala.bak deleted file mode 100644 index 00ed61247..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/AR.scala.bak +++ /dev/null @@ -1,252 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller, Michael Cotterell - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). 
- * - * @title Model: Auto-Regressive - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.collection.mutable.Set -import scala.math.{max, min} - -import scalation.mathstat._ -import scalation.random.{Normal, Uniform} - -import Fit._ - -// FIX - don't use actual y values for first p predictions - compare with ARIMA - -import AR._ -import Forecaster._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` class provides basic time series analysis capabilities for Auto-Regressive - * (AR) models. In an AR(p) model, p refers to the order of the Auto-Regressive - * components of the model. AR models are often used for forecasting. - * Given time series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * y_t = δ + Σ(φ_k y_t-k) - * where δ is a constant, φ is the autoregressive coefficient vector, - * and e is the noise vector. 
- *---------------------------------------------------------------------------------- - * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class AR (y: VectorD, tt: VectorD = null, hparam: HyperParameter = AR.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = hparam("p").toInt, df = y.dim - hparam("p").toInt): - - private val debug = debugf ("AR", true) // debug function - private val flaw = flawf ("AR") // flaw function - private var m = y.dim // number of time points - private var p = hparam("p").toInt // p-th order Auto-Regressive model - private var φ = VectorD.nullv // AR(p) parameters/coefficients - private var δ = NO_DOUBLE // drift/constant term - private var trained = false // has trained been called? - - if p > MAX_LAGS then flaw ("AR", s"p = $p must not be greater than MAX_LAGS = $MAX_LAGS") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including its current hyper-parameter, e.g., AR(2). - */ - override def modelName: String = s"AR($p)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `AR` model to the times-series data in vector y_. - * Estimate the coefficient vector φ for a p-th order Auto-Regressive AR(p) model. - * z_t = φ_0 * z_t-1 + ... + φ_p-1 * z_t-p + e_t - * Uses the Durbin-Levinson Algorithm to determine the coefficients. - * The φ vector is p-th row of 'psi' matrix (ignoring the first (0th) column). 
- * @param x_null the data/input matrix (ignored) - * @param y_ the training/full response vector (defaults to y) - */ - def train (x_null: MatrixD = null, y_ : VectorD = y): Unit = - m = y_.dim - resetDF (p, m - p) - makeCorrelogram (y_) - φ = psiM(p)(1 to p+1) - δ = statsF.mu * (1 - φ.sum) - trained = true - debug ("train", s"parameters for AR($p) model: φ = $φ, δ = $δ") - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Retrain/fit an `AR` model to the times-series data using another order p_ - * Estimate the coefficient vector φ for a p-th order Auto-Regressive AR(p) model. - * The φ vector is p-th row of psi matrix (ignoring the first (0th) column). - * @param p_ another order - */ - def retrain (p_ : Int): Unit = - if ! trained then flaw ("retrain", "train must be called before retrain") - p = p_ - resetDF (p, m - p) - φ = psiM(p)(1 to p+1) - δ = statsF.mu * (1 - φ.sum) - debug ("retrain", s"parameters for AR($p) model: φ = $φ, δ = $δ") - end retrain - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test an AR forecasting model y_ = f(lags (y_)) + e and return its QoF vector. - * Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the training/testing response/output vector (defaults to full y) - */ - def test (x_null: MatrixD = null, y_ : VectorD = y): VectorD = - val yp = predictAll (y_) // make predictions - val (y1, y2) = align (y_, yp) - diagnose (y1, y2) // evaluate the quality of these predictions - fit // return the QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector. 
- */ - def parameter: VectorD = φ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for time point t using 1-step ahead forecasts. - * y_t = φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (assume first value repeats). - * @param tn the time point (index) to be predicted - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - if t < 1 || t > y_.dim then flaw ("predict", s"time index t = $t is out of range") - var sum = δ - for j <- 0 until p do sum += φ(j) * y_(max (0, t-1-j)) - sum - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict values for all time points using 1-step ahead forecasts. - * Return a vector that is the predictions (zero-centered) of a p-th order - * Auto-Regressive AR(p) model. - * @see predictAll in `ForecasterVec` for uncentered results - def predictAllz (): VectorD = - val zp = new VectorD (m) // forecasts for all time points t - for t <- 0 until p do zp(t) = z(t) // copy first p actual values into zp - for t <- p until m do - var sum = 0.0 - for j <- 0 until p do sum += φ(j) * zp(max (0, t-1-j)) - zp(t) = sum - end for - zp // return vector of predicted values - end predictAllz - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all m time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, yf(?, 0) is set to y (the actual time-series values). 
- * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAll (h: Int): MatrixD = - val yf = new MatrixD (m, h+1) // forecasts for all time points t & horizons to h - yf(?, 0) = y // first column is actual values, horizon 0 - for k <- 1 to h do - val c = min (k, p) // cut point from actual to forecasted values - for t <- 0 until c do yf(t, k) = y(t) // copy first c actual values - for t <- c until m do // forecast the rest - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t-1-j), max (0, k-1-j)) - yf(t, k) = sum - end for - debug ("forecastAll", s"yf(?, $k) = ${yf(?, k)}") - end for - yf // return matrix of forecasted values - end forecastAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce h-steps ahead forecast on the testing data during cross validation. - * @param y the current response vector - * @param t the time point to be forecast - * @param h the forecasting horizon, number of steps ahead to produce forecast - override def forecastX (y: VectorD = y, t: Int = y.dim, h: Int = 1): Double = - if t > m then flaw ("forecast", "no forecasts with starting t > m are provided") - - val zf = new VectorD (p+h) - // Must calculate the z values by doing y - mu on the fly because these values are beyond the bounds of the z vector - for l <- 0 until p do zf(l) = y(max (0, t-p+l)) - stats.mu // copy first p values into zf. - - for k <- 1 to h do // advance the forecasting horizon - var sum = 0.0 - for j <- 0 until p do sum += φ(j) * zf(p-2+k-j) - zf(p-1+k) = sum - end for - zf.last + stats.mu // return the last forecast - end forecastX - */ - -end AR - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` companion object provides factory methods for the `AR` class. 
- */ -object AR: - - /** Base hyper-parameter specification for `AR` - */ - val hp = new HyperParameter - hp += ("p", 1, 1) // for the AR part - - private val flaw = flawf ("AR") // flaw function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `AR` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = AR.hp): AR = - new AR (y, tt, hparam) - end apply - -end AR - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARTest` object is used to test the `AR` class on real data: Forecasting lake - * levels. Test the test and predictAll methods over the whole times-series. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.ARTest - */ -object ARTest extends App: - - import Example_LakeLevels.y - val t = VectorD.range (1, y.dim) - - var ar: AR = null - for p <- 1 to 11 do // autoregressive hyper-parameter - hp("p") = p // set p hyper-parameter - banner (s"Test: AR($p)") - ar = new AR (y) // create model for time series data - ar.train () // train the model on full dataset - println (ar.report (ar.test ())) // test the model and report results - val yp = ar.predictAll (y) // predict 1-step ahead for all y - val (y1, y2) = align (y, yp) // compare y(1:m) with yp(0:m-1) - new Plot (t, y1, y2, s"AR($p): y-actual vs. 
y-predicted", lines = true) - end for - - banner ("Select model based on ACF and PACF") - ar.plotFunc (ar.acF, "ACF") - ar.plotFunc (ar.pacF, "PACF") - -end ARTest - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/AR.scala.bak2 b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/AR.scala.bak2 deleted file mode 100644 index a6bb1d97e..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/AR.scala.bak2 +++ /dev/null @@ -1,218 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller, Michael Cotterell - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). - * - * @title Model: Auto-Regressive (AR) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ - -import AR.hp - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` class provides basic time series analysis capabilities for Auto-Regressive - * (AR) models. In an AR(p) model, p refers to the order of the Auto-Regressive - * components of the model. AR models are often used for forecasting. - * Given time-series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * y_t = δ + Σ(φ_k y_t-k) + e_t - * where δ is a constant, φ is the auto-regressive coefficient vector, - * and e_t is the noise term. 
- *---------------------------------------------------------------------------------- - * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -abstract class AR (y: VectorD, tt: VectorD = null, hparam: HyperParameter = AR.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = hparam("p").toInt, df = y.dim - hparam("p").toInt): - - private val debug = debugf ("AR", true) // debug function - private val flaw = flawf ("AR") // flaw function - - m = y.dim // number of time points (@see `FitM`) - private var p = hparam("p").toInt // p-th order Auto-Regressive model - private var φ = VectorD.nullv // AR(p) parameters/coefficients - private var δ = NO_DOUBLE // drift/intercept/constant term - private var yf = MatrixD.nullm // the forecast matrix - time points x horizons - - if p > MAX_LAGS then flaw ("init", s"p = $p must not be greater than MAX_LAGS = $MAX_LAGS") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including its current hyper-parameter, e.g., AR(2). - */ - override def modelName: String = s"AR($p)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `AR` model to the times-series data in vector y_. - * Estimate the coefficient vector φ for a p-th order Auto-Regressive AR(p) model. - * Uses Durbin-Levinson Algorithm (in `Correlogram`) to determine the coefficients. - * The φ vector is p-th row of psi matrix (ignoring the first (0th) column). 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - m = y_.dim // length of relevant time-series - resetDF (p, m - p) // reset the degrees of freedom - makeCorrelogram (y_) // correlogram computes psi matrix - φ = psiM(p)(1 to p+1) // coefficients = p-th row, columns 1, 2, ... p - δ = statsF.mu * (1 - φ.sum) // compute drift/intercept - debug ("train", s"parameters for AR($p) model: φ = $φ, δ = $δ") - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an AR forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the full/testing response/output vector (e.g., full y) - */ - def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yy, yp) = testSetup (y_) // get and align actual and predicted values - e = yy - yp // set the residuals/errors - resetDF (p, yy.dim - p) // reset the degrees of freedom - (yp, diagnose (yy, yp)) // return predictions and QoF Vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an AR forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. Testing may be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before testf. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the full/testing response/output vector (e.g., full y) - * @param redo whether to use existing forecasts or redo them (defaults to false) - */ - def testf (h: Int, y_ : VectorD, redo: Boolean = false): VectorD = - if yf == null || yf.dim2 < h+1 || redo then yf = forecastAll (h, y_) // redo all forecasts - val yy = y_(h to y_.dim) - val yf_h = yf(?, h)(h to y_.dim) // pull column h from the forecast matrix and align - resetDF (p, yy.dim - p) // reset the degrees of freedom - diagnose (yy, yf_h) // evaluate and return the QoF of these forecasts - end testf - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the AR(p) model. - */ - override def parameter: VectorD = φ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for time point/index t using 1-step ahead forecasts. - * y_t = φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time). - * @see predictAll in `Forecaster` - * @param t the time point/index to be predicted - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - if t < 1 || t > y_.dim then flaw ("predict", s"time index t = $t is out of range") - var sum = δ - for j <- 0 until p do sum += φ(j) * y_(max (0, t-1-j)) - sum // prediction for y_t - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * Forecast recurse down diagonals in the yf forecast matrix. 
- * The top right and bottom left triangles in yf matrix are not forecastable. - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - */ - def forecastAll (h: Int, y_ : VectorD): MatrixD = - yf = new MatrixD (y_.dim+h, h+1) // forecasts for all time points t & horizons to h - for t <- 0 until m do yf(t, 0) = y_(t) // first column is actual values, horizon 0 - for k <- 1 to h do - for t <- y_.indices do // make forecasts over all time points for horizon k - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t+k-1-j), max (0, k-1-j)) - yf(t+k, k) = sum // forecast down the diagonal - end for - debug ("forecastAll", s"yf(?, $k) = ${yf(?, k)}") - end for - yf // return matrix of forecasted values - end forecastAll - -end AR - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` companion object provides factory methods for the `AR` class. - */ -object AR: - - /** Base hyper-parameter specification for `AR` class - */ - val hp = new HyperParameter - hp += ("p", 1, 1) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `AR` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = AR.hp): AR = - new AR (y, tt, hparam) - end apply - -end AR - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest` main function tests the `AR` class on real data: Forecasting lake levels. - * Test the test, predictAll, testf and forecastAll methods over the whole times-series. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest - */ -@main def aRTest (): Unit = - - import Example_LakeLevels.y - - val m = y.dim - val t = VectorD.range (1, m) - val h = 3 // the forecasting horizon - - var mod: AR = null - for p <- 1 to 10 do // autoregressive hyper-parameter p - hp("p") = p // set p hyper-parameter - banner (s"Test: AR($p)") - mod = new AR (y) // create model for time series data - mod.train (null, y) // train the model on full dataset - val yp = mod.testPred (y, t) - - val yf = mod.forecastAll (h, y) // forecast h-steps ahead for all y - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") -// assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values -// assert (yf(?, 1)(1 to m+1) == yp) // column 1 must agree with one step-ahead predictions - for k <- 1 to h do - println (s"evalaute QoF for horizon $k:") - println (FitM.fitMap (mod.testf (k, y), QoF.values.map (_.toString))) // evaluate k-units ahead forecasts - end for - end for - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRTest - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/AR.scala.bak3 b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/AR.scala.bak3 deleted file mode 100644 index 47b407d4d..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/AR.scala.bak3 +++ /dev/null @@ -1,289 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller, Michael Cotterell - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). 
- * - * @title Model: Auto-Regressive (AR) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ - -import Forecaster.differ -import AR.hp - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` class provides basic time series analysis capabilities for Auto-Regressive - * (AR) models. In an AR(p) model, p refers to the order of the Auto-Regressive - * components of the model. AR models are often used for forecasting. - * Given time-series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * y_t = δ + Σ(φ_k y_t-k) + e_t - * where δ is a constant, φ is the auto-regressive coefficient vector, - * and e_t is the noise term. 
- * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class AR (y: VectorD, tt: VectorD = null, hparam: HyperParameter = AR.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = hparam("p").toInt, df = y.dim - hparam("p").toInt): - - private val debug = debugf ("AR", true) // debug function - private val flaw = flawf ("AR") // flaw function - - m = y.dim // number of time points (@see `FitM`) - private var p = hparam("p").toInt // p-th order Auto-Regressive model - private var φ = VectorD.nullv // AR(p) parameters/coefficients - private var δ = NO_DOUBLE // drift/intercept/constant term -// private var yf = MatrixD.nullm // the forecast matrix - time points x horizons - - if p > MAX_LAGS then flaw ("init", s"p = $p must not be greater than MAX_LAGS = $MAX_LAGS") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including its current hyper-parameter, e.g., AR(2). - */ - override def modelName: String = s"AR($p)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `AR` model to the times-series data in vector y_. - * Estimate the coefficient vector φ for a p-th order Auto-Regressive AR(p) model. - * Uses Durbin-Levinson Algorithm (in `Correlogram`) to determine the coefficients. - * The φ vector is p-th row of psi matrix (ignoring the first (0th) column). - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - m = y_.dim // length of relevant time-series - resetDF (p, m - p) // reset the degrees of freedom - makeCorrelogram (y_) // correlogram computes psi matrix - φ = psiM(p)(1 to p+1) // coefficients = p-th row, columns 1, 2, ... 
p - δ = statsF.mu * (1 - φ.sum) // compute drift/intercept - debug ("train", s"parameters for AR($p) model: φ = $φ, δ = $δ") - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an AR forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. - * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the training/testing/full response/output vector - */ - def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yy, yp) = testSetup (y_) // get and align actual and predicted values - resetDF (1, yy.dim - 1) // reset the degrees of freedom - e = yy - yp // determine error/residual vector - println (s"test: yy.dim = ${yy.dim}, yp.dim = ${yp.dim}") -// differ (yy, yp) // uncomment for debugging - (yp, diagnose (yy, yp)) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an AR forecasting model y_ = f(lags (y_)) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD) = - val (yy, yfh) = testSetupH (y_, h) // get and align actual and forecasted values - resetDF (1, yy.dim - 1) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// differ (yy, yfh) // uncomment for debugging - (yfh, diagnose (yy, yfh)) // return predictions and QoF vector - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the AR(p) model. - */ - override def parameter: VectorD = φ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for time point/index t using 1-step ahead forecasts. - * y_t = φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time). - * @see predictAll in `Forecaster` - * @param t the time point/index to be predicted - * @param y_ the actual values to use in making predictions - * - def predict (t: Int, y_ : VectorD): Double = - if t < 1 || t > y_.dim then flaw ("predict", s"time index t = $t is out of range") - var sum = δ - for j <- 0 until p do sum += φ(j) * y_(max (0, t-1-j)) - sum // prediction for y_t - end predict - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * Forecast recurse down diagonals in the yf forecast matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. 
- * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - * - def forecastAll (h: Int, y_ : VectorD): MatrixD = - yf = new MatrixD (y_.dim+h, h+1) // forecasts for all time points t & horizons to h - for t <- 0 until m do yf(t, 0) = y_(t) // first column is actual values, horizon 0 - for k <- 1 to h do - for t <- y_.indices do // make forecasts over all time points for horizon k - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t+k-1-j), max (0, k-1-j)) - yf(t+k, k) = sum // forecast down the diagonal - end for - debug ("forecastAll", s"yf(?, $k) = ${yf(?, k)}") - end for - yf // return matrix of forecasted values - end forecastAll - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to forecasting matrix and return h-step ahead forecast. - * @param yf the forecasting matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - for t <- y_.indices do // make forecasts over all time points for horizon k - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t+h-1-j), max (0, h-1-j)) - yf(t+h, h) = sum // forecast down the diagonal - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end AR - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` companion object provides factory methods for the `AR` class. - */ -object AR: - - /** Base hyper-parameter specification for `AR` class - */ - val hp = new HyperParameter - hp += ("p", 1, 1) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `AR` object. 
- * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = AR.hp): AR = - new AR (y, tt, hparam) - end apply - -end AR - -import Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest` main function tests the `AR` class on real data: Forecasting lake levels. - * Test predictions (one step ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest - */ -@main def aRTest (): Unit = - - banner (s"Test Predictions: AR(1) on LakeLevels Dataset") - val mod = new AR (y) // create model for time series data AR(1) - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest2` main function tests the `AR` class on real data: Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest2 - */ -@main def aRTest2 (): Unit = - - val m = y.dim // number of data points - val hh = 3 // maximum forecasting horizon - - banner (s"Test Forecasts: AR(1) on LakeLevels Dataset") - val mod = new AR (y) // create model for time series data AR(1) - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report of Quality of Fit (QoF) - - val yf = mod.forecastAll (y, hh) // forecast h-steps ahead (h = 1 to hh) for all y - println (s"yf = $yf") - println (s"y.dim = ${y.dim}, yp.dim = ${yp.dim}, yf.dims = ${yf.dims}") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - differ (yf(?, 1)(1 until m), yp) - assert (yf(?, 1)(1 until m) == yp) // column 1 must agree with one step-ahead predictions - - for h <- 1 to hh do - val (yfh, qof) = mod.testF (h, y) // h-steps ahead forecast and its QoF - println (s"Evaluate QoF for horizon $h:") - println (FitM.fitMap (qof, QoF.values.map (_.toString))) // evaluate h-steps ahead forecasts - end for - -end aRTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest3` main function tests the `AR` class on real data: Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts). Try multiple values for p. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest3 - */ -@main def aRTest3 (): Unit = - - val m = y.dim // number of data points - val hh = 3 // maximum forecasting horizon - - var mod: AR = null - for p <- 1 to 10 do // autoregressive hyper-parameter p - hp("p") = p // set p hyper-parameter - banner (s"Test Forecasts: AR($p) on LakeLevels Dataset") - mod = new AR (y) // create model for time series data - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report of Quality of Fit (QoF) - - val yf = mod.forecastAll (y, hh) // forecast h-steps ahead for all y - println (s"yf = $yf") - println (s"y.dim = ${y.dim}, yp.dim = ${yp.dim}, yf.dims = ${yf.dims}") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - differ (yf(?, 1)(1 until m), yp) - assert (yf(?, 1)(1 until m) == yp) // column 1 must agree with one step-ahead predictions - - for h <- 1 to hh do - val (yfh, qof) = mod.testF (h, y) // h-steps ahead forecast and its QoF - println (s"Evaluate QoF for horizon $h:") - println (FitM.fitMap (qof, QoF.values.map (_.toString))) // evaluate h-steps ahead forecasts - end for - end for - -end aRTest3 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/AR.scala.bak4 b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/AR.scala.bak4 deleted file mode 100644 index 92cb73cdb..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/AR.scala.bak4 +++ /dev/null @@ -1,456 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller, Michael Cotterell - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). 
- * - * @title Model: Auto-Regressive (AR) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ - -import Forecaster.differ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` class provides basic time series analysis capabilities for Auto-Regressive - * (AR) models. In an AR(p) model, p refers to the order of the Auto-Regressive - * components of the model. AR models are often used for forecasting. - * Given time series data stored in vector y, its next value y_t+1 = y(t+1) - * may be predicted based on prior values of y and its noise: - * y_t+1 = δ + Σ(φ_j y_t-j) + e_t+1 - * where δ is a constant, φ is the auto-regressive coefficient vector, - * and e_t+1 is the noise term. 
- * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class AR (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARMA.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = hparam("p").toInt, df = y.dim - hparam("p").toInt): - - private val debug = debugf ("AR", true) // debug function - private val flaw = flawf ("AR") // flaw function - private val p = hparam("p").toInt // p-th order Auto-Regressive model - private var φ = VectorD.nullv // AR(p) parameters/coefficients - private var δ = NO_DOUBLE // drift/intercept/constant term - private val pnq = p // sum of # parameters - private var calPhi = true // caluculate phi vector - not externally supplied - - if p > MAX_LAGS then flaw ("init", s"p = $p must not be greater than MAX_LAGS = $MAX_LAGS") - - modelName = s"AR($p)" - - def setPhi (phi: VectorD): Unit = - φ = phi - calPhi = false - end setPhi - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `AR` model to the times-series data in vector y_. - * Estimate the coefficient vector φ for a p-th order Auto-Regressive AR(p) model. - * Uses Durbin-Levinson Algorithm (in `Correlogram`) to determine the coefficients. - * The φ vector is p-th row of psi matrix (ignoring the first (0th) column). - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - m = y_.dim // length of relevant time series - resetDF (pnq, m - pnq) // reset the degrees of freedom - makeCorrelogram (y_) // correlogram computes psi matrix - if calPhi then φ = psiM(p)(1 until p+1) // coefficients = p-th row, columns 1, 2, ... 
p - δ = statsF.mu * (1 - φ.sum) // compute drift/intercept - debug ("train", s"parameters for AR($p) model: φ = $φ, δ = $δ") - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an AR forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the testing/full response/output vector - */ - def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yy, yp) = testSetup (y_) // get and align actual and predicted values - resetDF (pnq, yy.dim - pnq) // reset the degrees of freedom - println (s"test: yy.dim = ${yy.dim}, yp.dim = ${yp.dim}") -// differ (yy, yp) // uncomment for debugging - (yp, diagnose (yy, yp)) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an AR forecasting model y_ = f(lags (y_)) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, h) // get and align actual and forecasted values - resetDF (pnq, yy.dim - pnq) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// differ (yy, yfh) // uncomment for debugging - (yfh, diagnose (yy, yfh)) // return predictions and QoF vector - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the AR(p) model. - */ - override def parameter: VectorD = φ :+ δ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * y_t+1 = φ_0 y_t + φ_1 y_t-1 + ... + φ_p-1 y_t-(p-1) - * When t-j is negative, use y_0 - * @param t the time point from which to make prediction - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - var sum = δ // intercept - for j <- 0 until p do sum += φ(j) * y_(max (0, t-j)) // add φ_j y_t-j - sum - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. 
- * Intended to work with rolling validation (analog of predict method) - * @param t the time point from which to make forecasts - * @param yf the forecasting matrix (time x horizons) - * @param y_ the actual values to use in making predictions - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - val yd = new VectorD (h) // hold forecasts for each horizon - for k <- 1 to h do - val t1 = t + k - 1 // time point prior to horizon - val sum0 = δ + rdot (φ, yf, t1, k-1) - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t1-j), max (0, k-1-j)) - assert (sum0 == sum) - yf(t+k, k) = sum // forecast down the diagonal - yd (k-1) = sum // record diagonal values - end for - yd // return forecasts for each horizon - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute a reverse dot product of the parameter vector b and a diagonal - * of the the yf matrix starting at element (r, c) and moving up and back. - * Use max (0, ..) to avoid using negative indices into the yf matrix. - * @param b the parameter/cofficient vector (e.g., φ for AR) - * @param yf the forecasting matrix (time x horizons) - * @param r the starting row in the forecasting matrix - * @param c the starting column in the forecasting matrix - */ - def rdot (b: VectorD, yf: MatrixD, r: Int, c: Int): Double = - var sum = 0.0 - for j <- b.indices do sum += b(j) * yf(max (0, r-j), max (0, c-j)) - sum - end rdot - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to forecasting matrix and return h-step ahead forecast. - * For 1-step ahead (h = 1), - * y_t = δ + φ_0 y_t-1 + φ_1 y_t-2 + ... 
+ φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time). - * @param yf the forecasting matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 1 then flaw ("forecastAt", s"horizon h = $h must be at least 1") - for t <- y_.indices do // make forecasts over all time points for horizon h - val t1 = t + h - 1 // time point prior to horizon - val sum0 = δ + rdot (φ, yf, t1, h-1) - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t1-j), max (0, h-1-j)) - println (s"sum0 = $sum0, sum = $sum") - assert (sum0 =~ sum) - yf(t+h, h) = sum // forecast down the diagonal - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end AR - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR` companion object provides factory methods for the `AR` class. - * Use `ARMA` for hyper-parameters. - */ -object AR: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `AR` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARMA.hp): AR = - new AR (y, tt, hparam) - end apply - -end AR - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest` main function tests the `AR` class on simulated data. - * Test predictions (one step ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest - */ -@main def aRTest (): Unit = - - val y = makeTSeries () // create simulated time series (see `Stationary`) - - banner (s"Test Predictions: AR(1) on simulated time series") - val mod = new AR (y) // create model for time series data AR(1) - mod.trainNtest ()() // train and test on full dataset - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRTest - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest2` main function tests the `AR` class on simulated data. - * Test predictions (one step ahead forecasts). - * > runMain scalation.modeling.forecasting.aRTest2 - */ -@main def aRTest2 (): Unit = - - import scala.math.sqrt - - import ActivationFun.f_sigmoid.{fM, dM} - import neuralnet.{NeuralNet_3L, Optimizer} - - val y = VectorD (1, 2, 4, 7, 9, 8, 6, 5, 3) // create a time series by hand - - val m = y.dim - val mu_y = y.mean // mean for full series - - def rho (k: Int): Double = - var s = 0.0 - var q = 0.0 - for t <- 0 until y.dim-k do - s += (y(t) - mu_y) * (y(t+k) - mu_y) - for t <- 0 until y.dim do - q += (y(t) - mu_y)~^2 - s / q - end rho - - val yB1 = y(1 until m) // apply back-shift operator - val yy = y(0 until m-1) // y clipped to match the size of yB1 - val zz = yy - yy.mean - val zB1 = yB1 - yB1.mean - val r1 = (zz dot zB1) / sqrt ((zz dot zz) * (zB1 dot zB1)) // lag-1 auto-correlation - println (s"mu_y = $mu_y") - println (s"zz = $zz") - println (s"zB1 = $zB1") - println (s"r1 = $r1") - println (s"rho1 = ${rho(1)}") - println (s"rho2 = ${rho(2)}") - - banner (s"Test Predictions: AR(1) on hand created time series") - var mod = new AR (y) // create model for time series data AR(1) - mod.setPhi (VectorD (0.6)) // allows coeficients to be user specified - 
mod.trainNtest ()() // train and test on full dataset - - banner (s"Test Predictions: AR(2) on hand created time series") - ARMA.hp("p") = 2 - mod = new AR (y) // create model for time series data AR(2) - mod.trainNtest ()() // train and test on full dataset - - val x = MatrixD ((9, 3), 1, 1, 8, - 1, 2, 7, - 1, 3, 6, - 1, 4, 5, - 1, 5, 5, - 1, 6, 4, - 1, 7, 4, - 1, 8, 3, - 1, 9, 2) - - banner (s"Test Predictions: Regression on hand created time series") - val reg = new Regression (x, y) - val (yp, qof) =reg.trainNtest ()() // train and test on full dataset - println (reg.summary ()) - new Plot (null, y, yp, "Regression", lines = true) - - banner (s"Test Predictions: NeuralNet_3L on hand created time series") - val x_ = x(?, 1 until 3) - val y_ = MatrixD.fromVector (y) - val a = MatrixD.fill (2, 2, 0.1) // weight matrix A - val b = MatrixD.fill (2, 1, 0.1) // weight matrix B - val ab = VectorD.fill (2)(0.1) // bias vector alpha - val bb = VectorD.fill (1)(0.1) // bias vector beta - val u = x_ * a + ab // hidden layer pre-activation - val z = fM (u) // hidden layer (use sigmoid) - val v = z * b + bb // output layer pre-activation - val yp_ = v // output layer (use id) - val e = yp_ - y_ // negative error - val d1 = e *~ dM (v) // delta 1: output -> hidden - val d0 = (d1 * b.transpose) *~ dM (z) // delta 0: hidden -> input - - println (s"u = $u, z = $z, v = $v, yp_ = $yp_, e = $e, d1 = $d1, d0 = $d0") - - Optimizer.hp ("eta") = 1.0 - val nn3 = new NeuralNet_3L (x(?, 1 until 3), MatrixD.fromVector (y), nz = 2) - val (yq, q0f) = nn3.trainNtest ()() // train and test on full dataset -// val (yq, q0f) = nn3.trainNtest2 ()() // train and test on full dataset - auto eta - nn3.opti.plotLoss ("NeuralNet_3L") - new Plot (null, y, yq(?, 0), "NeuralNet_3L", lines = true) - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRTest2 - 
-import Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest3` main function tests the `AR` class on real data: Forecasting lake levels. - * Test predictions (one step ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest3 - */ -@main def aRTest3 (): Unit = - - banner (s"Test Predictions: AR(1) on LakeLevels Dataset") - val mod = new AR (y) // create model for time series data AR(1) - mod.trainNtest ()() // train and test on full dataset - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest4` main function tests the `AR` class on real data: Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest4 - */ -@main def aRTest4 (): Unit = - - val m = y.dim // number of data points - val hh = 2 // maximum forecasting horizon - - ARMA.hp("p") = 3 - banner (s"Test Forecasts: AR(1) on LakeLevels Dataset") - val mod = new AR (y) // create model for time series data AR(1) - val (yp, qof) = mod.trainNtest ()() // train and test on full dataset - - val yf = mod.forecastAll (y, hh) // forecast h-steps ahead (h = 1 to hh) for all y - println (s"y.dim = ${y.dim}, yp.dim = ${yp.dim}, yf.dims = ${yf.dims}") - println (s"yf = $yf") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - differ (yf(?, 1)(1 until m), yp) - assert (yf(?, 1)(1 until m) == yp) // column 1 must agree with one step-ahead predictions - - for h <- 1 to hh do - val (yfh, qof) = mod.testF (h, y) // h-steps ahead forecast and its QoF - val yy = y(h until m) // actual response aligned with yfh - println (s"Evaluate QoF for horizon $h:") - println (FitM.fitMap (qof, QoF.values.map (_.toString))) // evaluate h-steps ahead forecasts - println (s"Fit.mae (y, yfh, h) = ${Fit.mae (y, yfh, h)}") // evaluate h-steps ahead forecasts with MAE - println (s"Fit.mae_n (y, 1) = ${Fit.mae_n (y, 1)}") // evaluate h-steps ahead forecasts with MAE_n - println (s"Fit.mase (y, yfh, h) = ${Fit.mase (y, yfh, h)}") // evaluate h-steps ahead forecasts with MASE - end for - -end aRTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest5` main function tests the `AR` class on real data: Forecasting lake levels. - * Test forecasts (1 to h steps ahead forecasts). Try multiple values for p. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRTest5 - */ -@main def aRTest5 (): Unit = - - val m = y.dim // number of data points - val hh = 2 // maximum forecasting horizon - - var mod: AR = null - for p <- 1 to 7 do // autoregressive hyper-parameter p - ARMA.hp("p") = p // set p hyper-parameter - banner (s"Test: AR($p) on LakeLevels Dataset") - mod = new AR (y) // create model for time series data - val (yp, qof) = mod.trainNtest ()() // train and test on full dataset - - val yf = mod.forecastAll (y, hh) // forecast h-steps ahead for all y -// println (s"yf = $yf") - println (s"y.dim = ${y.dim}, yp.dim = ${yp.dim}, yf.dims = ${yf.dims}") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - differ (yf(?, 1)(1 until m), yp) - assert (yf(?, 1)(1 until m) == yp) // column 1 must agree with one step-ahead predictions - - for h <- 1 to hh do - val (yfh, qof) = mod.testF (h, y) // h-steps ahead forecast and its QoF - println (s"Evaluate QoF for horizon $h:") - println (FitM.fitMap (qof, QoF.values.map (_.toString))) // evaluate h-steps ahead forecasts - end for - end for - -end aRTest5 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRTest6` main function tests the `AR` class on real data: Forecasting Weekly Covid-19. - * Test forecasts (1 to h steps ahead forecasts). Try multiple values for p. 
- * > runMain scalation.modeling.forecasting.aRTest6 - */ -@main def aRTest6 (): Unit = - - val y = Example_Covid.loadData_y ("new_deaths") - val m = y.dim // number of data points - val hh = 2 // maximum forecasting horizon - - println (s"y.dim = ${y.dim}") - - var mod: AR = null - for p <- 1 to 12 do // autoregressive hyper-parameter p - ARMA.hp("p") = p // set p hyper-parameter - banner (s"Test: AR($p) on Covid-19 Weekly Dataset") - mod = new AR (y) // create model for time series data - val (yp, qof) = mod.trainNtest ()() // train and test on full dataset - - val yf = mod.forecastAll (y, hh) // forecast h-steps ahead for all y -// println (s"yf = $yf") - println (s"y.dim = ${y.dim}, yp.dim = ${yp.dim}, yf.dims = ${yf.dims}") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - differ (yf(?, 1)(1 until m), yp) - assert (yf(?, 1)(1 until m) == yp) // column 1 must agree with one step-ahead predictions - - for h <- 1 to hh do - val (yfh, qof) = mod.testF (h, y) // h-steps ahead forecast and its QoF - println (s"Evaluate QoF for horizon $h:") - println (FitM.fitMap (qof, QoF.values.map (_.toString))) // evaluate h-steps ahead forecasts - end for - end for - -end aRTest6 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/AR1MA.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/AR1MA.scala.bak deleted file mode 100644 index 1e89a07bb..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/AR1MA.scala.bak +++ /dev/null @@ -1,352 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Hao Peng - * @version 2.0 - * @date Thu May 26 18:06:08 EDT 2022 - * @see LICENSE (MIT style license file). 
- * - * @note Model: Auto-Regressive, Integrated (0 or 1), Moving Average (AR1MA) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scalation.mathstat._ - -import ARIMA_diff._ -import Forecaster.differ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Return the first difference of the time-series y, giving the velocity v_t = y_t+1 - y_t. - * @param y the original time-series to be differenced - */ -//def del (y: VectorD): VectorD = VectorD (for t <- 0 until y.dim - 1 yield y(t+1) - y(t)) - -//inline def Δ (y: VectorD): VectorD = del (y) - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Return the undifferenced time-series from the velocity series. - * @param v the differenced time-series (velocity) - * @param y0 the first value in the original time-series - * -def undel (v: VectorD, y0: Double): VectorD = - val y = new VectorD (v.dim + 1) - y(0) = y0 - for t <- 1 until y.dim do y(t) = v(t-1) + y(t-1) - y -end undel - */ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `AR1MA` class provides basic time-series analysis capabilities for Auto- - * Regressive 'AR' Integrated 'I' Moving-Average 'MA' models. In an - * AR1MA(p, q) model, p and q refer to the order of the Auto-Regressive and - * Moving-Average components of the model; d=1 refers to the order of differencing. - * Works by taking the first difference and delegating to the `ARMA` class. - * Also works for d=0 (no differencing). 
- * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - * @param diffr whether to take a first difference (defaults to true) - */ -class AR1MA (y: VectorD, tt: VectorD = null, hparam: HyperParameter = SARIMAX.hp, - diffr: Boolean = true) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = hparam("p").toDouble, df = y.dim - pq (hparam)): - - private val debug = debugf ("AR1MA", true) // debug function - private val p = hparam("p").toInt // p-th order Auto-Regressive model - private val q = hparam("q").toInt // q-th order Moving-Average model - private val v = if diffr then Δ(y) else y // first difference of the full time-series - private val arma = new ARMA (v, tt, hparam) // delegate to the `ARMA` class - - arma.modelName = s"AR1MA($p, $q)" // rename delegate ARMA to match - modelName = arma.modelName // use same name for AR1MA - - new Plot (null, y, null, s"Plot $modelName: y vs. t", lines = true) - if diffr then new Plot (null, v, null, s"Plot $modelName: v = Δ(y) vs. t", lines = true) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Pick one of the following vectors: v full first difference, u differenced, or u itself. - * @param u the input time-series vector - */ - def pick (u: VectorD): VectorD = - if u == y then v // passed in original full time-series - else if diffr then Δ(u) // sub-series differenced - else u // sub-series as is - end pick - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `AR1MA` model to the times-series data in vector y_. - * Estimate the coefficient vectors φ and θ for (p, q)-th order AR1MA(p, q) model. 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - override def train (x_null: MatrixD, y_ : VectorD): Unit = - arma.train (x_null, pick (y_)) - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an AR1MA forecasting model y_ = f(lags (y_)) + e - * and return its predictions and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train before test. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/testing/full response/output vector - */ - def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - arma.test (x_null, pick (y_)) - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train and test the forecasting model y_ = f(y-past) + e and report its QoF - * and plot its predictions. Return the predictions and QoF. - * @param y_ the training/full response/output vector (defaults to full y) - * @param yy the testing/full response/output vector (defaults to full y) - */ - override def trainNtest (y_ : VectorD = y)(yy: VectorD = y): (VectorD, VectorD) = - arma.trainNtest (pick (y_))(pick (yy)) - end trainNtest - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an AR1MA forecasting model y_ = f(lags (y_)) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing/full response/output vector - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD, VectorD) = - arma.testF (h, pick (y_)) // return aligned actual, forecasted and qof vectors - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the AR1MA(p, q) model. - */ - override def parameter: VectorD = arma.parameter - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * y_t+1 = f (y_t, ...) - * @param t the time point from which to make prediction - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - arma.predict (t, pick (y_)) - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict all values corresponding to the given vector y_. - * @param y_ the actual values to use in making predictions - */ - override def predictAll (y_ : VectorD): VectorD = - arma.predictAll (pick (y_)) - end predictAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. 
- * Intended to work with rolling validation (analog of predict method) - * @param t the time point from which to make forecasts - * @param yf the forecasting matrix (time x horizons) - * @param y_ the actual values to use in making predictions - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, y_ : VectorD, h: Int): VectorD = - arma.forecast (t, yf, pick (y_), h) - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to forecasting matrix and return h-step ahead forecast. - * @param yf the forecasting matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - arma.forecastAt (yf, pick (y_), h) - end forecastAt - - //////////////////////////////////////////////////////////////////////////////// - // Make predictions/forecasts on the original scale time-series (not differenced). - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * y_t+1 = f (y_t, ...) + e_t+1 - * @param t the time point from which to make prediction - * @param y_ the actual values to use in making predictions - */ - def predict2 (t: Int, y_ : VectorD): Double = - arma.predict (t, pick (y_)) + (if diffr then y_(t) else 0.0) - end predict2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict all values corresponding to the given vector y_. 
- * @param y_ the actual values to use in making predictions - */ - def predictAll2 (y_ : VectorD, show: Boolean = true): VectorD = - val yp = new VectorD (y_.dim) - yp(0) = y_(0) - for t <- 0 until y_.dim-1 do yp(t+1) = predict2 (t, y_) - if show then -// println (FitM.fitMap (diagnose (y_, yp), qoF_names)) - println (s"nparams = $nparams") - resetDF (nparams - 1, y_.dim - nparams) - println (report (diagnose (y_, yp))) // report on Quality of Fit (QoF) - println (s"mase = ${Fit.mase (y, yp)}") // Means Absolute Scaled Error - new Plot (null, y_, yp, "Plot y, yp vs. t", lines = true) - end if - yp - end predictAll2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to forecasting matrix and return h-step ahead forecast. - * @param yf the forecasting matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt2 (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - val yfh = arma.forecastAt (yf, pick (y_), h) - if diffr then yfh + y_ else yfh - end forecastAt2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * Forecast recursively down diagonals in the yf forecasting matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. 
- * @param y_ the actual values to use in making forecasts - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAll2 (y_ : VectorD, h: Int): MatrixD = - debug ("forecastAll2", s"y_.dim = ${y_.dim}, e.dim = ${e.dim}") - yf = new MatrixD (y_.dim+h, h+2) // forecasts for all time points t & horizons to h - for t <- y_.indices do yf(t, 0) = y_(t) // first column is the timestep (e.g., logical day) - for k <- 1 to h do forecastAt2 (yf, y_, k) // forecast k-steps into the future - for t <- yf.indices do yf(t, h+1) = t // last column is time (logical day) - yf // return matrix of forecasted values - end forecastAll2 - -end AR1MA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aR1MATest` main function tests the `AR1MA` class on simulated data. - * Test predictions (one step ahead forecasts). - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aR1MATest - */ -@main def aR1MATest (): Unit = - - val y = makeTSeries () // create simulated time-series (see `Stationary`) - - banner (s"Test Predictions: AR1MA(1, 0) on simulated time-series") - val mod = new AR1MA (y) // create model for time-series data AR(1) - mod.trainNtest ()() // train and test on full dataset - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aR1MATest - -import Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aR1MATest2` main function tests the `AR1MA` class on real data: Forecasting lake levels. 
- * Test predictions (one step ahead forecasts) with no differencing - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aR1MATest2 - */ -@main def aR1MATest2 (): Unit = - - import SARIMAX.hp - - // d = 0 (no differencing) => should give same results as ARMA (@see `aRMATest4`) - - for p <- 1 to 3; q <- 0 to 2 do - hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters - val mod = new AR1MA (y, diffr = false) // create model for time-series data AR1MA(1, 0) - banner (s"Test Predictions: ${mod.modelName} (d=0) on LakeLevels Dataset") - val (vp, qof) = mod.trainNtest ()() // test and test the model on full dataset - val yp = mod.predictAll2 (y) // results on original scale - - new Plot (null, y, yp, s"Plot: ${mod.modelName} predictAll2: y, yp vs t", lines = true) - end for - -end aR1MATest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aR1MATest3` main function tests the `AR1MA` class on real data: Forecasting lake levels. - * Test predictions (one step ahead forecasts) taking one difference. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aR1MATest3 - */ -@main def aR1MATest3 (): Unit = - - import SARIMAX.hp - - val v = Δ (y) // take the first difference of time-series y - differ (y, undiff (v, y)) // verify recovery of original times-series -// differ (y, undel (v, y(0))) // verify recovery of original times-series - - for p <- 1 to 7; q <- 0 to 2 do - hp("p") = p; hp("q") = q // set p (AR) and q (MA) hyper-parameters - val mod = new AR1MA (y) // create model for time-series data AR1MA(1, 0) - banner (s"Test Predictions: ${mod.modelName} (d=1) on LakeLevels Dataset") - val (vp, qof) = mod.trainNtest ()() // test and test the model on full dataset - val yp = mod.predictAll2 (y) // results on original scale - - new Plot (null, y, yp, s"Plot: ${mod.modelName} predictAll2: y, yp vs t", lines = true) - end for - -end aR1MATest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aR1MATest4` main function tests the `AR1MA` class on real data: - * Forecasting COVID-19. 
- * > runMain scalation.modeling.forecasting.aR1MATest4 - */ -@main def aR1MATest4 (): Unit = - - import SARIMAX.hp - - val data = MatrixD.load ("covid_19.csv", 1, 1) // skip first row (header) and first column - val yy = data(?, 4) // column 4 is daily deaths -// val yy = data(?, 5) // column 5 is daily deaths smoothed - val is = yy.indexWhere (_ >= 2.0) // find day of first death with at least 2 deaths - println (s"is = $is is first day with at least 2 deaths") - val y = yy(is until yy.dim) // slice out days before is - -// val h = 2 // forecasting horizon - for p <- 1 to 5; q <- 1 to 3 do // AR1MA hyper-parameter settings - hp("p") = p; hp("q") = q - val mod = new AR1MA (y) // create an AR1MA model - val (vp, qof) = mod.trainNtest ()() // train and the model on full dataset - val yp = mod.predictAll2 (y) // results on original scale - println (s"yp = $yp") - -/* - val yfa = mod.forecastAll (y, h) - val yf = yfa(?, h) // forecasted values - h steps ahead - new Plot (null, y, yf, s"Plot of y & yf, forecasted (h = $h) ${mod.modelName} vs. t", true) -*/ - - end for - -end aR1MATest4 diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARIMA.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARIMA.scala.bak deleted file mode 100644 index a26e0dd8d..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARIMA.scala.bak +++ /dev/null @@ -1,268 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sat Jul 31 13:20:29 EDT 2021 - * @see LICENSE (MIT style license file). 
- * - * @title Model: Auto-Regressive, Integrated, Moving-Average (ARIMA) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ -import scalation.optimization._ - -import ARIMA.hp - -val flaw = flawf ("forecasting") // flaw function - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Take the 1-st difference of vector/time-series y. - * Note, it stores the first value in the original times-series in the first - * position of the differenced vector. - * @param y the vector/time-series to be differenced - */ -def diff (y: VectorD): (Double, VectorD) = - val yd = new VectorD (y.dim-1) - for i <- yd.indices do yd(i) = y(i+1) - y(i) - (y(0), yd) -end diff - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Take the 1-st inverse-difference of vector/time-series x. - * Restores the original time-series if x(0) holds first value in original time-series. - * @param x the vector/time-series to be inverse-differenced - */ -def diffinv (y0: Double, yd: VectorD): VectorD = - val y = new VectorD (yd.dim+1) - y(0) = y0 - for i <- 1 until y.dim do y(i) = yd(i-1) + y(i-1) - y -end diffinv - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Take the 'd'-th difference on vector/time-series 'y'. For efficiency, - * this method is destructive of 'y' (make a copy to preserve). 
- * @param y the vector/time-series to be differenced - * @param d the order or number of differences to be taken -def diff (y: VectorD, d: Int): VectorD = - if d < 1 then flaw ("diff", s"requires the number of differences $d > 0") - for k <- 1 to d do diff (y) - y -end diff - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Take the 'd'-th inverse-difference on vector/time-series 'y'. For efficiency, - * this method is destructive of 'y' (make a copy to preserve). - * Restores the original time-series if 'y(0)' holds first value in original time-series. - * @param y the vector/time-series to be inverse-differenced - * @param d the order or number of inverse-differences to be taken -def diffinv (y: VectorD, d: Int): VectorD = - if d < 1 then flaw ("diffinv", s"requires the number of inverse-differences $d > 0") - for k <- 1 to d do diffinv (y) - y -end diffinv - */ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMA` class provides basic time series analysis capabilities for Auto-Regressive, - * Integrated, Moving-Average (ARIMA) models. In an ARIMA(p, q) model, p refers to the - * order of the Auto-Regressive components, d refers to the number of differences, - * and q refers to the Moving-Average compoenest of the model. - * ARIMA models are often used for forecasting. - * Given time-series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * diff_d (y_t) = δ + Σ(φ_k y_t-k) + Σ(θ_k e_t-k) + e_t - * where δ is a constant, φ is the auto-regressive coefficient vector, - * θ is the moving-average vector, and e_t is the noise term. 
- *---------------------------------------------------------------------------------- - * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class ARIMA (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARIMA.hp) - extends ARMA (diff (y)._2, tt, hparam): -// extends ARMA (diff (y, hparam("d").toInt), tt, hparam): - - private val debug = debugf ("ARIMA", true) // debug function - private val flaw = flawf ("ARIMA") // flaw function - private val d = hparam("d").toInt // the number of differences - private val (y0, yd) = diff (y) - - assert (getY == yd) - - if p > MAX_LAGS then flaw ("ARIMA", s"p = $p must not be greater than MAX_LAGS = $MAX_LAGS") - - debug ("constructor", s"d = $d, y0 = $y0, diff (yd) = $yd") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including its current hyper-parameter, e.g., ARIMA(3, 1, 2). - */ - override def modelName: String = s"ARIMA($p, $d, $q)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict all values for a time-series using 1-step ahead forecasts. - * y_t = φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * @see predictAll in `Forecaster` - * @param y_ the actual values to use in making predictions - */ - override def predictAll (y_ : VectorD): VectorD = - super.predictAll (y_) - end predictAll - - def predictAll2 (y_ : VectorD): VectorD = - val yd = super.predictAll (y_) - val yy = new VectorD (yd.dim + 1) - yy(0) = y0 - for i <- 1 until yy.dim do yy(i) = yy(i-1) + yd(i-1) - println (s"predictAll2: yy = $yy") - yy - end predictAll2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an ARMA forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. 
Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the training/testing response/output vector (e.g., full y) - */ - def test2 (x_null: MatrixD, y_ : VectorD): VectorD = - val yp = predictAll2 (yd) // make predictions - val yy = y_(1 to y_.dim) - val yyp = yp(0 to y_.dim-1) // align actual and predicted vectors - - resetDF (p, yy.dim - p) // reset the degrees of freedom - diagnose (yy, yp) // evaluate and return the QoF of these predictions - end test2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * Forecast recurse down diagonals in the yf forecast matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - */ - override def forecastAll (h: Int, y_ : VectorD): MatrixD = - val yy = super.forecastAll (h, y_) -// MatrixD (for j <- yf.indices2 yield diffinv (yy(?, j))).transpose - yy - end forecastAll - -end ARIMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMA` companion object provides factory methods for the `ARIMA` class. - */ -object ARIMA: - - /** Base hyper-parameter specification for `ARIMA` class - */ - val hp = new HyperParameter - hp += ("p", 1, 1) - hp += ("d", 1, 1) - hp += ("q", 1, 1) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARIMA` object. 
- * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARIMA.hp): ARIMA = - new ARIMA (y, tt, hparam) - end apply - -end ARIMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMATest` object is used to test the `ARIMA` class on real data: Forecasting lake levels. - * Test the test, predictAll, testf and forecastAll methods over the whole times-series. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.ARIMATest - */ -object ARIMATest extends App: - - import Example_LakeLevels.y - val m = y.dim - val t = VectorD.range (1, m) - val h = 3 // the forecasting horizon - val d = 1; hp("d") = d // differencing hyper-parameter d - val q = 1; hp("q") = q // moving-average hyper-parameter q - - var ar: ARIMA = null - for p <- 1 to 1 do // autoregressive hyper-parameter p - hp("p") = p // set p hyper-parameter - banner (s"Test: ARIMA($p, $d, $q}") - ar = new ARIMA (y) // create model for time series data - val yd = ar.getY - ar.train (null, yd) // train the model on full dataset - - banner (s"Test: ARIMA($p, $d, $q} Differenced") - println (ar.report (ar.test (null, yd))) // test the model and report results - val ydp = ar.predictAll (yd) // predict 1-step ahead for all y - val yyd = yd(1 to yd.dim) - new Plot (t, yyd, ydp, s"ARIMA($p, $d, $q): yd-actual vs. yd-predicted", lines = true) - - banner (s"Test: ARIMA($p, $d, $q} Undifferenced") - println (ar.report (ar.test2 (null, yd))) // test the model and report results - val yp = ar.predictAll2 (yd) // predict 1-step ahead for all y - val yy = y(1 to y.dim) - new Plot (t, yy, yp, s"ARIMA($p, $d, $q): y-actual vs. 
y-predicted", lines = true) -/* - val yf = ar.forecastAll (h, y) // forecast h-steps ahead for all y - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - assert (yf(?, 1)(1 to m+1) == yp) // column 1 must agree with one step-ahead predictions - for k <- 1 to h do - println (s"evalaute QoF for horizon $k:") - println (Fit.fitMap (ar.testf (k, y))) // evaluate k-units ahead forecasts - end for -*/ - end for - - banner ("Select model based on ACF and PACF") - ar.plotFunc (ar.acF, "ACF") // Auto-Correlation Function (ACF) - ar.plotFunc (ar.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end ARIMATest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMATest2` object is used to test the `ARIMA` class on real data: Forecasting lake levels. - * Test the test, predictAll, testf and forecastAll methods over the whole times-series. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.ARIMATest2 - */ -object ARIMATest2 extends App: - - import Example_LakeLevels.y - - val (y0, yd) = diff (y) - val z = diffinv (y0, yd) - - println (s"original y = $y") - println (s"differenced x = $yd") - println (s"restored z = $z") - assert (z == y) - -end ARIMATest2 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARIMA.scala.bak2 b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARIMA.scala.bak2 deleted file mode 100644 index 3a0150d87..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARIMA.scala.bak2 +++ /dev/null @@ -1,341 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller, Michael Cotterell - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). 
- * - * @title Model: Auto-Regressive, Integrated, Moving-Average (ARIMA) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ -import scalation.optimization._ - -import ARIMA.hp - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Take the 1-st difference of vector/time-series y, returning the first original - * value and the differenced time-series. - * @param y the vector/time-series to be differenced - */ -def diff (y: VectorD): (Double, VectorD) = - val yd = new VectorD (y.dim-1) - for i <- yd.indices do yd(i) = y(i+1) - y(i) - (y(0), yd) -end diff - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Take the 1-st inverse-difference of vector/time-series yd. - * @param y0 the first value from the undifferenced time-series - * @param yd the vector/time-series to be inverse-differenced - */ -def diffinv (y0: Double, yd: VectorD): VectorD = - val y = new VectorD (yd.dim+1) - y(0) = y0 - for i <- 1 until y.dim do y(i) = yd(i-1) + y(i-1) - y -end diffinv - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMA` class provides basic time series analysis capabilities for Auto-Regressive, - * Moving-Average (ARIMA) models. In an ARIMA(p, q) model, p refers to the order of the - * Auto-Regressive components and q refers to the Moving-Average compoenest of the model. - * ARIMA models are often used for forecasting. 
- * Given time-series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * y_t = δ + Σ(φ_k y_t-k) + Σ(θ_k e_t-k) + e_t - * where δ is a constant, φ is the auto-regressive coefficient vector, - * θ is the moving-average vector, and e_t is the noise term. - *---------------------------------------------------------------------------------- - * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class ARIMA (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARIMA.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = hparam("p").toInt, df = y.dim - hparam("p").toInt): - - private val debug = debugf ("ARIMA", true) // debug function - private val flaw = flawf ("ARIMA") // flaw function - private var m = y.dim // number of time points - private var p = hparam("p").toInt // p-th order Auto-Regressive, - private var d = hparam("d").toInt // d-th order Differencing and - private var q = hparam("q").toInt // q-th order Moving-Average model - private var φ = VectorD.nullv // AR(p) parameters/coefficients part - private var θ = VectorD.nullv // MA(q) parameters/coefficients part - private var yf = MatrixD.nullm // the forecast matrix - time points x horizons - - val (y0, yd) = diff (y) // the first orginal value and differenced time-series - - if p > MAX_LAGS then flaw ("ARIMA", s"p = $p must not be greater than MAX_LAGS = $MAX_LAGS") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including its current hyper-parameter, e.g., ARIMA(2, 1, 1). - */ - override def modelName: String = s"ARIMA($p, $d, $q)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARIMA` model to the times-series data in vector y_. 
- * Estimate the coefficient vector φ for a p-th order Auto-Regressive ARIMA(p) model. - * Uses Durbin-Levinson Algorithm (in `Correlogram`) to determine the coefficients. - * The φ vector is p-th row of psi matrix (ignoring the first (0th) column). - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - m = y_.dim // length of relevant time-series - e = new VectorD (m) - resetDF (p, m - p) // reset the degrees of freedom - makeCorrelogram (y_) // correlogram computes psi matrix, gives ACF and PACF - - φ = new VectorD (p) // zeros for AR part - θ = new VectorD (q) // zeros for MA part - val b = φ ++ θ // combine all parameters -> vector to optimize - - def csse (b: VectorD): Double = // objective function - conditional sum of squared errors - φ = b(0 to p); θ = b(p to p+q) - val (yy, yp) = testSetup (yd) // get and align actual and predicted values - val s = (yy - yp).normSq // sum of squared errors -// println (s"csse: s = $s, b = $b") - s - end csse - - def nll (b: VectorD): Double = // objective function - negative log-likelihood (MLE) - 0.0 // FIX - implement - end nll - - val optimizer = new BFGS (csse) // apply Quasi-Newton BFGS optimizer -// val optimizer = new ConjugateGradient (csse) // apply Conjugate Gradient optimizer - fails -// val optimizer = new CoordinateDescent (csse) // apply Coordinate Descent optimizer -// val optimizer = new NelderMeadSimplex (csse, 3) // apply Nelder-Mead Simplex optimizer -// val optimizer = new GridSearch (csse, 3); optimizer.setAxes () // apply GridSearch BFGS optimizer - close - val (fb, bb) = optimizer.solve (b) // optimal solution for the objective function and parameters - - φ = bb(0 to p); θ = bb(p to p+q) // recover parameters for z - debug ("train", s"parameters for ARIMA($p, $d, $q) model: φ = $φ, θ = $θ") - end train - - 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an ARIMA forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the training/testing response/output vector (e.g., full y) - */ - def test (x_null: MatrixD, y_ : VectorD): VectorD = - val (yy, yp) = testSetup (y_) // get and align actual and predicted values - resetDF (p, yy.dim - p) // reset the degrees of freedom - diagnose (yy, yp) // evaluate and return the QoF of these predictions - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an ARIMA forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the training/testing response/output vector (e.g., full y) - */ - def test2 (x_null: MatrixD, y_ : VectorD): VectorD = - val yp = predictAll2 (yd) // make predictions - val yy = y_(1 to y_.dim) - val yyp = yp(0 to y_.dim-1) // align actual and predicted vectors - - resetDF (p, yy.dim - p) // reset the degrees of freedom - diagnose (yy, yyp) // evaluate and return the QoF of these predictions - end test2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an ARIMA forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing response/output vector (e.g., full y) - * @param redo whether to use existing forecasts or redo them (defaults to false) - */ - def testf (h: Int, y_ : VectorD, redo: Boolean = false): VectorD = - if yf == null || yf.dim2 < h+1 || redo then yf = forecastAll (h, y_) // redo all forecasts - val yy = y_(h to y_.dim) - val yf_h = yf(?, h)(h to y_.dim) // pull column h from the forecast matrix and align - resetDF (p, yy.dim - p) // reset the degrees of freedom - diagnose (yy, yf_h) // evaluate and return the QoF of these forecasts - end testf - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the ARIMA(p, d, q) model. - */ - override def parameter: VectorD = φ ++ θ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for time point/index t using 1-step ahead forecasts. - * y_t = φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time), - * but do not assume errors repeat. 
- * @see predictAll in `Forecaster` - * @param t the time point/index to be predicted - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - if t < 1 || t > y_.dim then flaw ("predict", s"time index t = $t is out of range") - var sum = 0.0 - for j <- 0 until p do sum += φ(j) * y_(max (0, t-1-j)) - for j <- 0 until q if t-j > 0 do sum += θ(j) * e(t-1-j) - if t < y_.dim then e(t) = y_(t) - sum // update the t-th error e_t - sum // prediction for y_t, yp_t - end predict - - def predictAll2 (y_ : VectorD): VectorD = - val yp = new VectorD (yd.dim+1) - yp(0) = y0 - e(0) = 0.0 - for t <- 1 to yd.dim do - var sum = 0.0 - for j <- 0 until p do sum += φ(j) * yd(max (0, t-1-j)) - for j <- 0 until q if t-j > 0 do sum += θ(j) * e(t-1-j) - yp(t) = y(t-1) + sum // prediction for y_t, yp_t - if t < yd.dim then e(t) = yd(t) - sum // update the t-th error e_t -// if t < yd.dim then e(t) = y(t) - yp(t) // update the t-th error e_t - end for - yp(1 to yd.dim) - end predictAll2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * Forecast recurse down diagonals in the yf forecast matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. 
- * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - */ - def forecastAll (h: Int, y_ : VectorD): MatrixD = - yf = new MatrixD (y_.dim+h, h+1) // forecasts for all time points t & horizons to h - for t <- 0 until m do yf(t, 0) = y_(t) // first column is actual values, horizon 0 - for k <- 1 to h do - for t <- y_.indices do // make forecasts over all time points for horizon k - var sum = 0.0 - for j <- 0 until p do sum += φ(j) * yf(max (0, t+k-1-j), max (0, k-1-j)) - yf(t+k, k) = sum // forecast down the diagonal - end for - debug ("forecastAll", s"yf(?, $k) = ${yf(?, k)}") - end for - yf // return matrix of forecasted values - end forecastAll - -end ARIMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMA` companion object provides factory methods for the `ARIMA` class. - */ -object ARIMA: - - /** Base hyper-parameter specification for `ARIMA` class - */ - val hp = new HyperParameter - hp += ("p", 1, 1) - hp += ("d", 1, 1) - hp += ("q", 1, 1) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARIMA` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARIMA.hp): ARIMA = - new ARIMA (y, tt, hparam) - end apply - -end ARIMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMATest` object is used to test the `ARIMA` class on real data: Forecasting lake levels. - * Test the test, predictAll, testf and forecastAll methods over the whole times-series. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.ARIMATest - */ -object ARIMATest extends App: - - import Example_LakeLevels.y - val m = y.dim - val t = VectorD.range (1, m) - val h = 3 // the forecasting horizon - val d = 1; hp("d") = d // number of differences - val q = 1; hp("q") = q // moving-average hyper-parameter q - - var ar: ARIMA = null - for p <- 1 to 1 do // autoregressive hyper-parameter p - hp("p") = p // set p hyper-parameter - banner (s"Test: ARIMA($p, $q}") - ar = new ARIMA (y) // create model for time series data - ar.train (null, y) // train the model on full dataset - - banner ("Test: Differenced Time-Series") - val yd = ar.yd - println (ar.report (ar.test (null, yd))) // test the model and report results - var yp = ar.predictAll (yd) // predict 1-step ahead for all y - var yy = yd(1 to yd.dim) - new Plot (t, yy, yp, s"ARIMA($p, $d, $q): yd-actual vs. yd-predicted", lines = true) - - banner ("Test: Undifferenced Time-Series") -// println (ar.report (ar.test2 (null, yd))) // test the model and report results - yp = ar.predictAll2 (y) // predict 1-step ahead for all y - yy = y(1 to yd.dim) - println (s"fit = ${ar.diagnose (yy, yp)}") - new Plot (t, yy, yp, s"ARIMA($p, $d, $q): y-actual vs. 
y-predicted", lines = true) -/* - val yf = ar.forecastAll (h, y) // forecast h-steps ahead for all y - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - assert (yf(?, 1)(1 to m+1) == yp) // column 1 must agree with one step-ahead predictions - for k <- 1 to h do - println (s"evalaute QoF for horizon $k:") - println (Fit.fitMap (ar.testf (k, y))) // evaluate k-units ahead forecasts - end for -*/ - end for - - banner ("Select model based on ACF and PACF") - ar.plotFunc (ar.acF, "ACF") // Auto-Correlation Function (ACF) - ar.plotFunc (ar.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end ARIMATest - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMATest2` object is used to test functions used by the `ARIMA` class. - * Test the diff and diffinv functions. - * > runMain scalation.modeling.forecasting.ARIMATest2 - */ -object ARIMATest2 extends App: - - import Example_LakeLevels.y - - val (y0, yd) = diff (y) - val z = diffinv (y0, yd) - - println (s"original y = $y") - println (s"differenced x = $yd") - println (s"restored z = $z") - assert (z == y) - -end ARIMATest2 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARIMA.scala.sav b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARIMA.scala.sav deleted file mode 100644 index c142e2857..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARIMA.scala.sav +++ /dev/null @@ -1,561 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). 
- * - * @note Auto-Regressive, Integrated, Moving Average (ARIMA) Model - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.math.{max, sqrt} - -import scalation.mathstat._ -import scalation.optimization._ -import scalation.random.{Normal, Uniform} - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Companion object for class `ARIMA`. Includes features related to differencing - * and automated order selection. - * @see www.jstatsoft.org/article/view/v027i03/v27i03.pdf - */ -object ARIMA: - - /** Base hyper-parameter specification for `ARIMA` class - */ - val hp = new HyperParameter - hp += ("p", 1, 1) - hp += ("d", 1, 1) - hp += ("q", 1, 1) - - private val flaw = flawf ("ARIMA") // flaw function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the 'd'th difference of the time-series for 'd' in {0, 1, 2, 3}. - * A new vector is returned even when there is no difference taken ('d = 0'), - * to ensure the original is preserved. 
- * @param y the original time-series to be differenced - * @param d the order of simple differencing - */ - def difference (y: VectorD, d: Int): VectorD = - d match - case 0 => - y.copy - case 1 => - VectorD (for i <- 0 until y.dim-1 yield y(i+1) - y(i)) - case 2 => - VectorD (for i <- 0 until y.dim-2 yield y(i+2) - 2*y(i+1) + y(i)) - case 3 => - VectorD (for i <- 0 until y.dim-3 yield y(i+3) - 3*y(i+2) + 3*y(i+1) - y(i)) - case _ => - flaw ("difference", "ARIMA does not support differencing higher than order 3"); null - end match - end difference - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Transform the fitted values on the training data of a differenced time series back - * to the original scale. Undo trend differencing only. - * @see stats.stackexchange.com/questions/32634/difference-time-series-before-arima-or-within-arima - * @param yp the vector of predicted/fitted values - * @param y the original time-series vector - * @param d the order of simple differencing - */ - def transformBack (yp: VectorD, y: VectorD, d: Int): VectorD = - d match - case 0 => - yp - case 1 => - val tb = new VectorD (y.dim) - tb(0) = y(0) - for i <- 0 until y.dim-1 do tb(i+1) = yp(i) + y(i) - tb - case 2 => - val tb = new VectorD (y.dim) - tb(0) = y(0); tb(1) = y(1) - for i <- 0 until y.dim-2 do tb(i+2) = yp(i) + 2*y(i+1) - y(i) - tb - case 3 => - val tb = new VectorD (y.dim) - tb(0) = y(0); tb(1) = y(1); tb(2) = y(2) - for i <- 0 until y.dim-3 do tb(i+3) = yp(i) + 3*y(i+2) - 3*y(i+1) + y(i) - tb - case _ => - flaw ("transformBack", "ARIMA does not support differencing higher than order 3"); null - end match - end transformBack - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Transform the forecasted values of a differenced time series back to the original - * for all horizons scale. 
- * @see stats.stackexchange.com/questions/32634/difference-time-series-before-arima-or-within-arima - * @param ypa the matrix of all multi-horizon forecasted values - * @param y the original time-series vector - * @param d the order of simple differencing - */ - def transformBack_allH (ypa: MatrixD, y: VectorD, d: Int): MatrixD = - val tb = new MatrixD (ypa.dim, ypa.dim2) - tb(?, 0) = y - for k <- 1 until ypa.dim2 do tb(?, k) = transformBack (ypa(?, k), y, d) - tb - end transformBack_allH - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Transform the forecast values of a differenced time series back to the - * original scale. - * @see stats.stackexchange.com/questions/32634/difference-time-series-before-arima-or-within-arima - * @param yf the vector of forecasted values - * @param y the original time series - * @param d the order of simple differencing - * @param t the time point being forecasted (@see the 'forecast' method) - */ - def transformBackF (yf: VectorD, y: VectorD, d: Int, t: Int): VectorD = - d match - case 0 => - yf - case 1 => - val tb = y(t - 1 to t) ++ yf - for i <- 1 until tb.dim do tb(i) += tb(i-1) - tb(1 to tb.dim) - case 2 => - val tb = y(t-2 to t) ++ yf - for i <- 2 until tb.dim do tb(i) += (2*tb(i-1) - tb(i-2)) - tb(2 to tb.dim) - case 3 => - val tb = y(t-3 to t) ++ yf - for i <- 3 until tb.dim do tb(i) += (3*tb(i-1) - 3*tb(i-2) + tb(i-3)) - tb(3 to tb.dim) - case _ => - flaw ("transformBackF", "ARIMA does not support differencing higher than order 3"); null - end match - end transformBackF - -end ARIMA - -import ARIMA._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMA` class provides basic time series analysis capabilities for Auto- - * Regressive 'AR' Integrated 'I' Moving-Average 'MA' models. 
In an - * ARIMA(p, d, q) model, p and q refer to the order of the Auto-Regressive - * and Moving-Average components of the model; d refers to the order of - * differencing. Given time series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * y_t = δ + Σ(φ_i y_t-i) + Σ(θ_i e_t-i) + e_t - * where δ is a constant, φ is the auto-regressive coefficient vector, - * θ is the moving-average coefficient vector, and e is the noise vector. - *------------------------------------------------------------------------------ - * If d > 0, then the time series must be differenced first before applying - * the above model. - *------------------------------------------------------------------------------ - * @param y the original input vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class ARIMA (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARIMA.hp) - extends ARMA (y, tt, hparam): - - private val debug = debugf ("ARIMA", true) // debug function - private val flaw = flawf ("ARIMA") // flaw function - - protected val d = hparam("d").toInt // the number of differences to take - protected var differenced = d > 0 // flag indicating whether differencing will be applied - protected var params = p + q + (if differenced then 0 else 1) // number of parameters estimated - - protected var mu = -0.0 // sample mean (-0.0 means unassigned) - protected var μ = -0.0 // population mean estimated using MLE - protected var sig2 = -0.0 // sample variance - protected var σ2 = -0.0 // population variance estimated using MLE - - private var z = VectorD.nullv // vector of centered predicted/fitted values - private var zp = VectorD.nullv // vector of centered predicted/fitted values - - init (y) // initialize vectors and parameters - - modelName = s"ARIMA($p, $d, $q)" - - 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Initialize variables based on the working time-series v. - * Set/change the working time series. May be used to set the time series - * to a different time window in order to produce newer forecast. - * @param v the working vector/time-series - */ - protected def init (v: VectorD): Unit = - mu = v.mean // sample mean - z = difference (v, d) // take the d-th difference of the time series - zp = new VectorD (z.dim) // predicted values prior to undifferencing/uncentering -// e = new VectorD (z.dim) // vector of errors/residuals - sig2 = z.variance // sample variance - end init - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the maximum lag used by this model (its capacity to look into the past). - */ - override def cap: Int = max (p, q) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show estimates for parameters. - */ - def showParameterEstimates (): Unit = - println (s"differenced = $differenced") - println (s"φ = $φ") // AR parameters - println (s"θ = $θ") // MA parameters - println (s"δ = $δ") // drift - println (s"mu = $mu") // sample mean - println (s"μ = $μ") // MLE mean - println (s"sig2 = $sig2") // sample variance - println (s"σ2 = $σ2") // MLE variance - end showParameterEstimates - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARIMA` model to the times-series data in vector y_. - * Estimate the coefficient vectors φ and θ for (p, q)-th order ARIMA(p, d, q) model. - * It uses BFGS, a Quasi-Newton optimizer, to minimize the negative log-likelihood. 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector - */ - override def train (x_null: MatrixD, y_ : VectorD): Unit = - val optimizer = new BFGS (nll) // nonlinear optimizer - val b = new VectorD (params + 1) // parameter values - - if ! differenced then b(b.size-2) = mu // sample mean, initial est. for μ parameter - b(b.size-1) = sqrt (sig2) // sample standard deviation, initial est. for σ parameter - optimizer.solve (b) // find b that maximizes likelihood - - δ = μ * (1 - φ.sum) // update drift value -// δ = stats.mu * (1 - φ.sum) - - showParameterEstimates () - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The negative log-likelihood function to be minimized. - * @see math.unice.fr/~frapetti/CorsoP/Chapitre_4_IMEA_1.pdf, page 36 - * @see spia.uga.edu/faculty_pages/monogan/teaching/ts/Barima.pdf - * @see stats.stackexchange.com/questions/77663/arima-estimation-by-hand - * @param b the input parameter vector - */ - protected def nll (b: VectorD): Double = - if b.size != params + 1 then flaw ("nll", "input parameter vector size incorrect") - for i <- 0 until p do φ(i) = b(i) - for i <- p until p+q do θ(i-p) = b(i) - if ! differenced then μ = b(b.size-2) - σ2 = b.last~^2 - - updateFittedValues () - end nll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the vector of fitted values 'zp', the vector of errors 'e', and - * return the negative log-likelihood '-ll'. - * @see `Fit` for definition of 'll'. - */ - protected def updateFittedValues (): Double = - if ! differenced then for i <- z.indices do z(i) = y(i) - μ // for undifferenced time series, center using est. 
μ - - zp(0) = z(0) // no past values or errors => copy actual - for t <- 1 until zp.dim do - e(t-1) = z(t-1) - zp(t-1) // error in previous forecast - var sum = 0.0 - for j <- 0 until p if t-j > 0 do sum += φ(j) * z(t-1-j) - for j <- 0 until q if t-j > 0 do sum += θ(j) * e(t-1-j) - zp(t) = sum - end for - - -ll (e.normSq / m, σ2, m) // return negative log likelihood - end updateFittedValues - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the error (difference between actual and predicted) and useful - * diagnostics for the dataset. - * @param y vector of observed values - * @param yp vector of predicted values - */ - override def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - // FIX - add testSetup - val yp = predictAll (y_) - resetDF (params, y.dim - params) - (yp, diagnose (y, yp)) - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the vector of predicted/fitted values on the training/full dataset. - * Based on 'zp' calculated in the 'updateFittedValues' method. - * @param y_ the given time-series - */ - override def predictAll (y_ : VectorD): VectorD = - if differenced then transformBack (zp, y, d) else zp + μ - end predictAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce h-steps-ahead forecast for ARIMA models. 
- * @see ams.sunysb.edu/~zhu/ams586/Forecasting.pdf - * @param t the time point from which to make forecasts (in the original scale) - * @param h the number of steps to forecast, must be at least one - */ - def forecast (t: Int = y.dim, h: Int = 1): VectorD = - if t > y.dim then flaw ("forecast", s"t ($t) cannot be greater than y.dim (${y.dim})") - val tz = t - d // scale t to match vector z and e - if tz < cap then flaw ("forecast", s"tz ($tz) must be at least cap ($cap)") - - val zf = new VectorD (cap + h) // forecasted centered values - val e_ = new VectorD (cap + h) // available observed errors - - for i <- 0 until cap if tz-cap+i >= 0 do // seed with first cap = max(p, q) values - zf(i) = z(tz-cap+i) // copy first cap values - e_(i) = e(tz-cap+i) // unveil first cap errors (observed in training) - end for - for i <- cap until zf.dim do // start at t = cap (enough for first value to forecast) - var sum = 0.0 - for j <- 0 until p do sum += φ(j) * zf(i-1-j) - for j <- 0 until q do sum += θ(j) * e_(i-1-j) - zf(i) = sum - end for - val f = zf(cap to zf.dim) // dump first cap values - if differenced then transformBackF (f, y, d, t) - else f + μ // return the vector of forecasts - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all time points using 1 through h-steps ahead forecasts. - * The h-th row of matrix is the horizon h forecast (where h = 0 is actual data). 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - override def forecastAll (y_ : VectorD, h: Int): MatrixD = - val yf = new MatrixD (y.dim, h+1) // forecasts for all horizons h & time points t - yf(?, 0) = y // first row is actual values - val cut = cap + d // cut over from actual to forecasted values - - for t <- y.indices do - if t < cut then - for k <- 1 to h do yf(t, k) = y(t) // copy first cut observed values from y - else - val ft = forecast (t, h) // forecasts at time point t, horizons 1 to h - for k <- 1 to h if t+k-1 < y.dim do - yf(t+k-1, k) = ft(k-1) // place forecasts diagonally - end for - end if - end for - - // fill in blank values in first few rows where no forecasts can be produced by copying values from previous columns - for k <- 2 to h; t <- cut until cut+k-1 do yf(t, k) = yf(t, k-1) // copy forecasted values - - yf // return matrix of forecasted values - end forecastAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all m time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, yf(?, 0) is set to y (the actual time-series values). - * Do not forecast errors, rather use observed errors from training and make sure not - * to use errors that would correspond to knowing future errors (all future errors should - * be assumed to be 0). - * @see https://otexts.com/fpp3/arima-forecasting.html, section 9.8 - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - */ -/*** - override def forecastAll2 (h: Int): MatrixD = forecastAll (h) - FIX - values must be computed diagonially - does not work for d = 1, etc. 
(missing value at 'cut') - override def forecastAll2 (h: Int): MatrixD = - { - val zf = new MatrixD (y.dim, h+1) // forecast matrix: rows - time, cols - horizon - for t <- z.indices do zf(t, 0) = z(t) // first column is actual values, horizon 0 - val cut = cap + d // cut over from actual to forecasted values - - for k <- 1 to h do // loop through k-steps ahead forecasts - val e_ = new VectorD (z.dim) // redetermine errors from a clean slate - - for t <- 0 until cut do // seed the first cap = max(p, q) values - zf(t, k) = z(t) // copy first cap actual values - e_(t) = e(t) // copy first cap errors (observed in training) - end for - - for t <- cut until y.dim do // forecast from cap to the end of time-series - if t-k >= 0 then e_(t-k) = e(t-k) // unveil previous error at time t-k - var sum = 0.0 - for j <- 0 until p if t-j > 0 then sum += φ(j) * zf(t-1-j, max (0, k-1-j)) - for j <- 0 until q if t-j > 0 then sum += θ(j) * e_(t-1-j) - zf(t, k) = sum // centered forecast for time t - end for - end for - println (s"forecastAll2: zf (${zf.dim}) = $zf") - if differenced then transformBack_allH (zf, y, d) - else zf + μ // return uncentered forecasts - end forecastAll2 -***/ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Obtain residuals/errors in the original scale. - */ - def residuals: VectorD = if differenced then y - predictAll (y) else e - -end ARIMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest` main function tests the `ARIMA` class on real data: - * Forecasting lake levels. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRIMATest - */ -@main def aRIMATest (): Unit = - - import Example_LakeLevels.y - - val d = 0 // apply d-th order differencing - no differencing -// val d = 1 // apply d-th order differencing - first differences - - for h <- 1 to 2 do // forecasting horizon - for p <- 1 to 6; q <- 0 to 3 do // ARMA hyper-parameter settings - banner (s"Test: ARIMA ($p, $d, $q) with h = $h") - hp("p") = p; hp("d") = d; hp("q") = q - val mod = new ARIMA (y) // create an ARIMA model - val (yp, qof) = mod.trainNtest ()() // train and the model on full dataset - - val yfa = mod.forecastAll (y, h) - val yf = yfa(?, h) // forecasted values - h steps ahead - new Plot (null, y, yf, s"Plot of y & yf, forecasted (h = $h) ${mod.modelName} vs. t", true) - - if h == 1 then Forecaster.differ (yp, yf, allow = true) -/* - val skip = max (p, q) // skip the cap start-up - banner (s"aRIMATest: QoF (@h = $h) for yf = mod.forecastAll") - println (s"rSq (yf) for h = $h is ${rSqF (y, yf)}") - println (s"rSq (yf) for h = $h is ${rSqF (y, yf, max (p, q))}, skip = $skip") - println (s"sMAPE (yf) for h = $h is ${smapeF (y, yf)}") - println (s"sMAPE (yf) for h = $h is ${smapeF (y, yf, max (p, q))}, skip = $skip") - - banner (s"aRIMATest: QoF (@h = $h) for yf2 = mod.forecastAll2") - println (s"rSq (yf2) for h = $h is ${rSqF (y, yf2)}") - println (s"rSq (yf2) for h = $h is ${rSqF (y, yf2, max (p, q))}, skip = $skip") - println (s"sMAPE (yf2) for h = $h is ${smapeF (y, yf2)}") - println (s"sMAPE (yf2) for h = $h is ${smapeF (y, yf2, max (p, q))}, skip = $skip") -*/ - end for - end for - -end aRIMATest - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest2` main function tests the `ARIMA` class. - * Test simulated data. 
- * > runMain scalation.modeling.forecasting.aRIMATest2 - */ -@main def aRIMATest2 (): Unit = - - banner ("ARIMA Test2") - val m = 20 - val noise = Normal (0, 2) -// val noise = Uniform (-5, 5) - val y = VectorD (for i <- 0 until m yield i + noise.gen) - - println (s"y = $y") - - val (p, d, q) = (1, 1, 1) - hp("p") = p; hp("d") = d; hp("q") = q - banner (s"Build ARIMA($p, $d, $q) model") - val mod = new ARIMA (y) // time series data: y vs. t - mod.trainNtest ()() // train and the model on full dataset - -end aRIMATest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest3` main function tests the `ARIMA` class. - * Traffic dataset. - * > runMain scalation.modeling.forecasting.aRIMATest3 - */ -@main def aRIMATest3 (): Unit = - - val data = MatrixD.load ("travelTime.csv") - val y = data(?, 1) - - val steps = 1 // number of steps for the forecasts - val d = 1 // levels of differencing - val (p, q) = (1, 1) - hp("p") = p; hp("d") = d; hp("q") = q - - banner (s"Build ARIMA($p, $d, $q) model") - val mod = new ARIMA (y) // time series data: y vs. t - mod.trainNtest ()() // train and the model on full dataset - - val ar_f = mod.forecast (h = steps) - println (s"$steps-step ahead forecasts using ${mod.modelName} model = $ar_f") - -end aRIMATest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest4` main function tests the `ARIMA` class. - * Simulated data with a quadratic pattern. 
- * > runMain scalation.modeling.forecasting.aRIMATest4 - */ -@main def aRIMATest4 (): Unit = - - val y = makeTSeries () // make a simulated time-series (see `Stationary`) - - val steps = 2 // number of steps for the forecasts - val (d, q) = (1, 1) // levels of differencing - hp("d") = d; hp("q") = q - - for p <- 1 to 3 do - hp("p") = p - banner (s"Build ARIMA($p, $d, $q) model") - val mod = new ARIMA (y) // time series model ARIMA - mod.trainNtest ()() // train and the model on full dataset - - banner ("Make Forecasts") - val yf = mod.forecast (steps) - println (s"$steps-step ahead forecasts using ${mod.modelName} model = $yf") - end for - -end aRIMATest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest5` main function tests the `ARIMA` class on real data: - * Forecasting COVID-19. - * > runMain scalation.modeling.forecasting.aRIMATest5 - */ -@main def aRIMATest5 (): Unit = - - val data = MatrixD.load ("covid_19.csv", 1, 1) // skip first row (header) and first column - val yy = data(?, 4) // column 5 is daily deaths -// val yy = data(?, 5) // column 5 is daily deaths smoothed - val is = yy.indexWhere (_ >= 2.0) // find day of first death with at least 2 deaths - println (s"is = $is is first day with at least 2 deaths") - val y = yy(is until yy.dim) // slice out days before is - val h = 2 // forecasting horizon - - val ar1 = new AR (y) - ar1.trainNtest ()() // train and the model on full dataset - banner (s"AR(1) $h-steps rolling validation results") - RollingValidation.rollValidate (ar1, 2, 14) -/* - val yfa = rw.forecastAll (y, h) - val yf = yfa(?, h) // forecasted values - h steps ahead - new Plot (null, y, yf, s"Plot of y & yf, forecasted (h = $h) ${rw.modelName} vs. 
t", true) - - hp("d") = 0 // level of differencing, try 0 and 1 - for p <- 1 to 15; q <- 1 to 3 do // ARIMA hyper-parameter settings - hp("p") = p; hp("q") = q - val mod = new ARIMA (y) // create an ARIMA model - val (yp, qof) = mod.trainNtest ()() // train and the model on full dataset - - val yfa = mod.forecastAll (y, h) - val yf = yfa(?, h) // forecasted values - h steps ahead - new Plot (null, y, yf, s"Plot of y & yf, forecasted (h = $h) ${mod.modelName} vs. t", true) - end for -*/ - -end aRIMATest5 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARIMA.scala.sav2 b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARIMA.scala.sav2 deleted file mode 100644 index 5d2771f26..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARIMA.scala.sav2 +++ /dev/null @@ -1,568 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). - * - * @note Model: Auto-Regressive, Integrated, Moving Average (ARIMA) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -// U N D E R D E V E L O P M E N T - -package scalation -package modeling -package forecasting - -import scala.math.sqrt - -import scalation.mathstat._ -import scalation.optimization.quasi_newton.{BFGS => Optimizer} // change import to change optimizer -//import scalation.optimization.quasi_newton.{LBFGS => Optimizer} -import scalation.random.Normal - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Companion object for class `ARIMA`. 
Includes features related to differencing - * and automated order selection. - * @see www.jstatsoft.org/article/view/v027i03/v27i03.pdf - */ -object ARIMA: - - private val flaw = flawf ("ARIMA") // flaw function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the 'd'th difference of the time-series for 'd' in {0, 1, 2, 3}. - * A new vector is returned even when there is no difference taken ('d = 0'), - * to ensure the original is preserved. - * @param y the original time-series to be differenced - * @param d the order of simple differencing - */ - def difference (y: VectorD, d: Int): VectorD = - d match - case 0 => - y.copy - case 1 => - VectorD (for i <- 0 until y.dim-1 yield y(i+1) - y(i)) - case 2 => - VectorD (for i <- 0 until y.dim-2 yield y(i+2) - 2*y(i+1) + y(i)) - case 3 => - VectorD (for i <- 0 until y.dim-3 yield y(i+3) - 3*y(i+2) + 3*y(i+1) - y(i)) - case _ => - flaw ("difference", "ARIMA does not support differencing higher than order 3"); null - end match - end difference - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Transform the fitted values on the training data of a differenced time series back - * to the original scale. Undo trend differencing only. 
- * @see stats.stackexchange.com/questions/32634/difference-time-series-before-arima-or-within-arima - * @param yp the vector of predicted/fitted values - * @param y the original time-series vector - * @param d the order of simple differencing - */ - def transformBack (yp: VectorD, y: VectorD, d: Int): VectorD = - d match - case 0 => - yp - case 1 => - val tb = new VectorD (y.dim) - tb(0) = y(0) - for i <- 0 until y.dim-1 do tb(i+1) = yp(i) + y(i) - tb - case 2 => - val tb = new VectorD (y.dim) - tb(0) = y(0); tb(1) = y(1) - for i <- 0 until y.dim-2 do tb(i+2) = yp(i) + 2*y(i+1) - y(i) - tb - case 3 => - val tb = new VectorD (y.dim) - tb(0) = y(0); tb(1) = y(1); tb(2) = y(2) - for i <- 0 until y.dim-3 do tb(i+3) = yp(i) + 3*y(i+2) - 3*y(i+1) + y(i) - tb - case _ => - flaw ("transformBack", "ARIMA does not support differencing higher than order 3"); null - end match - end transformBack - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Transform the forecasted values of a differenced time series back to the original - * for all horizons scale. - * @see stats.stackexchange.com/questions/32634/difference-time-series-before-arima-or-within-arima - * @param ypa the matrix of all multi-horizon forecasted values - * @param y the original time-series vector - * @param d the order of simple differencing - */ - def transformBack_allH (ypa: MatrixD, y: VectorD, d: Int): MatrixD = - val tb = new MatrixD (ypa.dim, ypa.dim2) - tb(?, 0) = y - for k <- 1 until ypa.dim2 do tb(?, k) = transformBack (ypa(?, k), y, d) - tb - end transformBack_allH - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Transform the forecast values of a differenced time series back to the - * original scale. 
- * @see stats.stackexchange.com/questions/32634/difference-time-series-before-arima-or-within-arima - * @param yf the vector of forecasted values - * @param y the original time series - * @param d the order of simple differencing - * @param t the time point being forecasted (@see the 'forecast' method) - */ - def transformBackF (yf: VectorD, y: VectorD, d: Int, t: Int): VectorD = - d match - case 0 => - yf - case 1 => - val tb = y(t - 1 to t) ++ yf - for i <- 1 until tb.dim do tb(i) += tb(i-1) - tb(1 to tb.dim) - case 2 => - val tb = y(t-2 to t) ++ yf - for i <- 2 until tb.dim do tb(i) += (2*tb(i-1) - tb(i-2)) - tb(2 to tb.dim) - case 3 => - val tb = y(t-3 to t) ++ yf - for i <- 3 until tb.dim do tb(i) += (3*tb(i-1) - 3*tb(i-2) + tb(i-3)) - tb(3 to tb.dim) - case _ => - flaw ("transformBackF", "ARIMA does not support differencing higher than order 3"); null - end match - end transformBackF - -end ARIMA - -import ARIMA._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARIMA` class provides basic time series analysis capabilities for Auto- - * Regressive 'AR' Integrated 'I' Moving-Average 'MA' models. In an - * ARIMA(p, d, q) model, p and q refer to the order of the Auto-Regressive - * and Moving-Average components of the model; d refers to the order of - * differencing. Given time series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * y_t = δ + Σ(φ_i y_t-i) + Σ(θ_i e_t-i) + e_t - * where δ is a constant, φ is the auto-regressive coefficient vector, - * θ is the moving-average coefficient vector, and e is the noise vector. - *------------------------------------------------------------------------------ - * If d > 0, then the time series must be differenced first before applying - * the above model. 
- *------------------------------------------------------------------------------ - * @param y the original input vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class ARIMA (y: VectorD, tt: VectorD = null, hparam: HyperParameter = SARIMAX.hp) - extends ARMA (y, tt, hparam): - - private val flaw = flawf ("ARIMA") // flaw function - - protected val d = hparam("d").toInt // the number of differences to take -// protected var cap = 0 // max of p and q - protected var params = 0 // number of parameters estimated - protected var differenced = d > 0 // flag indicating whether differencing will be applied - - protected var mu = -0.0 // sample mean (-0.0 means unassigned) - protected var μ = -0.0 // population mean estimated using MLE - protected var sig2 = -0.0 // sample variance - protected var σ2 = -0.0 // population variance estimated using MLE - - private var z = VectorD.nullv // vector of centered predicted/fitted values - private var zp = VectorD.nullv // vector of centered predicted/fitted values - - init (y) // initialize vectors and parameters - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including current hyper-parameters, e.g., ARIMA(2, 1, 1). - */ - modelName = s"ARIMA($p, $d, $q)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Initialize variables based on the working time-series v. - * Set/change the working time series. May be used to set the time series - * to a different time window in order to produce newer forecast. 
- * @param v the working vector/time-series - */ - protected def init (v: VectorD): Unit = - mu = v.mean // sample mean - z = difference (v, d) // take the d-th difference of the time series - zp = new VectorD (z.dim) // predicted values prior to undifferencing/uncentering -// e = new VectorD (z.dim) // vector of errors/residuals - sig2 = z.variance // sample variance - - φ = new VectorD (p) // AR coefficients - θ = new VectorD (q) // MA coefficients -// cap = max (p, q) // greatest lag - params = p + q + (if differenced then 0 else 1) // number of parameters - end init - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Show estimates for parameters. - */ - def showParameterEstimates (): Unit = - println (s"differenced = $differenced") - println (s"φ = $φ") // AR parameters - println (s"θ = $θ") // MA parameters - println (s"δ = $δ") // drift - println (s"mu = $mu") // sample mean - println (s"μ = $μ") // MLE mean - println (s"sig2 = $sig2") // sample variance - println (s"σ2 = $σ2") // MLE variance - end showParameterEstimates - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARIMA` model to the times-series data in vector y_. Must call setPQ first. - * Estimate the coefficient vectors φ and θ for (p, q)-th order ARIMA(p, d, q) model. - * It uses BFGS, a Quasi-Newton optimizer, to minimize the negative log-likelihood. - * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector - */ - override def train (x_null: MatrixD, y_ : VectorD): Unit = - val b = new VectorD (params + 1) // parameter values - if ! differenced then b(b.size-2) = mu // sample mean, initial est. for μ parameter - b(b.size-1) = sqrt (sig2) // sample standard deviation, initial est. 
for σ parameter - - val optimizer = new Optimizer (nll) // apply Quasi-Newton optimizer -// val (fb, bb) = optimizer.solve (b, 0.5) // optimal solution for the objective function and parameters - val (fb, bb) = optimizer.solve3 (b, 0.5) // optimal solution for the objective function and parameters - - δ = μ * (1 - φ.sum) // update drift value -// δ = stats.mu * (1 - φ.sum) - - showParameterEstimates () - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The negative log-likelihood function to be minimized. - * @see math.unice.fr/~frapetti/CorsoP/Chapitre_4_IMEA_1.pdf, page 36 - * @see spia.uga.edu/faculty_pages/monogan/teaching/ts/Barima.pdf - * @see stats.stackexchange.com/questions/77663/arima-estimation-by-hand - * @param b the input parameter vector - */ - protected def nll (b: VectorD): Double = - if b.size != params + 1 then flaw ("nll", "input parameter vector size incorrect") - for i <- 0 until p do φ(i) = b(i) - for i <- p until p+q do θ(i-p) = b(i) - if ! differenced then μ = b(b.size-2) - σ2 = b.last~^2 - - updateFittedValues () - end nll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the vector of fitted values 'zp', the vector of errors 'e', and - * return the negative log-likelihood '-ll'. - * @see `Fit` for definition of 'll'. - */ - protected def updateFittedValues (): Double = - if ! differenced then for i <- z.indices do z(i) = y(i) - μ // for undifferenced time series, center using est. 
μ - - zp(0) = z(0) // no past values or errors => copy actual - for t <- 1 until zp.dim do - e(t-1) = z(t-1) - zp(t-1) // error in previous forecast - var sum = 0.0 - for j <- 0 until p if t-j > 0 do sum += φ(j) * z(t-1-j) - for j <- 0 until q if t-j > 0 do sum += θ(j) * e(t-1-j) - zp(t) = sum - end for - - -ll (e.normSq / m, σ2, m) // return negative log likelihood - end updateFittedValues - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the error (difference between actual and predicted) and useful - * diagnostics for the dataset. - * @param y_ vector of observed values - * @param yp vector of predicted values - */ - override def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - // FIX - add testSetup - val yp = predictAll (y_) - resetDF (params, y.dim - params) - (yp, diagnose (y, yp)) - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the vector of predicted/fitted values on the training/full dataset. - * Based on 'zp' calculated in the 'updateFittedValues' method. - * @param y_ the given time-series - */ - override def predictAll (y_ : VectorD): VectorD = - println (s"predictAll: y.dim = ${y.dim}, y_.dim = ${y_.dim}, zp.dim = ${zp.dim}") - if differenced then { println (s"zp.dim = ${zp.dim}"); transformBack (zp, y_, d) } else zp + μ - end predictAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce h-steps-ahead forecast for ARIMA models. 
- * @see ams.sunysb.edu/~zhu/ams586/Forecasting.pdf - * @param t the time point from which to make forecasts (in the original scale) - * @param h the number of steps to forecast, must be at least one - */ - def forecast (t: Int = y.dim, h: Int = 1): VectorD = - if t > y.dim then flaw ("forecast", s"t ($t) cannot be greater than y.dim (${y.dim})") - val tz = t - d // scale t to match vector z and e - if tz < cap then flaw ("forecast", s"tz ($tz) must be at least cap ($cap)") - - val zf = new VectorD (cap + h) // forecasted centered values - val e_ = new VectorD (cap + h) // available observed errors - - for i <- 0 until cap if tz-cap+i >= 0 do // seed with first cap = max(p, q) values - zf(i) = z(tz-cap+i) // copy first cap values - e_(i) = e(tz-cap+i) // unveil first cap errors (observed in training) - end for - for i <- cap until zf.dim do // start at t = cap (enough for first value to forecast) - var sum = 0.0 - for j <- 0 until p do sum += φ(j) * zf(i-1-j) - for j <- 0 until q do sum += θ(j) * e_(i-1-j) - zf(i) = sum - end for - val f = zf(cap to zf.dim) // dump first cap values - if differenced then transformBackF (f, y, d, t) - else f + μ // return the vector of forecasts - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all time points using 1 through h-steps ahead forecasts. - * The h-th row of matrix is the horizon h forecast (where h = 0 is actual data). 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - override def forecastAll (y_ : VectorD, h: Int): MatrixD = - val yf = new MatrixD (y.dim, h+1) // forecasts for all horizons h & time points t - yf(?, 0) = y // first row is actual values - val cut = cap + d // cut over from actual to forecasted values - - for t <- y.indices do - if t < cut then - for k <- 1 to h do yf(t, k) = y(t) // copy first cut observed values from y - else - val ft = forecast (t, h) // forecasts at time point t, horizons 1 to h - for k <- 1 to h if t+k-1 < y.dim do - yf(t+k-1, k) = ft(k-1) // place forecasts diagonally - end for - end if - end for - - // fill in blank values in first few rows where no forecasts can be produced by copying values from previous columns - for k <- 2 to h; t <- cut until cut+k-1 do yf(t, k) = yf(t, k-1) // copy forecasted values - - yf // return matrix of forecasted values - end forecastAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all m time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, yf(?, 0) is set to y (the actual time-series values). - * Do not forecast errors, rather use observed errors from training and make sure not - * to use errors that would correspond to knowing future errors (all future errors should - * be assumed to be 0). - * @see https://otexts.com/fpp3/arima-forecasting.html, section 9.8 - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - */ -/*** - override def forecastAll2 (h: Int): MatrixD = forecastAll (h) - FIX - values must be computed diagonially - does not work for d = 1, etc. 
(missing value at 'cut') - override def forecastAll2 (h: Int): MatrixD = - { - val zf = new MatrixD (y.dim, h+1) // forecast matrix: rows - time, cols - horizon - for t <- z.indices do zf(t, 0) = z(t) // first column is actual values, horizon 0 - val cut = cap + d // cut over from actual to forecasted values - - for k <- 1 to h do // loop through k-steps ahead forecasts - val e_ = new VectorD (z.dim) // redetermine errors from a clean slate - - for t <- 0 until cut do // seed the first cap = max(p, q) values - zf(t, k) = z(t) // copy first cap actual values - e_(t) = e(t) // copy first cap errors (observed in training) - end for - - for t <- cut until y.dim do // forecast from cap to the end of time-series - if t-k >= 0 then e_(t-k) = e(t-k) // unveil previous error at time t-k - var sum = 0.0 - for j <- 0 until p if t-j > 0 then sum += φ(j) * zf(t-1-j, max (0, k-1-j)) - for j <- 0 until q if t-j > 0 then sum += θ(j) * e_(t-1-j) - zf(t, k) = sum // centered forecast for time t - end for - end for - println (s"forecastAll2: zf (${zf.dim}) = $zf") - if differenced then transformBack_allH (zf, y, d) - else zf + μ // return uncentered forecasts - end forecastAll2 -***/ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Obtain residuals/errors in the original scale. - */ - def residuals: VectorD = if differenced then y - predictAll (y) else e - -end ARIMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest` main function tests the `ARIMA` class on real data: - * Forecasting lake levels. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRIMATest - */ -@main def aRIMATest (): Unit = - - import Example_LakeLevels.y - import SARIMAX.hp - -// val d = 0 // apply d-th order differencing - no differencing - val d = 1 // apply d-th order differencing - first differences - - for h <- 1 to 1 do // forecasting horizon - for p <- 1 to 7 do // auto-regressive hyper-parameter settings - for q <- 0 to 2 do // moving-average hyper-parameter settings - banner (s"Test: ARIMA ($p, $d, $q) with h = $h") - hp("p") = p; hp("d") = d; hp("q") = q // set p, d and q for the ARIMA model - val mod = new ARIMA (y) // create an ARIMA model - mod.trainNtest ()() // train the model on full dataset - -/* - val yfa = mod.forecastAll (y, h) - val yf = yfa(?, h) // forecasted values - h steps ahead - new Plot (null, y, yf, s"Plot of y & yf, forecasted (h = $h) ${mod.modelName} vs. t", true) - - if h == 1 then Forecaster.differ (yp, yf, allow = true) - val skip = max (p, q) // skip the cap start-up - banner (s"aRIMATest: QoF (@h = $h) for yf = mod.forecastAll") - println (s"rSq (yf) for h = $h is ${rSqF (y, yf)}") - println (s"rSq (yf) for h = $h is ${rSqF (y, yf, max (p, q))}, skip = $skip") - println (s"sMAPE (yf) for h = $h is ${smapeF (y, yf)}") - println (s"sMAPE (yf) for h = $h is ${smapeF (y, yf, max (p, q))}, skip = $skip") - - banner (s"aRIMATest: QoF (@h = $h) for yf2 = mod.forecastAll2") - println (s"rSq (yf2) for h = $h is ${rSqF (y, yf2)}") - println (s"rSq (yf2) for h = $h is ${rSqF (y, yf2, max (p, q))}, skip = $skip") - println (s"sMAPE (yf2) for h = $h is ${smapeF (y, yf2)}") - println (s"sMAPE (yf2) for h = $h is ${smapeF (y, yf2, max (p, q))}, skip = $skip") -*/ - end for - end for - end for - -end aRIMATest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest2` main function tests the `ARIMA` class. - * Test simulated data. 
- * > runMain scalation.modeling.forecasting.aRIMATest2 - */ -@main def aRIMATest2 (): Unit = - - import SARIMAX.hp - - banner ("ARIMA Test2") - val m = 20 - val noise = Normal (0, 2) -// val noise = Uniform (-5, 5) - val y = VectorD (for i <- 0 until m yield i + noise.gen) - - println (s"y = $y") - - val (p, d, q) = (1, 1, 1) - hp("p") = p; hp("d") = d; hp("q") = q // set p, d and q for the ARIMA model - val mod = new ARIMA (y) // time series data: y vs. t - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - -end aRIMATest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest3` main function tests the `ARIMA` class. - * Traffic dataset. - * > runMain scalation.modeling.forecasting.aRIMATest3 - */ -@main def aRIMATest3 (): Unit = - - import SARIMAX.hp - - val nfile = "travelTime.csv" - val data = MatrixD.load (nfile) - -// val t = data(?, 0) - val y = data(?, 1) - println (s"y = $y") - - val (p, d, q) = (1, 1, 1) - val steps = 1 // number of steps for the forecasts - - hp("p") = p; hp("d") = d; hp("q") = q // set p, d and q for the ARIMA model - val mod = new ARIMA (y) // time series data: y vs. t - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - val ar_f = mod.forecast (h = steps) - println (s"$steps-step ahead forecasts using ${mod.modelName} model = $ar_f") - -end aRIMATest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest4` main function tests the `ARIMA` class. - * Simulated data with a quadratic pattern. 
- * > runMain scalation.modeling.forecasting.aRIMATest4 - */ -@main def aRIMATest4 (): Unit = - - import SARIMAX.hp - - val m = 50 - val (p, d, q) = (1, 1, 1) // hyper-parameters for the ARIMA model - val steps = 2 // number of steps for the forecasts - val sig2 = 10000.0 - val noise = Normal (0.0, sig2) - val y = VectorD (for i <- 0 until m yield 40 * (i-1) - (i-2) * (i-2) + noise.gen) - - banner (s"Build ARIMA($p, $d, $q) model") - hp("p") = p; hp("d") = d; hp("q") = q // set p, d and q for the ARIMA model - val mod = new ARIMA (y) // time series model ARIMA - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - banner ("Make Forecasts") - val yf = mod.forecast (steps) - println (s"$steps-step ahead forecasts using ${mod.modelName} model = $yf") - -end aRIMATest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRIMATest5` main function tests the `ARIMA` class on real data: - * Forecasting lake levels. Select the best number of lags. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRIMATest5 - * -@main def aRIMATest5 (): Unit = - - import Example_LakeLevels.y - - val d = 0 // level of differencing - val mod = new ARIMA (y) // create model for time series data - mod.setPQ (VectorI (1, 1)) - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - val res = mod.forwardSel () - println (s"forwardSel: $res") - - for (sp, sq) <- Array ((1, 0), (2, 1), (1, 1), (1, 2), (0, 1)) do - val res = mod.forwardSel2 (VectorI (sp, sq)) - println (s"forwardSel2: $res") - end for - -end aRIMATest5 - */ - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARMA.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARMA.scala.bak deleted file mode 100644 index 9773c32ef..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARMA.scala.bak +++ /dev/null @@ -1,255 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller, Michael Cotterell - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). 
- * - * @title Model: Auto-Regressive, Moving-Average (ARMA) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ -import scalation.optimization._ - -import ARMA.hp - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARMA` class provides basic time series analysis capabilities for Auto-Regressive, - * Moving-Average (ARMA) models. In an ARMA(p, q) model, p refers to the order of the - * Auto-Regressive components and q refers to the Moving-Average compoenest of the model. - * ARMA models are often used for forecasting. - * Given time-series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * y_t = δ + Σ(φ_k y_t-k) + Σ(θ_k e_t-k) + e_t - * where δ is a constant, φ is the auto-regressive coefficient vector, - * θ is the moving-average vector, and e_t is the noise term. 
- *---------------------------------------------------------------------------------- - * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -abstract class ARMA (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARMA.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = hparam("p").toInt, df = y.dim - hparam("p").toInt): - - private val debug = debugf ("ARMA", true) // debug function - private val flaw = flawf ("ARMA") // flaw function - - m = y.dim // number of time points (@see `FitM`) - protected var p = hparam("p").toInt // p-th order Auto-Regressive and - protected var q = hparam("q").toInt // q-th order Moving-Average model - protected var φ = VectorD.nullv // AR(p) parameters/coefficients part - protected var θ = VectorD.nullv // MA(q) parameters/coefficients part - protected var δ = NO_DOUBLE // drift/intercept/constant term - protected var yf = MatrixD.nullm // the forecast matrix - time points x horizons - - if p > MAX_LAGS then flaw ("init", s"p = $p must not be greater than MAX_LAGS = $MAX_LAGS") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including its current hyper-parameter, e.g., ARMA(2, 1). - */ - override def modelName: String = s"ARMA($p, $q)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARMA` model to the times-series data in vector y_. - * Estimate the coefficient vector φ for a p-th order Auto-Regressive ARMA(p) model. - * Uses Durbin-Levinson Algorithm (in `Correlogram`) to determine the coefficients. - * The φ vector is p-th row of psi matrix (ignoring the first (0th) column). 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - m = y_.dim // length of relevant time-series - e = new VectorD (m) - resetDF (p, m - p) // reset the degrees of freedom - makeCorrelogram (y_) // correlogram computes psi matrix, gives ACF and PACF - - val mu = y_.mean // sample mean of y_ - val z = y_ - mu // optimization works better using zero-centered data - φ = new VectorD (p) // zeros for AR part - θ = new VectorD (q) // zeros for MA part - δ = 0.0 // drift/intercept for z (should end up close to zero) - val b = φ ++ θ :+ δ // combine all parameters -> vector to optimize - - def csse (b: VectorD): Double = // objective function - conditional sum of squared errors - φ = b(0 to p); θ = b(p to p+q); δ = b(b.dim-1) - val (yy, yp) = testSetup (z) // get and align actual and predicted values - val s = (yy - yp).normSq // sum of squared errors -// println (s"csse: s = $s, b = $b") - s - end csse - - def nll (b: VectorD): Double = // objective function - negative log-likelihood (MLE) - 0.0 // FIX - implement - end nll - - val optimizer = new BFGS (csse) // apply Quasi-Newton BFGS optimizer -// val optimizer = new ConjugateGradient (csse) // apply Conjugate Gradient optimizer - fails -// val optimizer = new CoordinateDescent (csse) // apply Coordinate Descent optimizer -// val optimizer = new NelderMeadSimplex (csse, 3) // apply Nelder-Mead Simplex optimizer -// val optimizer = new GridSearch (csse, 3); optimizer.setAxes () // apply GridSearch BFGS optimizer - close - val (fb, bb) = optimizer.solve (b) // optimal solution for the objective function and parameters - - φ = bb(0 to p); θ = bb(p to p+q); δ = bb(b.dim-1) // recover parameters for z - δ += mu * (1 - φ.sum) // uncenter - debug ("train", s"parameters for ARMA($p, $q) model: φ = $φ, θ = $θ, δ = $δ") - end train - - 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an ARMA forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the training/testing response/output vector (e.g., full y) - */ - def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yy, yp) = testSetup (y_) // get and align actual and predicted values - resetDF (p+q, yy.dim - p+q) // reset the degrees of freedom - (yp, diagnose (yy, yp)) // evaluate and return the QoF of these predictions - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an ARMA forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing response/output vector (e.g., full y) - * @param redo whether to use existing forecasts or redo them (defaults to false) - */ - def testf (h: Int, y_ : VectorD, redo: Boolean = false): VectorD = - if yf == null || yf.dim2 < h+1 || redo then yf = forecastAll (h, y_) // redo all forecasts - val yy = y_(h to y_.dim) - val yf_h = yf(?, h)(h to y_.dim) // pull column h from the forecast matrix and align - resetDF (p, yy.dim - p) // reset the degrees of freedom - diagnose (yy, yf_h) // evaluate and return the QoF of these forecasts - end testf - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the ARMA(p, q) model. 
- */ - override def parameter: VectorD = φ ++ θ :+ δ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for time point/index t using 1-step ahead forecasts. - * y_t = φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time), - * but do not assume errors repeat. - * @see predictAll in `Forecaster` - * @param t the time point/index to be predicted - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - if t < 1 || t > y_.dim then flaw ("predict", s"time index t = $t is out of range") - var sum = δ - for j <- 0 until p do sum += φ(j) * y_(max (0, t-1-j)) - for j <- 0 until q if t-j > 0 do sum += θ(j) * e(t-1-j) - if t < y_.dim then e(t) = y_(t) - sum // update the t-th error e_t - sum // prediction for y_t, yp_t - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * Forecast recurse down diagonals in the yf forecast matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. 
- * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - */ - def forecastAll (h: Int, y_ : VectorD): MatrixD = - yf = new MatrixD (y_.dim+h, h+1) // forecasts for all time points t & horizons to h - for t <- 0 until m do yf(t, 0) = y_(t) // first column is actual values, horizon 0 - for k <- 1 to h do - for t <- y_.indices do // make forecasts over all time points for horizon k - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t+k-1-j), max (0, k-1-j)) - yf(t+k, k) = sum // forecast down the diagonal - end for - debug ("forecastAll", s"yf(?, $k) = ${yf(?, k)}") - end for - yf // return matrix of forecasted values - end forecastAll - -end ARMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARMA` companion object provides factory methods for the `ARMA` class. - */ -object ARMA: - - /** Base hyper-parameter specification for `ARMA` class - */ - val hp = new HyperParameter - hp += ("p", 1, 1) - hp += ("q", 1, 1) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARMA` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARMA.hp): ARMA = - new ARMA (y, tt, hparam) - end apply - -end ARMA - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMATest` main function tests the `ARMA` class on real data: Forecasting lake levels. - * Test the test, predictAll, testf and forecastAll methods over the whole times-series. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRMATest - */ -@main def aRMATest (): Unit = - - import Example_LakeLevels.y - - val m = y.dim - val t = VectorD.range (1, m) - val h = 3 // the forecasting horizon - val q = 1; hp("q") = q // moving-average hyper-parameter q - - var mod: ARMA = null - for p <- 1 to 1 do // autoregressive hyper-parameter p - hp("p") = p // set p hyper-parameter - banner (s"Test: ARMA($p, $q}") - mod = new ARMA (y) // create model for time series data - mod.train (null, y) // train the model on full dataset - val yp = mod.testPred (y, t) -/* - val yf = mod.forecastAll (h, y) // forecast h-steps ahead for all y - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - assert (yf(?, 1)(1 to m+1) == yp) // column 1 must agree with one step-ahead predictions - for k <- 1 to h do - println (s"evalaute QoF for horizon $k:") - println (Fit.fitMap (mod.testf (k, y))) // evaluate k-units ahead forecasts - end for -*/ - end for - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRMATest - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARMA2.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARMA2.scala.bak deleted file mode 100644 index cedb942b8..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARMA2.scala.bak +++ /dev/null @@ -1,303 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Hao Peng, John Miller, Michael Cotterell - * @version 2.0 - * @date Sat Jun 13 01:27:00 EST 2017 - * @see LICENSE (MIT style license file). 
- * - * @title Model: Auto-Regressive, Moving-Average (ARMA2) - * - * @see http://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model - * @see http://www.emu.edu.tr/mbalcilar/teaching2007/econ604/lecture_notes.htm - * @see http://www.stat.berkeley.edu/~bartlett/courses/153-fall2010 - * @see http://www.princeton.edu/~apapanic/ORFE_405,_Financial_Time_Series_%28Fall_2011%29_files/slides12-13.pdf - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ -import scalation.optimization._ - -import ARMA2.hp - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARMA2` class provides basic time series analysis capabilities for Auto-Regressive, - * Moving-Average (ARMA2) models. In an ARMA2(p, q) model, p refers to the order of the - * Auto-Regressive components and q refers to the Moving-Average compoenest of the model. - * ARMA2 models are often used for forecasting. - * Given time-series data stored in vector y, its next value y_t = y(t) - * may be predicted based on prior values of y and its noise: - * y_t = δ + Σ(φ_k y_t-k) + Σ(θ_k e_t-k) + e_t - * where δ is a constant, φ is the auto-regressive coefficient vector, - * θ is the moving-average vector, and e_t is the noise term. 
- *---------------------------------------------------------------------------------- - * @param y the response vector (time-series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ -class ARMA2 (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARMA2.hp) - extends Forecaster (y, tt, hparam) - with Correlogram (y) - with Fit (dfm = hparam("p").toInt, df = y.dim - hparam("p").toInt): - - private val debug = debugf ("ARMA2", true) // debug function - private val flaw = flawf ("ARMA2") // flaw function - - m = y.dim // number of time points (@see `FitM`) - protected var p = hparam("p").toInt // p-th order Auto-Regressive and - protected var q = hparam("q").toInt // q-th order Moving-Average model - protected var φ = VectorD.nullv // AR(p) parameters/coefficients part - protected var θ = VectorD.nullv // MA(q) parameters/coefficients part - protected var δ = NO_DOUBLE // drift/intercept/constant term -// protected var yf = MatrixD.nullm // the forecast matrix - time points x horizons - - if p > MAX_LAGS then flaw ("init", s"p = $p must not be greater than MAX_LAGS = $MAX_LAGS") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including its current hyper-parameter, e.g., ARMA2(2, 1). - */ - override def modelName: String = s"ARMA2($p, $q)" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit an `ARMA2` model to the times-series data in vector y_. - * Estimate the coefficient vector φ for a p-th order Auto-Regressive ARMA2(p) model. - * Uses Durbin-Levinson Algorithm (in `Correlogram`) to determine the coefficients. - * The φ vector is p-th row of psi matrix (ignoring the first (0th) column). 
- * @param x_null the data/input matrix (ignored, pass null) - * @param y_ the training/full response vector (e.g., full y) - */ - def train (x_null: MatrixD, y_ : VectorD): Unit = - m = y_.dim // length of relevant time-series - resetDF (p+q, m - (p+q)) // reset the degrees of freedom - makeCorrelogram (y_) // correlogram computes psi matrix, gives ACF and PACF - - val mu = y_.mean // sample mean of y_ - val z = y_ - mu // optimization works better using zero-centered data - φ = new VectorD (p) // zeros for AR part - θ = new VectorD (q) // zeros for MA part - δ = 0.0 // drift/intercept for z (should end up close to zero) - val b = φ ++ θ :+ δ // combine all parameters -> vector to optimize - - def csse (b: VectorD): Double = // objective function - conditional sum of squared errors - φ = b(0 to p); θ = b(p to p+q); δ = b(b.dim-1) // pull parameters out of b vector - ssef (z, predictAll (z)) // compute loss function - end csse - - def nll (b: VectorD): Double = // objective function - negative log-likelihood (MLE) - 0.0 // FIX - implement - end nll - - val optimizer = new BFGS (csse) // apply Quasi-Newton BFGS optimizer -// val optimizer = new ConjugateGradient (csse) // apply Conjugate Gradient optimizer - fails -// val optimizer = new CoordinateDescent (csse) // apply Coordinate Descent optimizer -// val optimizer = new NelderMeadSimplex (csse, 3) // apply Nelder-Mead Simplex optimizer -// val optimizer = new GridSearch (csse, 3); optimizer.setAxes () // apply GridSearch BFGS optimizer - close - val (fb, bb) = optimizer.solve (b) // optimal solution for the objective function and parameters - - φ = bb(0 to p); θ = bb(p to p+q); δ = bb(b.dim-1) // recover parameters for z - δ += mu * (1 - φ.sum) // uncenter - debug ("train", s"parameters for ARMA2($p, $q) model: φ = $φ, θ = $θ, δ = $δ") - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test PREDICTIONS of an ARMA2 forecasting model y_ = f(lags (y_)) + e and return its 
- * QoF vector. Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param x_null the training/testing data/input matrix (ignored, pass null) - * @param y_ the training/testing response/output vector (e.g., full y) - */ - def test (x_null: MatrixD, y_ : VectorD): (VectorD, VectorD) = - val (yy, yp) = testSetup (y_) // get and align actual and predicted values - resetDF (p+q, yy.dim - (p+q)) // reset the degrees of freedom - (yp, diagnose (yy, yp)) // evaluate and return the QoF of these predictions - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of an ARMA2 forecasting model y_ = f(lags (y_)) + e and return its - * QoF vector. Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the training/testing response/output vector (e.g., full y) - * @param redo whether to use existing forecasts or redo them (defaults to false) - */ - def testF (h: Int, y_ : VectorD): (VectorD, VectorD) = - if yf == null || yf.dim2 < h+1 then yf = forecastAll (h, y_) // redo all forecasts - val yy = y_(h to y_.dim) - val yfh = yf(?, h)(h to y_.dim) // pull column h from the forecast matrix and align - resetDF (p+q, yy.dim - (p+q)) // reset the degrees of freedom - (yfh, diagnose (yy, yfh)) // evaluate and return the QoF of these forecasts - end testF - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector for the ARMA2(p, q) model. - */ - override def parameter: VectorD = φ ++ θ :+ δ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using 1-step ahead forecasts. 
- * y_t+1 = φ_0 y_t + φ_1 y_t-1 + ... + φ_p-1 y_t-(p-1) + - * θ_0 e_t + θ_1 e_t-1 + ... + θ_q-1 e_t-(q-1) + e_t+1 - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time), - * but do not assume errors repeat. - * @see predictAll in `Forecaster` - * @param t the time point/index to be predicted - * @param y_ the actual values to use in making predictions - */ - def predict (t: Int, y_ : VectorD): Double = - var sum = δ // intercept - for j <- 0 until p do sum += φ(j) * y_(max (0, t-j)) - for j <- 0 until q if t-j >= 0 do sum += θ(j) * e(t-j) - if t < y_.dim-1 then e(t+1) = y_(t+1) - sum // update the error vector - sum // prediction for y_t, yp_t - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points and all horizons (1 through h-steps ahead). - * Record these in the yf matrix, where - * yf(t, k) = k-steps ahead forecast for y_t - * Note, column 0, yf(?, 0), is set to y (the actual time-series values). - * Forecast recurse down diagonals in the yf forecast matrix. - * The top right and bottom left triangles in yf matrix are not forecastable. 
- * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the actual values to use in making forecasts - */ - def forecastAll (h: Int, y_ : VectorD): MatrixD = - yf = new MatrixD (y_.dim+h, h+1) // forecasts for all time points t & horizons to h - for t <- 0 until m do yf(t, 0) = y_(t) // first column is actual values, horizon 0 - for k <- 1 to h do - for t <- y_.indices do // make forecasts over all time points for horizon k - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t+k-1-j), max (0, k-1-j)) - yf(t+k, k) = sum // forecast down the diagonal - end for - debug ("forecastAll", s"yf(?, $k) = ${yf(?, k)}") - end for - yf // return matrix of forecasted values - end forecastAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to forecasting matrix and return h-step ahead forecast. - * For 1-step ahead (h = 1), - * y_t = δ + φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time). 
- * @param yf the forecasting matrix (time x horizons) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - override def forecastAt (yf: MatrixD, y_ : VectorD, h: Int): VectorD = - if h < 1 then flaw ("forecastAt", s"horizon h = $h must be at least 1") - e(0) // assume error at time 0 is 0 - val m = y_.dim - for t <- y_.indices do // make forecasts over all time points for horizon k - val t1 = t+h-1 // time point prior to horizon - var sum = δ - for j <- 0 until p do sum += φ(j) * yf(max (0, t1-j), max (0, h-1-j)) - for j <- 0 until q if t1-j >= 0 do sum += θ(j) * e(t1-j) - yf(t+h, h) = sum // forecast down the diagonal - if h == 1 && t < m-1 then e(t+1) = y_(t+1) - sum // update the next element in the error vector - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end ARMA2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARMA2` companion object provides factory methods for the `ARMA2` class. - */ -object ARMA2: - - /** Base hyper-parameter specification for `ARMA2` class - */ - val hp = new HyperParameter - hp += ("p", 1, 1) - hp += ("q", 0, 0) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARMA2` object. - * @param y the response vector (time series data) - * @param tt the time vector, if relevant (time index may suffice) - * @param hparam the hyper-parameters - */ - def apply (y: VectorD, tt: VectorD = null, hparam: HyperParameter = ARMA2.hp): ARMA2 = - new ARMA2 (y, tt, hparam) - end apply - -end ARMA2 - -import Example_LakeLevels.y - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMA2Test` main function tests the `ARMA2` class on real data: Forecasting lake levels. - * Test predictions (one step ahead forecasts). 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRMA2Test - */ -@main def aRMA2Test (): Unit = - - banner (s"Test Predictions: ARMA(1, 0) on LakeLevels Dataset") - var mod = new ARMA2 (y) // create model for time series data ARMA(1, 0) - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - banner (s"Test Predictions: ARMA(1, 1) on LakeLevels Dataset") - hp("q") = 1 // set moving-average hyper-parameter q to 1 - mod = new ARMA2 (y) // create model for time series data ARMA(1, 1) - mod.train (null, y) // train the model on full dataset - val (yp2, qof2) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof2)) // report on Quality of Fit (QoF) - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRMA2Test - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRMA2Test2` main function tests the `ARMA2` class on real data: Forecasting lake levels. - * Test the test, predictAll, testf and forecastAll methods over the whole times-series. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRMA2Test2 - */ -@main def aRMA2Test2 (): Unit = - - val m = y.dim - val t = VectorD.range (1, m) - val h = 3 // the forecasting horizon - val q = 1; hp("q") = q // moving-average hyper-parameter q - - var mod: ARMA2 = null - for p <- 1 to 1 do // autoregressive hyper-parameter p - hp("p") = p // set p hyper-parameter - banner (s"Test: ARMA2($p, $q}") - mod = new ARMA2 (y) // create model for time series data - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) -/* - val yf = mod.forecastAll (h, y) // forecast h-steps ahead for all y - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0)(0 until m) == y) // column 0 must agree with actual values - assert (yf(?, 1)(1 to m+1) == yp) // column 1 must agree with one step-ahead predictions - for k <- 1 to h do - println (s"evalaute QoF for horizon $k:") - println (Fit.fitMap (mod.testf (k, y))) // evaluate k-units ahead forecasts - end for -*/ - end for - - banner ("Select model based on ACF and PACF") - mod.plotFunc (mod.acF, "ACF") // Auto-Correlation Function (ACF) - mod.plotFunc (mod.pacF, "PACF") // Partial Auto-Correlation Function (PACF) - -end aRMA2Test2 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARX.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARX.scala.bak deleted file mode 100644 index 10d0b4511..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARX.scala.bak +++ /dev/null @@ -1,532 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Tue Feb 22 23:14:31 EST 2022 - * @see LICENSE (MIT style license file). 
- * - * @note Model: AutoRegressive with eXogenous Variables (Time Series Regression) - */ - -package scalation -package modeling -package forecasting - -import scala.math.{max, min} - -import scalation.mathstat._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX` class supports regression for Time Series data. - * Multi-horizon forecasting supported via the RECURSIVE method. - * Given a response vector y, a predictor matrix x is built that consists of - * lagged y vectors, - * - * y_t = b dot x - * where x = [1, y_{t-1}, y_{t-2}, ... y_{t-lags}] - * - * @param x the input/predictor matrix built out of lags of y - (and optionally from exogenous variables ex) - * @param yy the output/response vector trimmed to match x.dim - * @param lags the maximum lag included (inclusive) - * @param fname the feature/variable names - * @param hparam the hyper-parameters (use Regression.hp for default) - */ -class ARX (x: MatrixD, yy: VectorD, lags: Int, fname: Array [String] = null, - hparam: HyperParameter = Regression.hp) - extends Regression (x, yy, fname, hparam) - with ForecasterX (lags): - - private val debug = debugf ("ARX", true) // debug function - private val flaw = flawf ("ARX") // flaw function - - modelName = s"ARX_$lags" - - debug ("init", s"$modelName: x.dims = ${x.dims}, yy.dim = ${yy.dim}") - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. - * Intended to work with rolling validation (analog of predict method) - * Must call `forecastAll` first. 
- * @param t the time point from which to make forecasts - * @param yf the forecasting matrix (time x horizons) - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - VectorD (for k <- 1 to h yield yf(t+k, k)) // get yf diagonal from time t - end forecast - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to forecasting matrix and return h-step ahead forecast. - * For 1-step ahead (h = 1), - * y_t = δ + φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time). - * @param yf the forecasting matrix for the endogenous variable y (time x horizons) - * @param yx the matrix of endogenous y and exogenous x values - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, yx: MatrixD, h: Int): VectorD = - if h < 1 then flaw ("forecastAt", s"horizon h = $h must be at least 1") - for t <- yx.indices do // make forecasts over all time points for horizon h - val t1 = t + h - 1 // time point prior to horizon - yf(t+h, h) = b dot yx(min (t1, yx.dim-1)) // forecast down the diagonal ?? - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a `ARX` forecasting model y_ = f(x) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - * @param yx the matrix of endogenous y and exogenous x values - */ - def testF (h: Int, y_ : VectorD, yx: MatrixD): (VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, yx, h) // get and align actual and forecasted values - val params = x.dim2 - resetDF (params, yy.dim - params) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// differ (yy, yfh) // uncomment for debugging - (yfh, diagnose (yy, yfh)) // return predictions and QoF vector - end testF - -end ARX - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX` companion object provides factory methods. - */ -object ARX: - - private val debug = debugf ("ARX", true) // debug function - - private val TREND = false // include quadratic trend - private val DAY = false // include day of the week effect - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `ARX` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. 
- * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @param hparam the hyper-parameters (use Regression.hp for default) - */ - def apply (y: VectorD, lags: Int, hparam: HyperParameter = Regression.hp): ARX = - val (x_, yy) = buildMatrix4TS (y, lags) // column for each lag - var x = VectorD.one (yy.dim) +^: x_ // add first column of all ones - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - end if - if DAY then - val day = VectorI (for t <- yy.indices yield t % 7) - x = day.toDouble +^: x // add DAY of week as ordinal var - -// val dum = Variable.dummyVars (day) -// x = x ++^ dum // add DAY of week as dummy vars - end if - - println (s"apply: x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// println (s"apply: x = $x \n yy = $yy") - new ARX (x, yy, lags, null, hparam) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `ARX` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * In addition, lagged exogenous variables are added. 
- * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @parax ex the input matrix for exogenous variables (one per column) - * @param hparam the hyper-parameters (use Regression.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def exo (y: VectorD, lags: Int, ex: MatrixD, hparam: HyperParameter = Regression.hp) - (elag1: Int = max (1, lags / 5), elag2: Int = max (1, lags)): ARX = - val (x_, yy) = buildMatrix4TS (y, lags) // column for each lag - var x = VectorD.one (yy.dim) +^: x_ // add first column of all ones - val endoCols = x.dim2 - println (s"endogenous: columns = $endoCols") - - x = x ++^ makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var - println (s"exogenous: columns = ${x.dim2 - endoCols}") - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - end if - if DAY then - val day = VectorI (for t <- yy.indices yield t % 7) - val dum = Variable.dummyVars (day) - x = x ++^ dum // add DAY of week as dummy vars - end if - - println (s"exo: x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// println (s"exo: x = $x \n yy = $yy") - new ARX (x, yy, lags, null, hparam) - end exo - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Make a matrix whose columns are lagged exogenous variables to be added to a data matrix. 
- * @param lags the maximum lag included (inclusive) for checking purposes - * @param ex the matrix of data for the exogenous variables - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def makeExoCols (lags: Int, ex: MatrixD, elag1: Int, elag2: Int): MatrixD = - var xx: MatrixD = buildMatrix4TS_exo (ex(?, 0), lags, elag1, elag2) - for j <- 1 until ex.dim2 do - xx = xx ++^ buildMatrix4TS_exo (ex(?, j), lags, elag1, elag2) - end for - println (s"addExoVars: collects lags of ${ex.dim2} exo variables into #xx.dim2 columns") - xx - end makeExoCols - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split the x matrix and y vector into training and testing sets. - * @param x the x data/input matrix - * @param y the y response/output vector - * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) - */ - def split_TnT (x: MatrixD, y: VectorD, ratio: Double = 0.30): (MatrixD, VectorD, MatrixD, VectorD) = - val n = x.dim - val tr_size = (n * (1.0 - ratio)).toInt - println (s"ARX.split_TnT: tr_size = $tr_size, te_size = ${n - tr_size}") - (x(0 until tr_size), y(0 until tr_size), x(tr_size until n), y(tr_size until n)) - end split_TnT - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TESTING SET (tr) and a TRAINING SET (te) - * as follows: [ <-- tr_size --> | <-- te_size --> ] - * This version calls predict for one-step ahead out-of-sample forecasts. 
- * @see `RollingValidation` - * @param mod the forecasting model being used (e.g., `ARX`) - * @param rc the retraining cycle (number of forecasts until retraining occurs) - */ - def rollValidate (mod: Predictor & Fit, rc: Int): Unit = - val x = mod.getX // get data/input matrix - val y = mod.getY // get response/output vector - val tr_size = RollingValidation.trSize (y.dim) // size of initial training set - val te_size = y.dim - tr_size // size of testing set - debug ("rollValidate", s"train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") - - val yp = new VectorD (te_size) // y-predicted over testing set - for i <- 0 until te_size do // iterate through testing set - val t = tr_size + i // next time point to forecast - if i % rc == 0 then mod.train (x(0 until t), y(0 until t)) // retrain on sliding training set - yp(i) = mod.predict (x(t-1)) // predict the next value - end for - - val (t, yy) = RollingValidation.align (tr_size, y) // align vectors - val df = max (0, mod.parameter.size - 1) // degrees of freedom for model - mod.resetDF (df, te_size - df) // reset degrees of freedom - new Plot (t, yy, yp, "Plot yy, yp vs. t", lines = true) - println (FitM.fitMap (mod.diagnose (yy, yp), QoF.values.map (_.toString))) - end rollValidate - -end ARX - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest` main function tests the `ARX` class. - * This test is used to CHECK that the buildMatrix4TS function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. 
- * > runMain scalation.modeling.forecasting.aRXTest - */ -@main def aRXTest (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - - for p <- 1 to 10 do // autoregressive hyper-parameter p - banner (s"Test: ARX with $p lags") - val mod = ARX (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yp = mod.predict (mod.getX) - new Plot (null, mod.getY, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - end for - -end aRXTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest2` main function tests the `ARX` class on real data: - * Forecasting lake levels. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRXTest2 - */ -@main def aRXTest2 (): Unit = - - import Example_LakeLevels.y - val h = 2 // the forecasting horizon - - for p <- 1 to 8 do // autoregressive hyper-parameter p - banner (s"Test: ARX with $p lags") - val mod = ARX (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) // parameter/coefficient statistics - - banner ("Predictions") - val yy = mod.getY // trimmed actual response vector - val xx = mod.getX - val yp = mod.predict (xx) // predicted response vector - new Plot (null, yy, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - println (s"yp = $yp") - - banner ("Forecasts") -// val yf = mod.forecast (yp, h) // forecasted response matrix - val yf = mod.forecastAll (yy, xx, h) // forecasted response matrix - for k <- yf.indices2 do - new Plot (null, yy, yf(?, k), s"yy vs. 
yf_$k for ${mod.modelName} with $p lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0) == yp) // first forecast = predicted values -/* - banner ("Forecast QoF") - println (testForecast (mod, y, yf, p)) // QoF -// println (Fit.fitMap (mod.testf (k, y))) // evaluate k-units ahead forecasts -*/ - end for - -end aRXTest2 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest3` main function tests the `ARX` class on real data: - * Forecasting COVID-19 Daily Data. Does In-Sample Testing on Endogenous variable. - * > runMain scalation.modeling.forecasting.aRXTest3 - */ -@main def aRXTest3 (): Unit = - - val LAGS = 5 // number of lags of y - val h = 2 // forecasting horizon - - val exo_vars = Array.ofDim [String] (0) // no exogenous variables in this case - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX on COVID-19 Weekly Data") - val mod = ARX (y, LAGS) // create model for time series data - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - - banner (s"Multi-horizon forecasting using the recursive method") - val yx = mod.getX - val yf = mod.forecastAll (y, yx, h) // forecasted response matrix - for k <- 0 to h do - new Plot (null, y, yf(?, k), s"y vs. 
yf_$k for ${mod.modelName} with $LAGS lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}, y.dim = ${y.dim}, yp.dim = ${yp.dim}") - val yf0 = yf(?, 0)(0 until y.dim) - val yf1 = yf(?, 1)(1 until y.dim) - Forecaster.differ (yf0, y) - Forecaster.differ (yf1, yp) - assert (yf0 =~ y) // zeroth forecast = actual values - assert (yf1 =~ yp) // first forecast = predicted values - - for k <- 1 to h do - val (yfh, qof) = mod.testF (k, y, yx) // k-steps ahead forecast and its QoF - println (s"Evaluate QoF for horizon $k:") - println (FitM.fitMap (qof, QoF.values.map (_.toString))) // evaluate k-steps ahead forecasts - end for - - banner (s"Feature Selection Technique: stepRegression") - val (cols, rSq) = mod.stepRegressionAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"Stepwise: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end aRXTest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest4` main function tests the `ARX` class on real data: - * Forecasting COVID-19 Weekly Data. Does In-Sample Testing on endogenous and exogenous variables. 
- * > runMain scalation.modeling.forecasting.aRXTest4 - */ -@main def aRXTest4 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX.exo on COVID-19 Weekly Data") - val mod = ARX.exo (y, 10, ex)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end aRXTest4 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest5` main function tests the `ARX` class on real data: - * Forecasting COVID-19 Weekly Data. Does TnT Testing on endogenous and exogenous variables. - * Determine the terms to include in the model using Stepwise on In-Sample. 
- * > runMain scalation.modeling.forecasting.aRXTest5 - */ -@main def aRXTest5 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX.exo on COVID-19 Weekly Data") - val mod = ARX.exo (y, 10, ex)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run TnT on Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - val (x_, y_, xtest, ytest) = ARX.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) - new Plot (null, ytest, yptest, s"${mod.modelName}, ytest vs. 
yptest", lines = true) - -end aRXTest5 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest6` main function tests the `ARX` class on real data: - * Forecasting COVID-19 Weekly Data. Does Rolling Validation on endogenous and exogenous - * variables. Determine the terms to include in the model using Stepwise on In-Sample. - * > runMain scalation.modeling.forecasting.aRXTest6 - */ -@main def aRXTest6 (): Unit = - - val LAGS = 7 - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX.exo on COVID-19 Weekly Data") - val mod = ARX.exo (y, LAGS, ex)(1, LAGS+1) // create model for time series data with exo - - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run Rolling Validation on ARX Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - ARX.rollValidate (bmod, 1) - -end aRXTest6 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest7` main function tests the `ARX` class on real data: - * Forecasting COVID-19 Weekly Data. Preliminary investigation of Symbolic Regression. - * > runMain scalation.modeling.forecasting.aRXTest7 - */ -@main def aRXTest7 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - - banner ("Plot Variables on COVID-19 Weekly Data") - - for lag <- 0 to 4 do - val xx_ = ex(lag until y.dim) - val yy_ = y(0 until y.dim - lag) -// new Plot (xx_, yy_, null, s"deaths vs. 
exo-vars @ lag = $lag") - - val mod = SymbolicRegression (xx_, yy_, null, collection.mutable.Set (1.0), cross = false) - mod.trainNtest ()() - println (mod.summary ()) - end for - -end aRXTest7 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARX.scala.bak2 b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARX.scala.bak2 deleted file mode 100644 index 17bd75682..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ARX.scala.bak2 +++ /dev/null @@ -1,592 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Tue Feb 22 23:14:31 EST 2022 - * @see LICENSE (MIT style license file). - * - * @note Model: AutoRegressive with eXogenous Variables (Time Series Regression) - */ - -package scalation -package modeling -package forecasting - -import scala.math.{max, min} - -import scalation.mathstat._ - -import scalation.modeling.{Regression => REGRESSION} -//import scalation.modeling.{RidgeRegression => REGRESSION} -//import scalation.modeling.{LassoRegression => REGRESSION} - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX` class supports regression for Time Series data. - * Multi-horizon forecasting supported via the RECURSIVE method. - * Given a response vector y, a predictor matrix x is built that consists of - * lagged y vectors, - * - * y_t = b dot x - * where x = [1, y_{t-1}, y_{t-2}, ... 
y_{t-lags}] - * - * @param x the input/predictor matrix built out of lags of y - * (and optionally from exogenous variables ex) - * @param yy the output/response vector trimmed to match x.dim (@see ARX object) - * @param lags the maximum lag included (inclusive) - * @param fname the feature/variable names - * @param hparam the hyper-parameters (use REGRESSION.hp for default) - */ -class ARX (x: MatrixD, yy: VectorD, lags: Int, fname: Array [String] = null, - hparam: HyperParameter = REGRESSION.hp) - extends REGRESSION (x, yy, fname, hparam) - with ForecasterX (lags): - - private val debug = debugf ("ARX", true) // debug function - private val flaw = flawf ("ARX") // flaw function - - modelName = s"ARX_$lags" - - debug ("init", s"$modelName: x.dims = ${x.dims}, yy.dim = ${yy.dim}") - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the internally row trimed and column expanded input matrix and response vector. - */ - def getXY: (MatrixD, VectorD) = (x, yy) // (getX, getY) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Predict a value for y_t+1 using the 1-step ahead forecast. - * y_t+1 = f (y_t, ...) + e_t+1 - * @param t the time point from which to make prediction - * @param yx the matrix of endogenous y and exogenous x values - */ - def predict (t: Int, yx: MatrixD): Double = ??? -/* - // FIX - prints for debugging assertion failure yp(i) != yd(0) - println (yx) - println (s"t-1: ${yx(min (t-1, yx.dim-1))} --> ${b dot yx(min (t-1, yx.dim-1))}") - println (s"t: ${yx(min (t, yx.dim-1))} --> ${b dot yx(min (t, yx.dim-1))}") - b dot yx(min (t-1, yx.dim-1)) -*/ - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. 
- * Intended to work with rolling validation (analog of predict method) - * Must call `forecastAll` first. - * @param t the time point from which to make forecasts - * @param yf the forecast matrix (time x horizons) - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - VectorD (for k <- 1 to h yield yf(t+k, k)) // get yf diagonal from time t - end forecast - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to FORECAST MATRIX and return h-step ahead forecast. - * Note, `predictAll` provides predictions for h = 1. - * @see `forecastAll` method in `Forecaster` trait. - * @param yf the forecast matrix for the endogenous variable y (time x horizons) - * @param yx the matrix of endogenous y and exogenous x values - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, yx: MatrixD, h: Int): VectorD = - if h < 2 then flaw ("forecastAt", s"horizon h = $h must be at least 2") - - for t <- yx.indices do // make forecasts over all time points for horizon h - yf(t+h-1, h) = b dot yx(min (t, yx.dim-1)) // forecast down the diagonal ?? - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a `ARX` forecasting model y_ = f(x) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. 
- * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - * @param yx the matrix of endogenous y and exogenous x values - */ - def testF (h: Int, y_ : VectorD, yx: MatrixD): (VectorD, VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, yx, h) // get and align actual and forecasted values - val params = x.dim2 - resetDF (params, yy.dim - params) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// differ (yy, yfh) // uncomment for debugging - (yy, yfh, diagnose (yy, yfh)) // return aligned actual, forecasted and QoF vectors - end testF - -end ARX - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ARX` companion object provides factory methods. - */ -object ARX: - - private val debug = debugf ("ARX", true) // debug function - - private var TREND = false // include quadratic trend - private val DAY = false // include day of the week effect - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set whether to include a simple linear (in time) trend. - * @param trend flag indicating whether to include a trend - */ - def setTrend (trend: Boolean): Unit = TREND = trend - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. 
- * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @param hparam the hyper-parameters (use REGRESSION.hp for default) - */ - def apply (y: VectorD, lags: Int, hparam: HyperParameter = REGRESSION.hp): ARX = - val (x_, yy) = buildMatrix4TS (y, lags) // column for each lag; yy is y trimmed - var x = VectorD.one (yy.dim) +^: x_ // add first column of all ones - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - if DAY then - val day = VectorI (for t <- yy.indices yield t % 7) - x = day.toDouble +^: x // add DAY of week as ordinal var - -// val dum = Variable.dummyVars (day) -// x = x ++^ dum // add DAY of week as dummy vars - - debug ("apply", s"x.dims = ${x.dims}, yy.dim = ${yy.dim}") - debug ("apply", "x = $x \n yy = $yy") - new ARX (x, yy, lags, null, hparam) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create an `ARX` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * In addition, lagged exogenous variables are added. 
- * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @parax ex the input matrix for exogenous variables (one per column) - * @param hparam the hyper-parameters (use REGRESSION.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def exo (y: VectorD, lags: Int, ex: MatrixD, hparam: HyperParameter = REGRESSION.hp) - (elag1: Int = max (1, lags / 5), elag2: Int = max (1, lags)): ARX = - val (x_, yy) = buildMatrix4TS (y, lags) // column for each lag - var x = VectorD.one (yy.dim) +^: x_ // add first column of all ones - val endoCols = x.dim2 - println (s"endogenous: columns = $endoCols") - - x = x ++^ makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var - println (s"exogenous: columns = ${x.dim2 - endoCols}") - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - end if - if DAY then - val day = VectorI (for t <- yy.indices yield t % 7) - val dum = Variable.dummyVars (day) - x = x ++^ dum // add DAY of week as dummy vars - end if - - debug ("exo", s"x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// debug ("exo", s"x = $x \n yy = $yy") - new ARX (x, yy, lags, null, hparam) - end exo - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Make a matrix whose columns are lagged exogenous variables to be added to a data matrix. 
- * @param lags the maximum lag included (inclusive) for checking purposes - * @param ex the matrix of data for the exogenous variables - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def makeExoCols (lags: Int, ex: MatrixD, elag1: Int, elag2: Int): MatrixD = - var xx: MatrixD = buildMatrix4TS_exo (ex(?, 0), lags, elag1, elag2) - for j <- 1 until ex.dim2 do - xx = xx ++^ buildMatrix4TS_exo (ex(?, j), lags, elag1, elag2) - end for - println (s"addExoVars: collects lags of ${ex.dim2} exo variables into ${xx.dim2} columns") - xx - end makeExoCols - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split the x matrix and y vector into training and testing sets. - * @param x the x data/input matrix - * @param y the y response/output vector - * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) - */ - def split_TnT (x: MatrixD, y: VectorD, ratio: Double = 0.30): (MatrixD, VectorD, MatrixD, VectorD) = - val n = x.dim - val tr_size = (n * (1.0 - ratio)).toInt - println (s"ARX.split_TnT: tr_size = $tr_size, te_size = ${n - tr_size}") - (x(0 until tr_size), y(0 until tr_size), x(tr_size until n), y(tr_size until n)) - end split_TnT - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TRAINING SET (tr) and a TESTING SET (te) - * as follows: [ <-- tr_size --> | <-- te_size --> ] - * This version calls predict for one-step ahead out-of-sample forecasts. 
- * @see `RollingValidation` - * @param mod the forecasting model being used (e.g., `ARX`) - * @param rc the retraining cycle (number of forecasts until retraining occurs) - */ - def rollValidate (mod: Predictor & Fit, rc: Int): Unit = - val x = mod.getX // get data/input matrix - val y = mod.getY // get response/output vector - val te_size = RollingValidation.teSize (y.dim) // size of initial testing set - val tr_size = y.dim - te_size // size of initial training set - debug ("rollValidate", s"train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") - - val yp = new VectorD (te_size) // y-predicted over testing set - for i <- 0 until te_size do // iterate through testing set - val t = tr_size + i // next time point to forecast -// if i % rc == 0 then mod.train (x(0 until t), y(0 until t)) // retrain on sliding training set (growing set) - if i % rc == 0 then mod.train (x(i until t), y(i until t)) // retrain on sliding training set (fixed size set) - yp(i) = mod.predict (x(t-1)) // predict the next value (only for h=1) - end for - - val (t, yy) = RollingValidation.align (tr_size, y) // align vectors - val df = max (0, mod.parameter.size - 1) // degrees of freedom for model - mod.resetDF (df, te_size - df) // reset degrees of freedom - new Plot (t, yy, yp, "Plot yy, yp vs. t", lines = true) - println (FitM.fitMap (mod.diagnose (yy, yp), qoF_names)) - end rollValidate - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TRAINING SET (tr) and a TESTING SET (te) - * as follows: [ <-- tr_size --> | <-- te_size --> ] - * This version calls predict for h-steps ahead out-of-sample forecasts. - * @see `RollingValidation` - * @param mod the forecasting model being used (e.g., `ARX`) - * @param rc the retraining cycle (number of forecasts until retraining occurs) - * @param hh the forecasting horizon (1, 2, ... 
h) - * @param te_size the size of the testing set (negative => use ratio to calculate - def rollValidate (mod: ARX, rc: Int, hh: Int, te_size_ : Int = -1): Unit = - val x = mod.getX // get data/input matrix - val y = mod.getY // get response/output vector - val ftMat = new MatrixD (hh, Fit.N_QoF) - -// println (s"rollValidate x = $x") - val yf = mod.forecastAll (y, x, hh) // get in-sample forecast matrix -// println (s"rollValidate x = $x") // FIX - forecastAll destroys x ??? - - val te_size = if te_size_ > 0 then te_size_ // size of initial testing set - else RollingValidation.teSize (y.dim) // calculate using testing ratio - val tr_size = y.dim - te_size // size of initial training set - debug ("rollValidate", s"y.dim = ${y.dim}, train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") - -// val yp = new VectorD (te_size) // y-predicted over testing set (only for h=1) - for i <- 0 until te_size do // iterate through testing set - val t = tr_size + i // next time point to forecast -// if i % rc == 0 then mod.train (x(0 until t), y(0 until t)) // retrain on sliding training set (gtowing set) - if i % rc == 0 then mod.train (x(i until t), y(i until t)) // retrain on sliding training set (fixed size set) -// yp(i) = mod.predict (x(t-1)) // predict the next value (only for h=1) -// yp(i) = mod.predict (t-1, x) // predict the next value (only for h=1) - mod.forecast (t-1, yf, hh) // forecast the next h-values - end for // yf is updated down its diagonals - - val df = max (1, mod.parameter.size - 1) // degrees of freedom for model - mod.resetDF (df, te_size - df) // reset degrees of freedom - val (t, yy) = RollingValidation.align (tr_size, y) // align vectors - - for h <- 1 to hh do - val yfh = yf(tr_size until y.dim, h) - debug ("rollValidate", s"horizon $h: actual: yy.dim = ${yy.dim}, forecasted: yfh.dim = ${yfh.dim}") - new Plot (t, yy, yfh, s"Plot yy, yfh vs. 
t for horizon h = $h)", lines = true) - ftMat(h-1) = mod.diagnose (yy, yfh) - end for - - banner (s"rollValidate: Evaluate ${mod.modelName}'s QoF for the horizons: 1 to $hh") - println ("fitMap qof = ") - println (FitM.showFitMap (ftMat.transpose, QoF.values.map (_.toString))) - end rollValidate - */ - -end ARX - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest` main function tests the `ARX` class. - * This test is used to CHECK that the buildMatrix4TS function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. - * > runMain scalation.modeling.forecasting.aRXTest - */ -@main def aRXTest (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - - for p <- 1 to 9 do // autoregressive hyper-parameter p - banner (s"Test: ARX with $p lags") - val mod = ARX (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yp = mod.predict (mod.getX) - new Plot (null, mod.getY, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - end for - -end aRXTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest2` main function tests the `ARX` class on real data: - * Forecasting lake levels. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.aRXTest2 - */ -@main def aRXTest2 (): Unit = - - import Example_LakeLevels.y - val h = 2 // the forecasting horizon - ARX.setTrend (true) - - for p <- 1 to 7 do // autoregressive hyper-parameter p - banner (s"Test: ARX with $p lags") - val mod = ARX (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) // parameter/coefficient statistics - - banner ("Predictions") - val (yx, yy) = mod.getXY // trimmed, input matrix and actual response vector - println (s"y.dim = ${y.dim}, yy.dim = ${yy.dim}, yx.dims = ${yx.dims}") - println (s"y = $y") - println (s"yy = $yy") - val yp = mod.predict (yx) // predicted response vector - new Plot (null, yy, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - println (s"yp = $yp") - - banner ("Forecasts") -// val yf = mod.forecast (yp, h) // forecasted response matrix - val yf = mod.forecastAll (y, yx, h) // forecasted response matrix - for k <- yf.indices2 do - new Plot (null, yy, yf(?, k), s"yy vs. yf_$k for ${mod.modelName} with $p lags", lines = true) - -// mod.testHorizons (h, y, yx) // calls testF for horizons 1 to h - ForecasterX.evalForecasts (mod, y, yx, h) - -// banner ("Forecast QoF") -// println (testForecast (mod, y, yf, p)) // QoF -// println (Fit.fitMap (mod.testf (k, y))) // evaluate k-units ahead forecasts - end for - -end aRXTest2 - -// val iskip = yy.indexWhere (_ >= 6.0) // find week with at least 6 deaths -// println (s"iskip = $iskip is first week with at least 6 deaths") - -import Example_Covid.{loadData, NO_EXO, response} - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest3` main function tests the `ARX` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous variable only. - * Does In-Sample Testing (In_ST). 
- * Determines the terms to include in the model using Feature Selection. - * > runMain scalation.modeling.forecasting.aRXTest3 - */ -@main def aRXTest3 (): Unit = - - val LAGS = 10 // number of lags of y - val h = 6 // forecasting horizon - - val (ex, y) = loadData (NO_EXO, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX on COVID-19 Weekly Data") - val mod = ARX (y, LAGS) // create ARX model for time series data - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - - banner (s"Multi-horizon forecasting using the recursive method") - val yx = mod.getX - val yf = mod.forecastAll (y, yx, h) // forecasted response matrix - for k <- 0 to h do - new Plot (null, y, yf(?, k), s"y vs. yf_$k for ${mod.modelName} with $LAGS lags @ horizon $k", lines = true) - -// mod.testHorizons (h, y, yx) // calls testF for horizons 1 to h - ForecasterX.evalForecasts (mod, y, yx, h) - - banner (s"Feature Selection Technique: stepRegression") - val (cols, rSq) = mod.stepRegressionAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"Stepwise: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end aRXTest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest4` main function tests the `ARX` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Does In-Sample Testing (In-ST). - * Determines the terms to include in the model using Feature Selection. 
- * > runMain scalation.modeling.forecasting.aRXTest4 - */ -@main def aRXTest4 (): Unit = - - val LAGS = 10 // number of lags of y - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX.exo on COVID-19 Weekly Data") - val mod = ARX.exo (y, LAGS, ex)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end aRXTest4 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest5` main function tests the `ARX` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Does In-Sample Testing (In-ST). - * Determines the terms to include in the model using Feature Selection. - * Run Train-n-Test (TnT) Split testing on best model. 
- * > runMain scalation.modeling.forecasting.aRXTest5 - */ -@main def aRXTest5 (): Unit = - - val LAGS = 10 // number of lags of y - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX.exo on COVID-19 Weekly Data") - val mod = ARX.exo (y, LAGS, ex)(1, LAGS+1) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run TnT on Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - val (x_, y_, xtest, ytest) = ARX.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) - new Plot (null, ytest, yptest, s"${mod.modelName}, ytest vs. yptest", lines = true) - -end aRXTest5 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest6` main function tests the `ARX` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous variables. - * Does In-Sample Testing (In-ST). 
- * Determines the terms to include in the model using Feature Selection. - * Run Train-n-Test (TnT) Split testing on best model using Rolling Validation. - * > runMain scalation.modeling.forecasting.aRXTest6 - */ -@main def aRXTest6 (): Unit = - - val LAGS = 10 // number of lags (values from past) - val rc = 1 // retraining cycle - val h = 6 // forecasting horizon - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample ARX.exo on COVID-19 Weekly Data") - val mod = ARX.exo (y, LAGS, ex)(1, LAGS+1) // create model for time series data with exo - - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for ARX with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run Rolling Validation on ARX Best model") - val bmod = mod.getBest._3.asInstanceOf [ARX] // get the best model from feature selection - ForecasterX.rollValidate (bmod, rc, h) - -end aRXTest6 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `aRXTest7` main function tests the `ARX` class on real data: - * Forecasts COVID-19 Weekly Data using endogenous and exogenous 
variables. - * Preliminary investigation of Symbolic Regression. - * > runMain scalation.modeling.forecasting.aRXTest7 - */ -@main def aRXTest7 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (ex, y) = loadData (exo_vars, response) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Plot Variables on COVID-19 Weekly Data") - for lag <- 10 to 10 do - val xx_ = ex(lag until y.dim) - val yy_ = y(0 until y.dim - lag) -// new Plot (xx_, yy_, null, s"deaths vs. exo-vars @ lag = $lag") - - val mod = SymbolicRegression (xx_, yy_, null, collection.mutable.Set (1.0), cross = false) - mod.trainNtest ()() - println (mod.summary ()) - end for - -end aRXTest7 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/Attention.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/Attention.scala.bak deleted file mode 100644 index 3ea85de37..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/Attention.scala.bak +++ /dev/null @@ -1,204 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Sep 4 13:09:52 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @title Model Framework: Context and Attention for Transformers - * - * @see https://sebastianraschka.com/blog/2023/self-attention-from-scratch.html - * @see https://arxiv.org/pdf/1706.03762.pdf (main paper) - */ - -package scalation -package modeling -package forecasting - -import scala.math.sqrt - -import scalation.mathstat._ -import scalation.random.{RandomMatD, RandomTenD} - -import ActivationFun.f_softmax - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Attention` trait provides methods for computing context vectors, single-head - * attention matrices and multi-head attention matrices. 
- * @param n_var the size of the input vector x_t (number of variables) - * @param n_mod the size of the output (dimensionality of the model, d_model) - * @param heads the number of attention heads - */ -trait Attention (n_var: Int, n_mod: Int = 512, heads: Int = 8): - - val n_k = n_mod / heads // size per head (dimensionality d_k, d_v) - val rmg = RandomMatD (n_k, n_var, 1) // random (0, 1) matrix generator for q, k, v - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute the Query, Key, Value matrices from the given input and weight matrices. - * @param x the input matrix - * @param w_q the weight matrix for query Q - * @param w_v the weight matrix for key K - * @param w_v the weight matrix for value V - */ - def queryKeyValue (x: MatrixD, w_q: MatrixD, w_k: MatrixD, w_v: MatrixD): (MatrixD, MatrixD, MatrixD) = - val x_t = x.transpose - (w_q * x_t, w_k * x_t, w_v * x_t) - end queryKeyValue - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute a Context Vector from the given query at time t (q_t), key (K) and value (V). - * @param q_t the query vector at time t (based on input vector x_t) - * @param k the key matrix K - * @param v the value matrix V - */ - def context (q_t: VectorD, k: MatrixD, v: MatrixD): VectorD = - val root_n = sqrt (q_t.dim) - f_softmax.f_ (k * (q_t / root_n)) *: v - end context - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute a Self-Attention Weight Matrix from the given query (Q), key (K) and value (V). 
- * @param q the query matrix Q (q_t over all time) - * @param k the key matrix K - * @param v the value matrix V - */ - def attention (q: MatrixD, k: MatrixD, v: MatrixD): MatrixD = - val root_n = sqrt (q.dim2) - f_softmax.fM (q * (k.transpose / root_n)) * v - end attention - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compute a Multi-Head, Self-Attention Weight Matrix by taking attention for each head - * and concatenating them; finally multiplying by the overall weight matrix w_o. - * The operator ++^ concatenates matrices column-wise. - * @param q the query matrix Q (q_t over all time) - * @param k the key matrix K - * @param v the value matrix V - * @param w_q the weight tensor for query Q (w_q(i) matrix for i-th head) - * @param w_v the weight tensor for key K (w_k(i) matrix for i-th head) - * @param w_v the weight tensor for value V (w_v(i) matrix for i-th head) - * @param w_o the overall weight matrix to be applied to concatenated attention - */ - def attentionMH (q: MatrixD, k: MatrixD, v: MatrixD, - w_q: TensorD, w_k: TensorD, w_v: TensorD, - w_o: MatrixD): MatrixD = - var att = attention (q * w_q(0), k * w_k(0), v * w_v(0)) - for i <- 1 until heads do att = att ++^ attention (q * w_q(i), k * w_k(i), v * w_v(i)) - println (s"att.dims = ${att.dims}, w_o.dims = ${w_o.dims}") - att * w_o - end attentionMH - -end Attention - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Attention` object contains sample a input matrix from - * @see https://sebastianraschka.com/blog/2023/self-attention-from-scratch.html - * The example is from 6 words with 16 dimensional encoding. 
- */ -object Attention: - - val x = MatrixD ((6, 16), 0.3374, -0.1778, -0.3035, -0.5880, 0.3486, 0.6603, -0.2196, -0.3792, // row 0 - 0.7671, -1.1925, 0.6984, -1.4097, 0.1794, 1.8951, 0.4954, 0.2692, - - 0.5146, 0.9938, -0.2587, -1.0826, -0.0444, 1.6236, -2.3229, 1.0878, // row 1 - 0.6716, 0.6933, -0.9487, -0.0765, -0.1526, 0.1167, 0.4403, -1.4465, - - 0.2553, -0.5496, 1.0042, 0.8272, -0.3948, 0.4892, -0.2168, -1.7472, // row 2 - -1.6025, -1.0764, 0.9031, -0.7218, -0.5951, -0.7112, 0.6230, -1.3729, - - -1.3250, 0.1784, -2.1338, 1.0524, -0.3885, -0.9343, -0.4991, -1.0867, // row 3 - 0.8805, 1.5542, 0.6266, -0.1755, 0.0983, -0.0935, 0.2662, -0.5850, - - -0.0770, -1.0205, -0.1690, 0.9178, 1.5810, 1.3010, 1.2753, -0.2010, // row 4 - 0.4965, -1.5723, 0.9666, -1.1481, -1.1589, 0.3255, -0.6315, -2.8400, - - 0.8768, 1.6221, -1.4779, 1.1331, -1.2203, 1.3139, 1.0533, 0.1388, // row 5 - 2.2473, -0.8036, -0.2808, 0.7697, -0.6596, -0.7979, 0.1838, 0.2293) - - val m = x.dim // number of time points - val n = x.dim2 // size of input x_t - -end Attention - -import Attention._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `attentionTest` function tests the `context` and `attention` top-level functions. 
- * > runMain scalation.modeling.forecasting.attentionTest - */ -@main def attentionTest (): Unit = - - val n_var = x.dim2 // number of variables in input vector x_t - val n_mod = 24 // size of each query/key vector (q_t, k_t, v_t) - val heads = 1 // number of attention heads - object att extends Attention (n_var, n_mod, heads) - - val (q, k, v) = att.queryKeyValue (x, att.rmg.gen, att.rmg.gen, att.rmg.gen) - - banner ("Dimensions for input x, query q, key k, value v") - println (s"x.dims = ${x.dims}") - println (s"q.dims = ${q.dims}") - println (s"k.dims = ${k.dims}") - println (s"v.dims = ${v.dims}") - - banner ("Attention Matrix") - val aw = att.attention (q, k, v) - println (s"aw.dims = ${aw.dims}") - println (s"aw = $aw") - - banner ("Context Vectors Collected into Matrix") - val cxt = new MatrixD (aw.dim, aw.dim2) - println (s"cxt.dims = ${cxt.dims}") - for i <- q.indices do cxt(i) = att.context (q(i), k, v) - println (s"cxt = $cxt") - - assert (cxt =~ aw) - -end attentionTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `attentionTest2` function tests the `attentionMH` top-level function. - * Test Multi-Head, Self-Head Attention. 
- * > runMain scalation.modeling.forecasting.attentionTest2 - */ -@main def attentionTest2 (): Unit = - - val n_var = x.dim2 // number of variables in input vector x_t - val n_mod = 72 // size of each query/key vector (q_t, k_t, v_t) - val heads = 3 // number of attention heads - object att extends Attention (n_var, n_mod, heads) - - val (q, k, v) = att.queryKeyValue (x, att.rmg.gen, att.rmg.gen, att.rmg.gen) - - banner ("Dimensions for input x, query q, key k, value v") - println (s"x.dims = ${x.dims}") - println (s"q.dims = ${q.dims}") - println (s"k.dims = ${k.dims}") - println (s"v.dims = ${v.dims}") - - // Multi-Head (MH) - - val rtg = RandomTenD (heads, n_mod, att.n_k, 1) // random (0, 1) tensor generator for q, k, v - val rmg = RandomMatD (n_mod, n_mod, 1) // random (0, 1) matrix generator for for w_o - - val wt_q = rtg.gen // MH query weight tensor: heads x n_mod x n_k - val wt_k = rtg.gen // MH key weight tensor: heads x n_mod x n_k - val wt_v = rtg.gen // MH value weight tensor; heads x n_mod x n_k - val w_o = rmg.gen // MH overall weight matrix: n_mod x n_mod - - banner ("Dimensions for query wt_q, key wt_k, value wt_v, overall w_o") - println (s"wt_q.dims = ${wt_q.dims}") - println (s"wt_k.dims = ${wt_k.dims}") - println (s"wt_v.dims = ${wt_v.dims}") - println (s"w_o.dims = ${w_o.dims}") - - banner ("Multi-Head Attention Matrix") - val aw = att.attentionMH (q, k, v, wt_q, wt_k, wt_v, w_o) - println (s"aw.dims = ${aw.dims}") - println (s"aw = $aw") - -end attentionTest2 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ForecastUtil.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ForecastUtil.scala.bak deleted file mode 100644 index c4697a22f..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ForecastUtil.scala.bak +++ /dev/null @@ -1,117 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * 
@version 2.0 - * @date Sun Feb 13 16:22:21 EST 2022 - * @see LICENSE (MIT style license file). - * - * @title Model: Utilities for Time Series Forecasting - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Given a response vector y, build and return - * (1) an input/predictor MATRIX xx and - * (2) an output/multi-horizon output/response MATRIX yy. - * The first lag responses can't be predicted due to missing past values. - * The last h-1 responses can't be predicted due to missing future values. - * Therefore the number of rows in xx and yy is reduced to y.dim + 1 - lag - h. - * @param y the given output/response vector - * @param lag the maximum lag included (inclusive) - * @param h the forecasting horizon (1, 2, ... h) - */ -def buildMatrix4TS (y: VectorD, lag: Int, h: Int): (MatrixD, MatrixD) = - val xx = new MatrixD (y.dim + 1 - lag - h, lag) - val yy = new MatrixD (y.dim + 1 - lag - h, h) - for i <- lag to y.dim - h do - for j <- xx.indices2 do xx(i-lag, lag - 1 - j) = y(i - 1 - j) - for j <- yy.indices2 do yy(i-lag, j) = y(i + j) - end for -// println (s"buildMatrix4TS: xx = $xx \n yy = $yy") - (xx, yy) -end buildMatrix4TS - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Given a response vector y, build and return - * (1) an input/predictor MATRIX xx and - * (2) an output/single-horizon output/response VECTOR yy. - * The first response can't be predicted due to missing past values. - * Therefore the number of rows in xx and yy is reduced to y.dim - 1. 
- * @param y the given output/response vector - * @param lag the maximum lag included (inclusive) - */ -def buildMatrix4TS (y: VectorD, lag: Int): (MatrixD, VectorD) = - val xx = new MatrixD (y.dim - 1, lag) - val yy = new VectorD (y.dim - 1) - for i <- 1 until y.dim do - for j <- xx.indices2 do xx(i-1, lag - 1 - j) = y(max(i - 1 - j, 0)) - yy(i-1) = y(i) - end for -// println (s"buildMatrix4TS: xx = $xx \n yy = $yy") - (xx, yy) -end buildMatrix4TS - -/* -def buildMatrix4TS (y: VectorD, lag: Int): (MatrixD, VectorD) = - val xx = new MatrixD (y.dim - lag, lag) - val yy = new VectorD (y.dim - lag) - for i <- lag until y.dim do - for j <- xx.indices2 do xx(i-lag, lag - 1 - j) = y(i - 1 - j) - yy(i-lag) = y(i) - end for -// println (s"buildMatrix4TS: xx = $xx \n yy = $yy") - (xx, yy) -end buildMatrix4TS -*/ - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Given an exogenous variable vector ex corresponding to an endogenous response - * vector y, build and return an input/predictor MATRIX xx. - * The first lag responses can't be predicted due to missing past values. - * Therefore the number of rows in xx is reduced to ex.dim - lag. 
- * @param ex the exogenous variable vector - * @param lag the maximum lag included (inclusive) for the endogenous variable - * @param elag1 the minimum lag included (inclusive) for the exogenous variable - * @param elag2 the maximum lag included (inclusive) for the exogenous variable - */ -def buildMatrix4TS_exo (ex: VectorD, lag: Int, elag1: Int, elag2: Int): MatrixD = - val flaw = flawf ("top") - val n = elag2 - elag1 - if n < 1 then flaw ("buildMatrix4TS_exo", "min exo lag must be smaller than max exo lag") - if elag2 > lag then flaw ("buildMatrix4TS_exo", "exo lag cannot exceed endogenous lag") - - val xx = new MatrixD (ex.dim - lag, n) - for i <- lag until ex.dim do - for j <- xx.indices2 do xx(i-lag, n - 1 - j) = ex(i - elag1 - j) - end for -// println (s"buildMatrix4TS_exo: xx = $xx") - xx -end buildMatrix4TS_exo - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Test the actual response vector vs. forecasted matrix, returning the QoF - * for all forecasting horizons 1 to h. 
- * @param mod the fittable model (one that extends `Fit`) - * @param y the orginal actual response vector - * @param yf the forecasted response matrix - * @param p the number of variables/lags used in the model - */ -def testForecast (mod: Fit, y: VectorD, yf: MatrixD, p: Int): MatrixD = - MatrixD (for k <- yf.indices2 yield - val y_ = y(p + k until y.dim) - val yf_ = yf(?, k)(0 until y.dim - p - k) - println (s"y_.dim = ${y_.dim}, yf_.dim = ${yf_.dim}") - mod.resetDF (p, y.dim - p - (k+1)) // reset the degrees of freedom - mod.diagnose (y_, yf_)) // return the QoF of the forecasts -end testForecast - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ForecastUtil.scala.bak2 b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ForecastUtil.scala.bak2 deleted file mode 100644 index 6e1b5dca5..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/ForecastUtil.scala.bak2 +++ /dev/null @@ -1,112 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Feb 13 16:22:21 EST 2022 - * @see LICENSE (MIT style license file). - * - * @title Model Framework: Utilities for Time Series Forecasting - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Given a response vector y, build and return - * (1) an input/predictor MATRIX xx and - * (2) an output/multi-horizon output/response MATRIX yy. - * Used by Multi-Variate (MV) forecast models such as `RegressionMV4TS`. - * that use DIRECT multi-horizon forecasting. - * The first lag responses can't be predicted due to missing past values. - * The last h-1 responses can't be predicted due to missing future values. - * Therefore the number of rows in xx and yy is reduced to y.dim + 1 - lags - h. 
- * - * FIX - try to extend to "val xx = new MatrixD (y.dim - h, lags)" - * - * @param y the given output/response vector - * @param lags the maximum lag included (inclusive) - * @param h the forecasting horizon (1, 2, ... h) - */ -def buildMatrix4TS (y: VectorD, lags: Int, h: Int): (MatrixD, MatrixD) = - val xx = new MatrixD (y.dim + 1 - lags - h, lags) - val yy = new MatrixD (y.dim + 1 - lags - h, h) - for i <- lags to y.dim - h do - for j <- xx.indices2 do xx(i-lags, lags - 1 - j) = y(i - 1 - j) - for j <- yy.indices2 do yy(i-lags, j) = if i + j >= y.dim then -0.0 else y(i + j) -// for j <- yy.indices2 do yy(i-lags, j) = y(i + j) - end for -// println (s"buildMatrix4TS: xx = $xx \n yy = $yy") - (xx, yy) -end buildMatrix4TS - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Given a response vector y, build and return - * (1) an input/predictor MATRIX xx and - * (2) an output/single-horizon output/response VECTOR yy. - * Used by Single-Variate forecast models such as `Regression4TS`. - * that use RECURSIVE multi-horizon forecasting. - * The first response can't be predicted due to missing past values. - * Therefore the number of rows in xx and yy is reduced to y.dim - 1. - * @param y the given output/response vector - * @param lags the maximum lag included (inclusive) - */ -def buildMatrix4TS (y: VectorD, lags: Int): (MatrixD, VectorD) = - val xx = new MatrixD (y.dim - 1, lags) - val yy = new VectorD (y.dim - 1) - for i <- 1 until y.dim do - for j <- xx.indices2 do xx(i-1, lags - 1 - j) = y(max(i - 1 - j, 0)) - yy(i-1) = y(i) - end for -// println (s"buildMatrix4TS: xx = $xx \n yy = $yy") - (xx, yy) -end buildMatrix4TS - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Given an exogenous variable vector ex corresponding to an endogenous response - * vector y, build and return an input/predictor MATRIX xx. - * The first lag responses can't be predicted due to missing past values. 
- * Therefore the number of rows in xx is reduced to ex.dim - lags. - * @param ex the exogenous variable vector - * @param lags the maximum lag included (inclusive) for the endogenous variable - * @param elag1 the minimum lag included (inclusive) for the exogenous variable - * @param elag2 the maximum lag included (inclusive) for the exogenous variable - */ -def buildMatrix4TS_exo (ex: VectorD, lags: Int, elag1: Int, elag2: Int): MatrixD = - val flaw = flawf ("top") - val n = elag2 - elag1 - if n < 1 then flaw ("buildMatrix4TS_exo", "min exo lag must be smaller than max exo lag") -// if elag2 > lags then flaw ("buildMatrix4TS_exo", "exo lag cannot exceed endogenous lag") - - val xx = new MatrixD (ex.dim - lags, n) - for i <- lags until ex.dim do - for j <- xx.indices2 do xx(i-lags, n - 1 - j) = ex(i - elag1 - j) - end for -// println (s"buildMatrix4TS_exo: xx = $xx") - xx -end buildMatrix4TS_exo - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Test the actual response vector vs. forecasted matrix, returning the QoF - * for all forecasting horizons 1 to h. 
- * @param mod the fittable model (one that extends `Fit`) - * @param y the orginal actual response vector - * @param yf the forecasted response matrix - * @param p the number of variables/lags used in the model - */ -def testForecast (mod: Fit, y: VectorD, yf: MatrixD, p: Int): MatrixD = - MatrixD (for k <- yf.indices2 yield - val y_ = y(p + k until y.dim) - val yf_ = yf(?, k)(0 until y.dim - p - k) - println (s"y_.dim = ${y_.dim}, yf_.dim = ${yf_.dim}") - mod.resetDF (p, y.dim - p - (k+1)) // reset the degrees of freedom - mod.diagnose (y_, yf_)) // return the QoF of the forecasts -end testForecast - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak deleted file mode 100644 index ed8ea4fd4..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak +++ /dev/null @@ -1,321 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Sai - * @version 2.0 - * @date - * @see LICENSE (MIT style license file). - * - * @title Gated Recurrent Unit (GRU) - * - * Translated from Matlab to Scala - * @see https://www.math.ucla.edu/~minchen/doc/BPTTTutorial.pdf - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/* This program tests the BPTT process we manually developed for GRU. - * We calculate the gradients of GRU parameters with chain rule , and then - * compare them to the numerical gradients to check whether our chain rule - * derivation is correct. - * - * Here, we provided 2 versions of BPTT, backward_direct() and backward(). - * The former one is the direct idea to calculate gradient within each step - * and add them up (O(sentence_sizeˆ2) time) . The latter one is optimized to - * calculate the contribution of each step to the overall gradient , which is - * only O(sentence_size) time. 
- * - * This is very helpful for people who want to implement GRU in Caffe since - * Caffe does not support auto−differentiation. This is also very helpful for - * the people who want to know the details about Back-propagation Through - * Time algorithm in the Recurrent Neural Networks (such as GRU and LSTM) - * and also get a sense on how auto−differentiation is possible. - * - * NOTE: We does not involve SGD training here. With SGD training, this - * program would become a complete implementation of GRU which can be - * trained with sequence data . However, since this is only a CPU serial - * Matlab version of GRU, applying it on large datasets will be dramatically - * slow. - * - * by Minchen Li, at The University of British Columbia. 2016−04−21 - */ - -package scalation -package modeling -package forecasting - -import scala.math.log - -import scalation.mathstat.{MatrixD, VectorD} -import scalation.random.{NormalMat, NormalVec_c} - -import ActivationFun.{sigmoid_, softmax_, tanh_} -import MatrixD.outer - -def log_ (x: VectorD): VectorD = x.map (log) - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `CRU` class implements Gated Recurrent Unit (GRU) via Back Propagation Through - * Time (BPTT). - * val (x, y) = getTrainingData (vocabulary_size, sentence_size) - * @param x the input sequence/time series - * @param y the output sequence/time series - */ -class GRU (x: MatrixD, y: MatrixD): - - // set GRU and data scale - private val iMem_size = 4 - private val vocabulary_size = x.dim // e.g., 64, number of distinct words - private val sentence_size = x.dim2 // e.g., 20, number of words in a sentence (including start and end symbol) - // since we will only use one sentence for training, - // this is also the total steps during training. 
- - private val _1 = VectorD.one (iMem_size) // vector of all ones for e.g., 1 - z - - // initialize parameters (weights and biases) - // multiplier for input x_t of intermediate variables - // note: Matlab rand -> NormalMat - private val rmg1 = NormalMat (iMem_size, vocabulary_size, 0.0, 0.01) - private val U_z = rmg1.gen - private val U_r = rmg1.gen - private val U_c = rmg1.gen - - // multiplier for previous s of intermediate variables - private val rmg2 = NormalMat (iMem_size, iMem_size, 0.0, 0.01) - private val W_z = rmg2.gen - private val W_r = rmg2.gen - private val W_c = rmg2.gen - - // bias terms of intermediate variables - converted to VectorD - private val rvg1 = NormalVec_c (iMem_size, 0.0, 0.01) - private val b_z = rvg1.gen - private val b_r = rvg1.gen - private val b_c = rvg1.gen - - // decoder for generating output - private val rmg3 = NormalMat (vocabulary_size, iMem_size, 0.0, 0.01) - private val rvg3 = NormalVec_c (vocabulary_size, 0.0, 0.01) - private val V = rmg3.gen - private val b_V = rvg3.gen // bias of decoder - converted to Vector - - // previous s of step 1 - private val s_0 = rvg1.gen // converted to vector - - private val max_epochs = 2 // maximum number of iterations - private val L = new VectorD (sentence_size) // store loss function values - println (s"L = $L") - - // initialize results - // Matlab: zeros -> new MatrixD - private val s = new MatrixD (iMem_size, sentence_size) // hidden state - private val y_hat = new MatrixD (vocabulary_size, sentence_size) // predicted output - private val z = new MatrixD (iMem_size, sentence_size) // update gate - private val r = new MatrixD (iMem_size, sentence_size) // reset gate - private val c = new MatrixD (iMem_size, sentence_size) // candidate state - - // the partial derivative of weights and biases - private var ds_0 = new VectorD (s_0.dim) - private var dU_c = new MatrixD (U_c.dim, U_c.dim2) - private var dU_r = new MatrixD (U_r.dim, U_r.dim2) - private var dU_z = new MatrixD (U_z.dim, 
U_z.dim2) - private var dW_c = new MatrixD (W_c.dim, W_c.dim2) - private var dW_r = new MatrixD (W_r.dim, W_r.dim2) - private var dW_z = new MatrixD (W_z.dim, W_z.dim2) - - private var db_z = new VectorD (b_z.dim) - private var db_r = new VectorD (b_r.dim) - private var db_c = new VectorD (b_c.dim) - - private var db_V: VectorD = null - private var dV = new MatrixD (V.dim, V.dim2) - - private val eta = 0.1 // the learning rate - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the GRU. - */ - def train (): Unit = - - for it <- 1 to max_epochs do - // forward propagate: get the intermediate and output results - forward () - - println (s"train: for epoch $it: loss function L = $L") - - // back propogate: calculate the gradient (the partial derivatives) - backward () - - // update the parameters (weights and biases) - update_params () - end for - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forward propagate calculates, y_hat, loss and intermediate variables for each step. - */ - def forward (): Unit = - - // calculate result for step 1 since s_0 is not in s - // note: Matlab row wild-card : becomes ? 
- // note: Matlab starts at 1, Scala at 0 - - z(?, 0) = sigmoid_ (U_z * x(?, 0) + W_z * s_0 + b_z) - r(?, 0) = sigmoid_ (U_r * x(?, 0) + W_r * s_0 + b_r) - c(?, 0) = tanh_ (U_c * x(?, 0) + W_c * (s_0 * r(?, 0) ) + b_c) - s(?, 0) = (_1 - z(?, 0)) * c(?, 0) + z(?, 0) * s_0 - y_hat(?, 0) = softmax_ (V * s(?, 0) + b_V) - L(0) = (-y(?, 0) * log_ (y_hat(?, 0))).sum - - // calculate results for step 2 − sentence_size similarly (i-th word) - // note Matlab element-wise multiplication .* becomes * - - for word <- 1 until sentence_size do - z(?, word) = sigmoid_ (U_z * x(?, word) + W_z * s(?, word-1) + b_z) - r(?, word) = sigmoid_ (U_r * x(?, word) + W_r * s(?, word-1) + b_r) - c(?, word) = tanh_ (U_c * x(?, word) + W_c * (s(?, word-1) * r(?, word)) + b_c) - s(?, word) = (_1 - z(?, word)) * c(?, word) + z(?, word) * s(?, word-1) - y_hat(?, word) = softmax_ (V * s(?, word) + b_V) - L(word) = (-y(?, word) * log_ (y_hat(?, word))).sum - end for - end forward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backward propagate to calculate gradient using chain rule (O(sentence_size) time) - */ - def backward (): Unit = - - // calculate gradient using chain rule - // note Matlab: A' is A.transpose - // note Matlab: sum (delta_y, 2) returns the row sums of matrix delta_y - // note Matlab: delta_y(?, word) * s(?, word)' -> delta_y(?, word) outer s(?, word) - val delta_y = y_hat - y - db_V = delta_y.sumVr -// dV = new MatrixD (V.dim, V.dim2) - for word <- 0 until sentence_size do - dV += outer (delta_y(?, word), s(?, word)) // outer vector product - end for - - val ds_single = V.transpose * delta_y - - // calculate the derivative contribution of each step and add them up - var ds_cur = new VectorD (ds_single.dim) - for word <- sentence_size-1 to 1 by -1 do - ds_cur += ds_single(?, word) - val ds_cur_bk = ds_cur - - // mix for new state - val dtanhInput = (ds_cur * (_1 - z(?, word)) * (_1 - c(?, word) * c(?, word))) - db_c += dtanhInput - dU_c 
+= outer (dtanhInput, x(?, word)) - dW_c += outer (dtanhInput, (s(?, word-1) * r(?, word))) - val dsr = W_c.transpose * dtanhInput - ds_cur = dsr * r(?, word) - - // reset gate - val dsigInput_r = dsr * s(?, word-1) * r(?, word) * (_1 - r(?, word)) - db_r += dsigInput_r - dU_r += outer (dsigInput_r, x(?, word)) - dW_r += outer (dsigInput_r, s(?, word-1)) - ds_cur += W_r.transpose * dsigInput_r - ds_cur += ds_cur_bk * z(?, word) - val dz = ds_cur_bk * (s(?, word-1) - c(?, word)) - - // update gate - val dsigInput_z = dz * z(?, word) * (_1 - z(?, word)) - db_z += dsigInput_z - dU_z += outer (dsigInput_z, x(?, word)) - dW_z += outer (dsigInput_z, s(?, word-1)) - ds_cur += W_z.transpose * dsigInput_z - end for - - // case: s_1 -> s_0 - ds_cur += ds_single(?, 0) - - val dtanhInput = (ds_cur * (_1 - z(?, 0)) * (_1 - c(?, 0) * c(?, 0))) - db_c += dtanhInput - dU_c += outer (dtanhInput, x(?, 0)) - dW_c += outer (dtanhInput, (s_0 * r(?, 0))) - val dsr = W_c.transpose * dtanhInput - ds_0 += dsr * r(?, 0) - - val dsigInput_r = dsr * s_0 * r(?, 0) * (_1 - r(?, 0)) - db_r += dsigInput_r - dU_r += outer (dsigInput_r, x(?, 0)) - dW_r += outer (dsigInput_r, s_0) - ds_0 += W_r.transpose * dsigInput_r - ds_0 += ds_cur * z(?, 0) - val dz = ds_cur * (s_0 - c(?, 0)) - - val dsigInput_z = dz * z(?, 0) * (_1 - z(?, 0)) - db_z += dsigInput_z - dU_z += outer (dsigInput_z, x(?, 0)) - dW_z += outer (dsigInput_z, s_0) - ds_0 += W_z.transpose * dsigInput_z - end backward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on the calculated partial derivatives, update the parameters (weights - * and biases). - */ - def update_params (): Unit = - println ("To Be Implemented") - end update_params - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Create a fake training dataset: generate only one sentence for training. - * Only for testing. Needs to be changed to read in training data from files. 
- * The words are one-hot encoded into a column vector. - */ -def getTrainingData (vocabulary_size: Int, sentence_size: Int): (MatrixD, MatrixD) = - - import scalation.random.Randi - - assert (vocabulary_size > 2) // for start and end of sentence symbols - assert (sentence_size > 0) - - // define start and end of sentence in the vocabulary - val SENTENCE_START = new VectorD (vocabulary_size) // start: [1, 0, 0, ...] - SENTENCE_START(0) = 1 - val SENTENCE_END = new VectorD (vocabulary_size) // end: [0, 1, 0, ...] - SENTENCE_END(1) = 1 - - println (s"SENTENCE_START = $SENTENCE_START") - println (s"SENTENCE_END = $SENTENCE_END") - - // generate sentence - val i_ran = Randi (0, vocabulary_size-3) // random integer generator - val z_t = new MatrixD (vocabulary_size, sentence_size-1) // leave one slot for SENTENCE START - for word <- 0 until sentence_size-1 do - // generate a random word excludes start and end symbol - z_t(i_ran.igen+2, word) = 1 // set a 1 in position to indicate a word - end for - - val x_t = SENTENCE_START +^: z_t // training input matrix (prepend vector) - val y_t = z_t :^+ SENTENCE_END // training output matrix (append vector) - - (x_t, y_t) -end getTrainingData - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest` main function tests the `GRU` class. 
- * > runMain scalation.modeling.forecasting.gRUTest - */ -@main def gRUTest (): Unit = - - val vocabulary_size = 5 - val sentence_size = 8 - - val (x_t, y_t) = getTrainingData (vocabulary_size, sentence_size) - - println (s"x_t = $x_t") - println (s"y_t = $y_t") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x_t, y_t) - mod.train () -// mod.test () - -end gRUTest - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak2 b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak2 deleted file mode 100644 index 8d48e08a0..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak2 +++ /dev/null @@ -1,370 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Sainagesh Veeravalli - * @version 2.0 - * @date Thu May 11 15:38:07 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @title Gated Recurrent Unit (GRU) for Multivariate Time Series - * - * Translated from Matlab to Scala - * @see https://www.math.ucla.edu/~minchen/doc/BPTTTutorial.pdf - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/* This Matlab program tests the BPTT process we manually developed for GRU. - * We calculate the gradients of GRU parameters with chain rule, and then - * compare them to the numerical gradients to check whether our chain rule - * derivation is correct. - * - * Here, we provided 2 versions of BPTT, backward_direct() and backward(). - * The former one is the direct idea to calculate gradient within each step - * and add them up (O(seq_sizeˆ2) time) . The latter one is optimized to - * calculate the contribution of each step to the overall gradient, which is - * only O(seq_size) time. - * - * This is very helpful for people who want to implement GRU in Caffe since - * Caffe does not support auto−differentiation. 
This is also very helpful for - * the people who want to know the details about Back-propagation Through - * Time algorithm in the Recurrent Neural Networks (such as GRU and LSTM) - * and also get a sense on how auto−differentiation is possible. - * - * NOTE: We does not involve SGD training here. With SGD training, this - * program would become a complete implementation of GRU which can be - * trained with sequence data. However, since this is only a CPU serial - * Matlab version of GRU, applying it on large datasets will be dramatically - * slow. - * - * Matlab code: - * by Minchen Li, at The University of British Columbia. 2016−04−21 - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Standard RNN (GRU/LSTM) Formats for Multivariate Time Series Data: - * Matlab: - * - * Keras: 3D format expected by GRU/LSTM is [samples, timesteps, features]. - * => indexing [timestamp t, lags k, variable j] - * PyTorch: - */ - -package scalation -package modeling -package forecasting - -import scala.math.log - -import scalation.mathstat.{MatrixD, VectorD} -import scalation.random.{NormalMat, NormalVec_c} - -import ActivationFun.{sigmoid_, softmax_, tanh_} -import MatrixD.outer - -def log_ (x: VectorD): VectorD = x.map (log) - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `CRU` class implements Gated Recurrent Unit (GRU) via Back Propagation Through - * Time (BPTT). At each time point x_t, there is a vector representing several variables - * or the encoding of a word. 
- * Time series: (x_t: t = 0, 1, ..., seq_size-1) where seq_size is the number of time points/words - * @param x the input sequence/time series - * @param y the output sequence/time series - */ -class GRU (x: MatrixD, y: MatrixD): - - // set GRU and data scale - private val mem_size = 4 // memory size for hidden state - private val vocab_size = x.dim // e.g., 64, number of variable or distinct words - private val seq_size = x.dim2 // e.g., 20, number of words in a sentence (including start and end symbol) - // since we will only use one sentence for training, - // this is also the total steps during training. - - private val _1 = VectorD.one (mem_size) // vector of all ones for e.g., 1 - z - - // initialize parameters (weights and biases) - // multiplier for input x_t of intermediate variables - // note: Matlab rand -> NormalMat or NormalVec_c - private val rmg1 = NormalMat (mem_size, vocab_size, 0.0, 0.01) - private var U_z = rmg1.gen - private var U_r = rmg1.gen - private var U_c = rmg1.gen - - // multiplier for previous s of intermediate variables - private val rmg2 = NormalMat (mem_size, mem_size, 0.0, 0.01) - private var W_z = rmg2.gen - private var W_r = rmg2.gen - private var W_c = rmg2.gen - - // bias terms of intermediate variables - converted to VectorD - private val rvg1 = NormalVec_c (mem_size, 0.0, 0.01) - private var b_z = rvg1.gen - private var b_r = rvg1.gen - private var b_c = rvg1.gen - - // decoder for generating output - private val rmg3 = NormalMat (vocab_size, mem_size, 0.0, 0.01) - private val rvg3 = NormalVec_c (vocab_size, 0.0, 0.01) - private var V = rmg3.gen - private var b_V = rvg3.gen // bias of decoder - converted to Vector - - // previous s of step 1 - private var s_0 = rvg1.gen // converted to vector - - private val max_epochs = 20 // maximum number of iterations - private val L = new VectorD (seq_size) // store loss function values - println (s"L = $L") - - // initialize results - // Matlab: zeros -> new MatrixD - private val s = 
new MatrixD (mem_size, seq_size) // hidden state (change s -> h) - private val yp = new MatrixD (vocab_size, seq_size) // predicted output - private val z = new MatrixD (mem_size, seq_size) // update gate - private val r = new MatrixD (mem_size, seq_size) // reset gate - private val c = new MatrixD (mem_size, seq_size) // candidate state - - // the partial derivative of weights and biases - private var ds_0 = new VectorD (s_0.dim) - private var dU_c = new MatrixD (U_c.dim, U_c.dim2) - private var dU_r = new MatrixD (U_r.dim, U_r.dim2) - private var dU_z = new MatrixD (U_z.dim, U_z.dim2) - private var dW_c = new MatrixD (W_c.dim, W_c.dim2) - private var dW_r = new MatrixD (W_r.dim, W_r.dim2) - private var dW_z = new MatrixD (W_z.dim, W_z.dim2) - - private var db_z = new VectorD (b_z.dim) - private var db_r = new VectorD (b_r.dim) - private var db_c = new VectorD (b_c.dim) - - private var db_V: VectorD = null - private var dV = new MatrixD (V.dim, V.dim2) - - private val eta = 0.25 // the learning rate - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the GRU using simple gradient descent. - */ - def train (): Unit = - - for it <- 1 to max_epochs do - // forward propagate: get the intermediate and output results - forward () - - println (s"train: for epoch $it: loss function L = $L") - println(s"train: for epoch $it: total loss function L.sum = ${L.sum}") - - // back propogate: calculate the gradient (the partial derivatives) - backward () - - // update the parameters (weights and biases) - update_params () - end for - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forward propagate calculates, yp, loss and intermediate variables for each step. - */ - def forward (): Unit = - - // calculate result for step 1 since s_0 is not in s - // note: Matlab: row wild-card : becomes ? 
- // note: Matlab: starts at 1, Scala at 0 - - z(?, 0) = sigmoid_ (U_z * x(?, 0) + W_z * s_0 + b_z) - r(?, 0) = sigmoid_ (U_r * x(?, 0) + W_r * s_0 + b_r) - c(?, 0) = tanh_ (U_c * x(?, 0) + W_c * (s_0 * r(?, 0) ) + b_c) - s(?, 0) = (_1 - z(?, 0)) * c(?, 0) + z(?, 0) * s_0 - yp(?, 0) = softmax_ (V * s(?, 0) + b_V) - L(0) = (-y(?, 0) * log_ (yp(?, 0))).sum - - // calculate results for step 2 − seq_size similarly (t-th word) - // note Matlab element-wise multiplication .* becomes * - - for t <- 1 until seq_size do - z(?, t) = sigmoid_ (U_z * x(?, t) + W_z * s(?, t-1) + b_z) - r(?, t) = sigmoid_ (U_r * x(?, t) + W_r * s(?, t-1) + b_r) - c(?, t) = tanh_ (U_c * x(?, t) + W_c * (s(?, t-1) * r(?, t)) + b_c) - s(?, t) = (_1 - z(?, t)) * c(?, t) + z(?, t) * s(?, t-1) - yp(?, t) = softmax_ (V * s(?, t) + b_V) - L(t) = (-y(?, t) * log_ (yp(?, t))).sum - end for - end forward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backward propagate to calculate gradient using chain rule (O(seq_size) time). 
- */ - def backward (): Unit = - - // calculate gradient using chain rule - // note Matlab: A' is A.transpose - // note Matlab: sum (delta_y, 2) returns the row sums of matrix delta_y - // note Matlab: delta_y(?, word) * s(?, word)' -> delta_y(?, t) outer s(?, t) - - val delta_y = yp - y - db_V = delta_y.sumVr -// dV = new MatrixD (V.dim, V.dim2) - for t <- 0 until seq_size do dV += outer (delta_y(?, t), s(?, t)) // outer vector product - - val ds_single = V.transpose * delta_y - - // calculate the derivative contribution of each step and add them up - var ds_cur = new VectorD (ds_single.dim) - for t <- seq_size-1 to 1 by -1 do - ds_cur += ds_single(?, t) - val ds_cur_bk = ds_cur - - // mix for new state - val dtanhIn = (ds_cur * (_1 - z(?, t)) * (_1 - c(?, t) * c(?, t))) - db_c += dtanhIn - dU_c += outer (dtanhIn, x(?, t)) - dW_c += outer (dtanhIn, (s(?, t-1) * r(?, t))) - val dsr = W_c.transpose * dtanhIn - ds_cur = dsr * r(?, t) - - // reset gate (r) - val dsigIn_r = dsr * s(?, t-1) * r(?, t) * (_1 - r(?, t)) - db_r += dsigIn_r - dU_r += outer (dsigIn_r, x(?, t)) - dW_r += outer (dsigIn_r, s(?, t-1)) - ds_cur += W_r.transpose * dsigIn_r - ds_cur += ds_cur_bk * z(?, t) - val dz = ds_cur_bk * (s(?, t-1) - c(?, t)) - - // update gate (z) - val dsigIn_z = dz * z(?, t) * (_1 - z(?, t)) - db_z += dsigIn_z - dU_z += outer (dsigIn_z, x(?, t)) - dW_z += outer (dsigIn_z, s(?, t-1)) - ds_cur += W_z.transpose * dsigIn_z - end for - - // case: s_1 -> s_0 - ds_cur += ds_single(?, 0) - - val dtanhIn = (ds_cur * (_1 - z(?, 0)) * (_1 - c(?, 0) * c(?, 0))) - db_c += dtanhIn - dU_c += outer (dtanhIn, x(?, 0)) - dW_c += outer (dtanhIn, (s_0 * r(?, 0))) - val dsr = W_c.transpose * dtanhIn - ds_0 += dsr * r(?, 0) - - val dsigIn_r = dsr * s_0 * r(?, 0) * (_1 - r(?, 0)) - db_r += dsigIn_r - dU_r += outer (dsigIn_r, x(?, 0)) - dW_r += outer (dsigIn_r, s_0) - ds_0 += W_r.transpose * dsigIn_r - ds_0 += ds_cur * z(?, 0) - val dz = ds_cur * (s_0 - c(?, 0)) - - val dsigIn_z = dz * z(?, 0) * 
(_1 - z(?, 0)) - db_z += dsigIn_z - dU_z += outer (dsigIn_z, x(?, 0)) - dW_z += outer (dsigIn_z, s_0) - ds_0 += W_z.transpose * dsigIn_z - end backward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on the calculated partial derivatives, update the parameters (weights - * and biases). - */ - def update_params (): Unit = - U_z -= dU_z * eta - U_r -= dU_r * eta - U_c -= dU_c * eta - W_z -= dW_z * eta - W_r -= dW_r * eta - W_c -= dW_c * eta - b_z -= db_z * eta - b_r -= db_r * eta - b_c -= db_c * eta - V -= dV * eta - b_V -= db_V * eta - end update_params - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Generate a fake sequence dataset: generate only one sentence for training. - * Only for testing. Needs to be changed to read in training data from files. - * The words are one-hot encoded into a column vector. - * @param vocab_size the number of variables/word encoding size - * @param seq_size the sequence size (number of time points/words) - */ -def genSequenceData (vocab_size: Int, seq_size: Int): (MatrixD, MatrixD) = - - import scalation.random.Randi - - assert (vocab_size > 2) // for start and end of sentence symbols - assert (seq_size > 0) - - // define start and end of sentence in the vocabulary - val SENTENCE_START = new VectorD (vocab_size) // start: [1, 0, 0, ...] - SENTENCE_START(0) = 1 - val SENTENCE_END = new VectorD (vocab_size) // end: [0, 1, 0, ...] 
- SENTENCE_END(1) = 1 - - println (s"SENTENCE_START = $SENTENCE_START") - println (s"SENTENCE_END = $SENTENCE_END") - - // generate sentence - val i_ran = Randi (0, vocab_size-3) // random integer generator - val z_t = new MatrixD (vocab_size, seq_size-1) // leave one slot for SENTENCE START - for t <- 0 until seq_size-1 do - // generate a random word excludes start and end symbol - z_t(i_ran.igen+2, t) = 1 // set a 1 in position to indicate a word - end for - - val x_t = SENTENCE_START +^: z_t // training input matrix (prepend vector) - val y_t = z_t :^+ SENTENCE_END // training output matrix (append vector) - - (x_t, y_t) -end genSequenceData - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest` main function tests the `GRU` class on randomly generated - * sequence data meant to represent encoded words - * > runMain scalation.modeling.forecasting.gRUTest - */ -@main def gRUTest (): Unit = - - val vocab_size = 5 - val seq_size = 8 - - val (x_t, y_t) = genSequenceData (vocab_size, seq_size) - - println (s"x_t = $x_t") - println (s"y_t = $y_t") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x_t, y_t) - mod.train () -// mod.test () - -end gRUTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest2` main function tests the `GRU` class on sequence data read as words - * in a file that encoded and pass into `GRU` - * > runMain scalation.modeling.forecasting.gRUTest2 - */ -@main def gRUTest2 (): Unit = - - println ("read words from a text file") - -end gRUTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest3` main function tests the `GRU` class on sequence data corresponding - * to multivariate time series data - * in a file that encoded and pass into `GRU` - * > runMain scalation.modeling.forecasting.gRUTest3 - */ -@main def gRUTest3 (): Unit = - - println ("read multivariate 
time series from a CSV file") - -end gRUTest3 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak3 b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak3 deleted file mode 100644 index f8b80544f..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak3 +++ /dev/null @@ -1,421 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Sainagesh Veeravalli - * @version 2.0 - * @date Thu May 11 15:38:07 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @title Gated Recurrent Unit (GRU) for Multivariate Time Series - * - * @see https://www.frontiersin.org/articles/10.3389/fncom.2021.678158/full - * - * Translated from Matlab to Scala - * @see https://www.math.ucla.edu/~minchen/doc/BPTTTutorial.pdf - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/* This Matlab program tests the BPTT process we manually developed for GRU. - * We calculate the gradients of GRU parameters with chain rule, and then - * compare them to the numerical gradients to check whether our chain rule - * derivation is correct. - * - * Here, we provided 2 versions of BPTT, backward_direct() and backward(). - * The former one is the direct idea to calculate gradient within each step - * and add them up (O(n_seqˆ2) time) . The latter one is optimized to - * calculate the contribution of each step to the overall gradient, which is - * only O(n_seq) time. - * - * This is very helpful for people who want to implement GRU in Caffe since - * Caffe does not support auto−differentiation. This is also very helpful for - * the people who want to know the details about Back-propagation Through - * Time algorithm in the Recurrent Neural Networks (such as GRU and LSTM) - * and also get a sense on how auto−differentiation is possible. - * - * NOTE: We does not involve SGD training here. 
With SGD training, this - * program would become a complete implementation of GRU which can be - * trained with sequence data. However, since this is only a CPU serial - * Matlab version of GRU, applying it on large datasets will be dramatically - * slow. - * - * Matlab code: - * by Minchen Li, at The University of British Columbia. 2016−04−21 - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Standard RNN (GRU/LSTM) Formats for Multivariate Time Series Data: - * Matlab: - * - * Keras: 3D format expected by GRU/LSTM is [samples, timesteps, features]. - * => indexing [timestamp t, lags k, variable j] - * PyTorch: - */ - -package scalation -package modeling -package forecasting - -import scala.math.log - -import scalation.mathstat.{MatrixD, VectorD} -import scalation.random.{NormalMat, NormalVec_c} - -import ActivationFun.{sigmoid_, softmax_, tanh_} -import MatrixD.outer - -def log_ (x: VectorD): VectorD = x.map (log) - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `GRU` class implements Gated Recurrent Unit (GRU) via Back Propagation Through - * Time (BPTT). At each time point x_t, there is a vector representing several variables - * or the encoding of a word. Intended to work for guessing the next work in a sentence - * or for multi-horizon forecasting. 
- * Time series: (x_t: t = 0, 1, ..., n_seq-1) where n_seq is the number of time points/words - * @param x the input sequence/time series - * @param y the output sequence/time series - * @param fname the feature/variable names - * @param n_mem the size for hidden state (h) (dimensionality of memory) - */ -class GRU (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): - - private val CLASSIF = false // whether to apply classification (e.g., guess next word) or forecast a value - - // set GRU and data scale - private val n_seq = x.dim // e.g., 20, number of words in a sentence (including start and end symbol) - private val n_var = x.dim2 // e.g., 64, number of variable or distinct words (vocabulary size) - // since we will only use one sentence for training, - // this is also the total steps during training. - - private val _1 = VectorD.one (n_mem) // vector of all ones for e.g., 1 - z - - // initialize parameters (weights and biases) - // multiplier for input x_t of intermediate variables - private val rmg1 = NormalMat (n_mem, n_var, 0.0, 0.01) - private var Uz = rmg1.gen - private var Ur = rmg1.gen - private var Uc = rmg1.gen - - // multiplier for previous s of intermediate variables - private val rmg2 = NormalMat (n_mem, n_mem, 0.0, 0.01) - private var Wz = rmg2.gen - private var Wr = rmg2.gen - private var Wc = rmg2.gen - - // bias terms of intermediate variables - converted to VectorD - private val rvg1 = NormalVec_c (n_mem, 0.0, 0.01) - private var b_z = rvg1.gen - private var b_r = rvg1.gen - private var b_c = rvg1.gen - - // decoder for generating output - private val rmg3 = NormalMat (n_var, n_mem, 0.0, 0.01) - private val rvg3 = NormalVec_c (n_var, 0.0, 0.01) - private var V = rmg3.gen - private var b_V = rvg3.gen // bias of decoder - converted to Vector - - // previous state h of step 1 - private var h_0 = rvg1.gen // converted to vector - - private val max_epochs = 20 // maximum number of iterations - private val L = new VectorD (n_seq) 
// store loss function values - println (s"L = $L") - - // initialize results - // Matlab: zeros -> new MatrixD - private val z = new MatrixD (n_seq, n_mem) // update gate (z) - private val r = new MatrixD (n_seq, n_mem) // reset gate (r) - private val c = new MatrixD (n_seq, n_mem) // candidate state (c) - private val h = new MatrixD (n_seq, n_mem) // hidden state (h) - private val yp = new MatrixD (n_seq, n_var) // predicted output - - // the partial derivative of weights and biases - private var dh_0 = new VectorD (h_0.dim) - private var dUc = new MatrixD (Uc.dim, Uc.dim2) - private var dUr = new MatrixD (Ur.dim, Ur.dim2) - private var dUz = new MatrixD (Uz.dim, Uz.dim2) - private var dWc = new MatrixD (Wc.dim, Wc.dim2) - private var dWr = new MatrixD (Wr.dim, Wr.dim2) - private var dWz = new MatrixD (Wz.dim, Wz.dim2) - - private var db_z = new VectorD (b_z.dim) - private var db_r = new VectorD (b_r.dim) - private var db_c = new VectorD (b_c.dim) - - private var db_V: VectorD = null - private var dV = new MatrixD (V.dim, V.dim2) - - private val eta = 0.02 // the learning rate (0.25 for gRUTest) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the GRU using simple gradient descent. - */ - def train (): Unit = - - for it <- 1 to max_epochs do - // forward propagate: get the intermediate and output results - forward () - - println (s"train: for epoch $it: loss function L = $L") - println(s"train: for epoch $it: total loss function L.sum = ${L.sum}") - - // back propogate: calculate the gradient (the partial derivatives) - backward () - - // update the parameters (weights and biases) - update_params () - end for - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forward propagate calculates yp, loss and intermediate variables for each step. 
- */ - def forward (): Unit = - - // calculate result for step 1 since h_0 is not in state h - - z(0) = sigmoid_ (Uz * x(0) + Wz * h_0 + b_z) // update gate - r(0) = sigmoid_ (Ur * x(0) + Wr * h_0 + b_r) // reset gate - c(0) = tanh_ (Uc * x(0) + Wc * (h_0 * r(0) ) + b_c) // candidate state - h(0) = z(0) * h_0 + (_1 - z(0)) * c(0) // hidden state - if CLASSIF then - yp(0) = softmax_ (V * h(0) + b_V) // activation: softmax for classification - L(0) = (-y(0) * log_ (yp(0))).sum // cross-entropy loss function - else - yp(0) = V * h(0) + b_V // activation: id for forecasting - L(0) = (y(0) - yp(0)).normSq // sse loss function - end if - - // calculate results for step 2 − n_seq similarly (t-th word) - - for t <- 1 until n_seq do - z(t) = sigmoid_ (Uz * x(t) + Wz * h(t-1) + b_z) // update gate - r(t) = sigmoid_ (Ur * x(t) + Wr * h(t-1) + b_r) // reset gate - c(t) = tanh_ (Uc * x(t) + Wc * (h(t-1) * r(t)) + b_c) // candidate state - h(t) = z(t) * h(t-1) + (_1 - z(t)) * c(t) // hidden state - if CLASSIF then - yp(t) = softmax_ (V * h(t) + b_V) // activation: softmax for classification - L(t) = (-y(t) * log_ (yp(t))).sum // cross-entropy loss function - else - yp(t) = V * h(t) + b_V // activation: id for forecasting - L(t) = (y(t) - yp(t)).normSq // sse loss function - end if - end for - end forward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backward propagate to calculate gradient using chain rule (O(n_seq) time). 
- * FIX - add option of using sse loss function and fix affected partial derivatives - */ - def backward (): Unit = - - // calculate gradient using chain rule - - println (s"yp.dims = ${yp.dims}, y.dims = ${y.dims}") - val delta_y = yp - y - db_V = delta_y.sumVr -// dV = new MatrixD (V.dim, V.dim2) - for t <- 0 until n_seq do dV += outer (delta_y(t), h(t)) // outer vector product - -// val dh_single = V.transpose * delta_y - val dh_single = delta_y * V // n_seq by n_mem matrix - - // calculate the derivative contribution of each step and add them up - var dh_cur = new VectorD (dh_single.dim2) - for t <- n_seq-1 to 1 by -1 do - dh_cur += dh_single(t) - val dh_cur_bk = dh_cur - - // mix for new state - val dtanhIn = (dh_cur * (_1 - z(t)) * (_1 - c(t) * c(t))) - db_c += dtanhIn - dUc += outer (dtanhIn, x(t)) - dWc += outer (dtanhIn, (h(t-1) * r(t))) - val dhr = Wc.transpose * dtanhIn - dh_cur = dhr * r(t) - - // reset gate (r) - val dsigIn_r = dhr * h(t-1) * r(t) * (_1 - r(t)) - db_r += dsigIn_r - dUr += outer (dsigIn_r, x(t)) - dWr += outer (dsigIn_r, h(t-1)) - dh_cur += Wr.transpose * dsigIn_r - dh_cur += dh_cur_bk * z(t) - val dz = dh_cur_bk * (h(t-1) - c(t)) - - // update gate (z) - val dsigIn_z = dz * z(t) * (_1 - z(t)) - db_z += dsigIn_z - dUz += outer (dsigIn_z, x(t)) - dWz += outer (dsigIn_z, h(t-1)) - dh_cur += Wz.transpose * dsigIn_z - end for - - // case: state s_1 -> h_0 - dh_cur += dh_single(0) - - val dtanhIn = (dh_cur * (_1 - z(0)) * (_1 - c(0) * c(0))) - db_c += dtanhIn - dUc += outer (dtanhIn, x(0)) - dWc += outer (dtanhIn, (h_0 * r(0))) - val dhr = Wc.transpose * dtanhIn - dh_0 += dhr * r(0) - - val dsigIn_r = dhr * h_0 * r(0) * (_1 - r(0)) - db_r += dsigIn_r - dUr += outer (dsigIn_r, x(0)) - dWr += outer (dsigIn_r, h_0) - dh_0 += Wr.transpose * dsigIn_r - dh_0 += dh_cur * z(0) - val dz = dh_cur * (h_0 - c(0)) - - val dsigIn_z = dz * z(0) * (_1 - z(0)) - db_z += dsigIn_z - dUz += outer (dsigIn_z, x(0)) - dWz += outer (dsigIn_z, h_0) - dh_0 += 
Wz.transpose * dsigIn_z - end backward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on the calculated partial derivatives, update the parameters (weights - * and biases). - */ - def update_params (): Unit = - Uz -= dUz * eta - Ur -= dUr * eta - Uc -= dUc * eta - Wz -= dWz * eta - Wr -= dWr * eta - Wc -= dWc * eta - b_z -= db_z * eta - b_r -= db_r * eta - b_c -= db_c * eta - V -= dV * eta - b_V -= db_V * eta - end update_params - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `GRU` companion object provides factory methods. - */ -object GRU: - - import ActivationFun._ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `GRU` with automatic rescaling from a data matrix and response matrix. - * @param x the input/data matrix - * @param y the output/response matrix - * @param fname the feature/variable names - * @param n_mem the size of the hidden state (dimensionality of memory) - */ - def rescale (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): GRU = - val x_s = rescaleX (x, f_sigmoid) - val y_s = rescaleY (y, f_sigmoid)._1 - -// println (s" scaled: x = $x_s \n scaled y = $y_s") - new GRU (x_s, y_s, fname, n_mem) - end rescale - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Generate a fake sequence dataset: generate only one sentence for training. - * Only for testing. Needs to be changed to read in training data from files. - * The words are one-hot encoded into a column vector. 
- * @param n_seq the sequence size (number of time points/words) - * @param n_var the number of variables/word encoding size - */ -def genSequenceData (n_seq: Int, n_var: Int): (MatrixD, MatrixD) = - - import scalation.random.Randi - - assert (n_var > 2) // for start and end of sentence symbols - assert (n_seq > 0) - - // define start and end of sentence in the vocabulary - val SENTENCE_START = new VectorD (n_var) // start: [1, 0, 0, ...] - SENTENCE_START(0) = 1 - val SENTENCE_END = new VectorD (n_var) // end: [0, 1, 0, ...] - SENTENCE_END(1) = 1 - - println (s"SENTENCE_START = $SENTENCE_START") - println (s"SENTENCE_END = $SENTENCE_END") - - // generate sentence - val i_ran = Randi (0, n_var-3) // random integer generator - val z_t = new MatrixD (n_seq-1, n_var) // leave one slot for SENTENCE START - for t <- 0 until n_seq-1 do - // generate a random word excludes start and end symbol - z_t(t, i_ran.igen+2) = 1 // set a 1 in position to indicate a word - end for - - val x_t = SENTENCE_START +: z_t // training input matrix (prepend vector) - val y_t = z_t :+ SENTENCE_END // training output matrix (append vector) - - (x_t, y_t) -end genSequenceData - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest` main function tests the `GRU` class on randomly generated - * sequence data meant to represent encoded words - * > runMain scalation.modeling.forecasting.gRUTest - */ -@main def gRUTest (): Unit = - - val n_seq = 8 - val n_var = 5 - - val (x_t, y_t) = genSequenceData (n_seq, n_var) - - println (s"x_t = $x_t") - println (s"y_t = $y_t") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x_t, y_t) - mod.train () -// mod.test () - -end gRUTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest2` main function tests the `GRU` class on sequence data read as words - * in a file that encoded and pass into `GRU` - * > runMain 
scalation.modeling.forecasting.gRUTest2 - */ -@main def gRUTest2 (): Unit = - - println ("read words from a text file") - -// FIX - find example text - -end gRUTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest3` main function tests the `GRU` class on sequence/time series data - * corresponding to the lake level dataset using multiple lags. - * > runMain scalation.modeling.forecasting.gRUTest3 - */ -@main def gRUTest3 (): Unit = - - import Example_LakeLevels.y - val lag = 2 // number of lags to include - val hh = 2 // forecasting horizon - FIX - currently lags == hh - - val y_s = scaleV (extreme (y), (-2.0, 2.0))(y) // rescale y to active domain of sigmoid, tanh - - val (x, yy) = RegressionMV4TS.buildMatrix (y_s, lag, hh) // column for each lag - - println (s"x.dims = ${x.dims}, yy.dims = ${yy.dims}") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x, yy) // call constructor - mod.train () -// mod.test () - -end gRUTest3 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak4 b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak4 deleted file mode 100644 index 6b3b273e3..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak4 +++ /dev/null @@ -1,432 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Sainagesh Veeravalli - * @version 2.0 - * @date Thu May 11 15:38:07 EDT 2023 - * @see LICENSE (MIT style license file). 
- * - * @title Gated Recurrent Unit (GRU) for Multivariate Time Series - * - * @see https://www.frontiersin.org/articles/10.3389/fncom.2021.678158/full - * - * Translated from Matlab to Scala - * @see https://www.math.ucla.edu/~minchen/doc/BPTTTutorial.pdf - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/* This Matlab program tests the BPTT process we manually developed for GRU. - * We calculate the gradients of GRU parameters with chain rule, and then - * compare them to the numerical gradients to check whether our chain rule - * derivation is correct. - * - * Here, we provided 2 versions of BPTT, backward_direct() and backward(). - * The former one is the direct idea to calculate gradient within each step - * and add them up (O(n_seqˆ2) time) . The latter one is optimized to - * calculate the contribution of each step to the overall gradient, which is - * only O(n_seq) time. - * - * This is very helpful for people who want to implement GRU in Caffe since - * Caffe does not support auto−differentiation. This is also very helpful for - * the people who want to know the details about Back-propagation Through - * Time algorithm in the Recurrent Neural Networks (such as GRU and LSTM) - * and also get a sense on how auto−differentiation is possible. - * - * NOTE: We does not involve SGD training here. With SGD training, this - * program would become a complete implementation of GRU which can be - * trained with sequence data. However, since this is only a CPU serial - * Matlab version of GRU, applying it on large datasets will be dramatically - * slow. - * - * Matlab code: - * by Minchen Li, at The University of British Columbia. 2016−04−21 - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Standard RNN (GRU/LSTM) Formats for Multivariate Time Series Data: - * Matlab: - * - * Keras: 3D format expected by GRU/LSTM is [samples, timesteps, features]. 
- * => indexing [timestamp t, lags k, variable j] - * PyTorch: - */ - -package scalation -package modeling -package forecasting - -import scala.math.log - -import scalation.mathstat.{MatrixD, VectorD} -import scalation.random.{NormalMat, NormalVec_c} - -import ActivationFun.{sigmoid_, softmax_, tanh_} -import MatrixD.outer - -def log_ (x: VectorD): VectorD = x.map (log) - - -case class Gate (n_seq: Int, n_mem: Int, n_var: Int): - - val v = new MatrixD (n_seq, n_mem) // gate value time x state - var dU = new MatrixD (n_mem, n_var) // partial w.r.t. weight matrix U - var dW = new MatrixD (n_mem, n_mem) // partial w.r.t. weight matrix W - var db = new VectorD (n_mem) // partial w.r.t. bias vector b - - def apply (t: Int): VectorD = v(t) - def update (t: Int, vv: VectorD): Unit = v(t) = vv - -end Gate - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `GRU` class implements Gated Recurrent Unit (GRU) via Back Propagation Through - * Time (BPTT). At each time point x_t, there is a vector representing several variables - * or the encoding of a word. Intended to work for guessing the next work in a sentence - * or for multi-horizon forecasting. 
- * Time series: (x_t: t = 0, 1, ..., n_seq-1) where n_seq is the number of time points/words - * @param x the input sequence/time series - * @param y the output sequence/time series - * @param fname the feature/variable names - * @param n_mem the size for hidden state (h) (dimensionality of memory) - */ -class GRU (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): - - private val CLASSIF = false // whether to apply classification (e.g., guess next word) or forecast a value - - // set GRU and data scale - private val n_seq = x.dim // e.g., 20, number of words in a sentence (including start and end symbol) - private val n_var = x.dim2 // e.g., 64, number of variable or distinct words (vocabulary size) - // since we will only use one sentence for training, - // this is also the total steps during training. - - private val _1 = VectorD.one (n_mem) // vector of all ones for e.g., 1 - z - - // initialize parameters (weights and biases) - // multiplier for input x_t of intermediate variables - private val rmg1 = NormalMat (n_mem, n_var, 0.0, 0.01) - private var Uz = rmg1.gen - private var Ur = rmg1.gen - private var Uc = rmg1.gen - - // multiplier for previous s of intermediate variables - private val rmg2 = NormalMat (n_mem, n_mem, 0.0, 0.01) - private var Wz = rmg2.gen - private var Wr = rmg2.gen - private var Wc = rmg2.gen - - // bias terms of intermediate variables - converted to VectorD - private val rvg1 = NormalVec_c (n_mem, 0.0, 0.01) - private var b_z = rvg1.gen - private var b_r = rvg1.gen - private var b_c = rvg1.gen - - // decoder for generating output - private val rmg3 = NormalMat (n_var, n_mem, 0.0, 0.01) - private val rvg3 = NormalVec_c (n_var, 0.0, 0.01) - private var V = rmg3.gen - private var b_V = rvg3.gen // bias of decoder - converted to Vector - - // previous state h of step 1 (t = -1) - private var h_init = rvg1.gen // converted to vector - - private val max_epochs = 20 // maximum number of iterations - private val L = new 
VectorD (n_seq) // store loss function values - println (s"L = $L") - - private val z = Gate (n_seq, n_mem, n_var) // update gate (z) - private val r = Gate (n_seq, n_mem, n_var) // reset gate (r) - private val c = Gate (n_seq, n_mem, n_var) // candidate state (c) - -/* - private val z = new MatrixD (n_seq, n_mem) // update gate (z) - private var dUz = new MatrixD (n_mem, n_var) - private var dWz = new MatrixD (n_mem, n_mem) - private var db_z = new VectorD (n_mem) - - private val r = new MatrixD (n_seq, n_mem) // reset gate (r) - private var dUr = new MatrixD (n_mem, n_var) - private var dWr = new MatrixD (n_mem, n_mem) - private var db_r = new VectorD (n_mem) - - private val c = new MatrixD (n_seq, n_mem) // candidate state (c) - private var dUc = new MatrixD (n_mem, n_var) - private var dWc = new MatrixD (n_mem, n_mem) - private var db_c = new VectorD (n_mem) -*/ - - private val h = new MatrixD (n_seq, n_mem) // hidden state (h) - private val yp = new MatrixD (n_seq, n_var) // predicted output - - // the partial derivative of weights and biases - private var dh_init = new VectorD (h_init.dim) - - private var db_V: VectorD = null - private var dV = new MatrixD (V.dim, V.dim2) - - private val eta = 0.02 // the learning rate (0.25 for gRUTest) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the GRU using simple gradient descent. 
- */ - def train (): Unit = - - for it <- 1 to max_epochs do - // forward propagate: get the intermediate and output results - forward () - - println (s"train: for epoch $it: loss function L = $L") - println(s"train: for epoch $it: total loss function L.sum = ${L.sum}") - - // back propogate: calculate the gradient (the partial derivatives) - backward () - - // update the parameters (weights and biases) - update_params () - end for - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forward propagate calculates yp, loss and intermediate variables for each step. - */ - def forward (): Unit = - for t <- 0 until n_seq do - val h_pre = if t == 0 then h_init else h(t-1) // get previous hidden state - z(t) = sigmoid_ (Uz * x(t) + Wz * h_pre + b_z) // update gate - r(t) = sigmoid_ (Ur * x(t) + Wr * h_pre + b_r) // reset gate - c(t) = tanh_ (Uc * x(t) + Wc * (h_pre * r(t)) + b_c) // candidate state - h(t) = z(t) * h_pre + (_1 - z(t)) * c(t) // hidden state - if CLASSIF then - yp(t) = softmax_ (V * h(t) + b_V) // activation: softmax for classification - L(t) = (-y(t) * log_ (yp(t))).sum // cross-entropy loss function - else - yp(t) = V * h(t) + b_V // activation: id for forecasting - L(t) = (y(t) - yp(t)).normSq // sse loss function - end if - end for - end forward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backward propagate to calculate gradient using chain rule (O(n_seq) time). 
- * FIX - add option of using sse loss function and fix affected partial derivatives - */ - def backward (): Unit = - - // calculate gradient using chain rule - - println (s"yp.dims = ${yp.dims}, y.dims = ${y.dims}") - val delta_y = yp - y - db_V = delta_y.sumVr -// dV = new MatrixD (V.dim, V.dim2) - for t <- 0 until n_seq do dV += outer (delta_y(t), h(t)) // outer vector product - -// val dh_single = V.transpose * delta_y - val dh_single = delta_y * V // n_seq by n_mem matrix - - // calculate the derivative contribution of each step and add them up - var dh_cur = new VectorD (dh_single.dim2) - for t <- n_seq-1 to 1 by -1 do - dh_cur += dh_single(t) - val dh_cur_bk = dh_cur - - // mix for new candidate state - val dtanhIn = (dh_cur * (_1 - z(t)) * (_1 - c(t) * c(t))) - c.db += dtanhIn - c.dU += outer (dtanhIn, x(t)) - c.dW += outer (dtanhIn, (h(t-1) * r(t))) - val dhr = Wc.transpose * dtanhIn - dh_cur = dhr * r(t) - - // reset gate (r) - val dsigIn_r = dhr * h(t-1) * r(t) * (_1 - r(t)) - r.db += dsigIn_r - r.dU += outer (dsigIn_r, x(t)) - r.dW += outer (dsigIn_r, h(t-1)) - dh_cur += Wr.transpose * dsigIn_r - dh_cur += dh_cur_bk * z(t) - - // update gate (z) - val dz = dh_cur_bk * (h(t-1) - c(t)) - val dsigIn_z = dz * z(t) * (_1 - z(t)) - z.db += dsigIn_z - z.dU += outer (dsigIn_z, x(t)) - z.dW += outer (dsigIn_z, h(t-1)) - dh_cur += Wz.transpose * dsigIn_z - end for - - // case: state s_1 -> h_init - dh_cur += dh_single(0) - - val dtanhIn = (dh_cur * (_1 - z(0)) * (_1 - c(0) * c(0))) - c.db += dtanhIn - c.dU += outer (dtanhIn, x(0)) - c.dW += outer (dtanhIn, (h_init * r(0))) - val dhr = Wc.transpose * dtanhIn - dh_init += dhr * r(0) - - val dsigIn_r = dhr * h_init * r(0) * (_1 - r(0)) - r.db += dsigIn_r - r.dU += outer (dsigIn_r, x(0)) - r.dW += outer (dsigIn_r, h_init) - dh_init += Wr.transpose * dsigIn_r - dh_init += dh_cur * z(0) - - // update gate (z) - val dz = dh_cur * (h_init - c(0)) - val dsigIn_z = dz * z(0) * (_1 - z(0)) - z.db += dsigIn_z - z.dU += outer 
(dsigIn_z, x(0)) - z.dW += outer (dsigIn_z, h_init) - dh_init += Wz.transpose * dsigIn_z - end backward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on the calculated partial derivatives, update the parameters (weights - * and biases). - */ - def update_params (): Unit = - // update gate (z) - Uz -= z.dU * eta - Wz -= z.dW * eta - b_z -= z.db * eta - - // reset gate (r) - Ur -= r.dU * eta - Wr -= r.dW * eta - b_r -= r.db * eta - - // candidate state (c) - Uc -= c.dU * eta - Wc -= c.dW * eta - b_c -= c.db * eta - - V -= dV * eta - b_V -= db_V * eta - end update_params - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `GRU` companion object provides factory methods. - */ -object GRU: - - import ActivationFun._ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `GRU` with automatic rescaling from a data matrix and response matrix. - * @param x the input/data matrix - * @param y the output/response matrix - * @param fname the feature/variable names - * @param n_mem the size of the hidden state (dimensionality of memory) - */ - def rescale (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): GRU = - val x_s = rescaleX (x, f_sigmoid) - val y_s = rescaleY (y, f_sigmoid)._1 - -// println (s" scaled: x = $x_s \n scaled y = $y_s") - new GRU (x_s, y_s, fname, n_mem) - end rescale - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Generate a fake sequence dataset: generate only one sentence for training. - * Only for testing. Needs to be changed to read in training data from files. - * The words are one-hot encoded into a column vector. 
- * @param n_seq the sequence size (number of time points/words) - * @param n_var the number of variables/word encoding size - */ -def genSequenceData (n_seq: Int, n_var: Int): (MatrixD, MatrixD) = - - import scalation.random.Randi - - assert (n_var > 2) // for start and end of sentence symbols - assert (n_seq > 0) - - // define start and end of sentence in the vocabulary - val SENTENCE_START = new VectorD (n_var) // start: [1, 0, 0, ...] - SENTENCE_START(0) = 1 - val SENTENCE_END = new VectorD (n_var) // end: [0, 1, 0, ...] - SENTENCE_END(1) = 1 - - println (s"SENTENCE_START = $SENTENCE_START") - println (s"SENTENCE_END = $SENTENCE_END") - - // generate sentence - val i_ran = Randi (0, n_var-3) // random integer generator - val z_t = new MatrixD (n_seq-1, n_var) // leave one slot for SENTENCE START - for t <- 0 until n_seq-1 do - // generate a random word excludes start and end symbol - z_t(t, i_ran.igen+2) = 1 // set a 1 in position to indicate a word - end for - - val x_t = SENTENCE_START +: z_t // training input matrix (prepend vector) - val y_t = z_t :+ SENTENCE_END // training output matrix (append vector) - - (x_t, y_t) -end genSequenceData - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest` main function tests the `GRU` class on randomly generated - * sequence data meant to represent encoded words - * > runMain scalation.modeling.forecasting.gRUTest - */ -@main def gRUTest (): Unit = - - val n_seq = 8 - val n_var = 5 - - val (x_t, y_t) = genSequenceData (n_seq, n_var) - - println (s"x_t = $x_t") - println (s"y_t = $y_t") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x_t, y_t) - mod.train () -// mod.test () - -end gRUTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest2` main function tests the `GRU` class on sequence data read as words - * in a file that encoded and pass into `GRU` - * > runMain 
scalation.modeling.forecasting.gRUTest2 - */ -@main def gRUTest2 (): Unit = - - println ("read words from a text file") - -// FIX - find example text - -end gRUTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest3` main function tests the `GRU` class on sequence/time series data - * corresponding to the lake level dataset using multiple lags. - * > runMain scalation.modeling.forecasting.gRUTest3 - */ -@main def gRUTest3 (): Unit = - - import Example_LakeLevels.y - val lag = 2 // number of lags to include - val hh = 2 // forecasting horizon - FIX - currently lags == hh - - val y_s = scaleV (extreme (y), (-2.0, 2.0))(y) // rescale y to active domain of sigmoid, tanh - - val (x, yy) = RegressionMV4TS.buildMatrix (y_s, lag, hh) // column for each lag - - println (s"x.dims = ${x.dims}, yy.dims = ${yy.dims}") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x, yy) // call constructor - mod.train () -// mod.test () - -end gRUTest3 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak5 b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak5 deleted file mode 100644 index 01079ee26..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak5 +++ /dev/null @@ -1,401 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Sainagesh Veeravalli - * @version 2.0 - * @date Thu May 11 15:38:07 EDT 2023 - * @see LICENSE (MIT style license file). 
- * - * @title Gated Recurrent Unit (GRU) for Multivariate Time Series - * - * @see https://www.frontiersin.org/articles/10.3389/fncom.2021.678158/full - * - * Translated from Matlab to Scala - * @see https://www.math.ucla.edu/~minchen/doc/BPTTTutorial.pdf - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/* This Matlab program tests the BPTT process we manually developed for GRU. - * We calculate the gradients of GRU parameters with chain rule, and then - * compare them to the numerical gradients to check whether our chain rule - * derivation is correct. - * - * Here, we provided 2 versions of BPTT, backward_direct() and backward(). - * The former one is the direct idea to calculate gradient within each step - * and add them up (O(n_seqˆ2) time) . The latter one is optimized to - * calculate the contribution of each step to the overall gradient, which is - * only O(n_seq) time. - * - * This is very helpful for people who want to implement GRU in Caffe since - * Caffe does not support auto−differentiation. This is also very helpful for - * the people who want to know the details about Back-propagation Through - * Time algorithm in the Recurrent Neural Networks (such as GRU and LSTM) - * and also get a sense on how auto−differentiation is possible. - * - * NOTE: We does not involve SGD training here. With SGD training, this - * program would become a complete implementation of GRU which can be - * trained with sequence data. However, since this is only a CPU serial - * Matlab version of GRU, applying it on large datasets will be dramatically - * slow. - * - * Matlab code: - * by Minchen Li, at The University of British Columbia. 2016−04−21 - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Standard RNN (GRU/LSTM) Formats for Multivariate Time Series Data: - * Matlab: - * - * Keras: 3D format expected by GRU/LSTM is [samples, timesteps, features]. 
- * => indexing [timestamp t, lags k, variable j] - * PyTorch: - */ - -package scalation -package modeling -package forecasting - -import scala.math.log - -import scalation.mathstat.{MatrixD, VectorD} -import scalation.random.{NormalMat, NormalVec_c} - -import ActivationFun.{sigmoid_, softmax_, tanh_} -import MatrixD.outer - -def log_ (x: VectorD): VectorD = x.map (log) - - -case class Gate (n_seq: Int, n_mem: Int, n_var: Int): - - val v = new MatrixD (n_seq, n_mem) // gate value: time x state - var dU = new MatrixD (n_mem, n_var) // partial w.r.t. weight matrix U - var dW = new MatrixD (n_mem, n_mem) // partial w.r.t. weight matrix W - var db = new VectorD (n_mem) // partial w.r.t. bias vector b - - def apply (t: Int): VectorD = v(t) - def update (t: Int, vv: VectorD): Unit = v(t) = vv - def += (a1: MatrixD, a2: MatrixD, a3: VectorD): Unit = - { dU += a1; dW += a2; db += a3 } - - def += (dIn: VectorD, x_t: VectorD, h_tm1: VectorD): Unit = - { dU += outer (dIn, x_t); dW += outer (dIn, h_tm1); db += dIn } - -end Gate - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `GRU` class implements Gated Recurrent Unit (GRU) via Back Propagation Through - * Time (BPTT). At each time point x_t, there is a vector representing several variables - * or the encoding of a word. Intended to work for guessing the next work in a sentence - * or for multi-horizon forecasting. 
- * Time series: (x_t: t = 0, 1, ..., n_seq-1) where n_seq is the number of time points/words - * @param x the input sequence/time series - * @param y the output sequence/time series - * @param fname the feature/variable names - * @param n_mem the size for hidden state (h) (dimensionality of memory) - */ -class GRU (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): - - private val CLASSIF = false // whether to apply classification (e.g., guess next word) or forecast a value - private val max_epochs = 20 // maximum number of iterations - private val eta = 0.02 // the learning rate (0.25 for gRUTest) - - private val n_seq = x.dim // e.g., 20, number of words in a sentence (including start and end symbol) - private val n_var = x.dim2 // e.g., 64, number of variable or distinct words (vocabulary size) - // since we will only use one sentence for training, - // this is also the total steps during training. - - private val _1 = VectorD.one (n_mem) // vector of all ones for e.g., 1 - z - - // initialize parameters (weights and biases) - private val rmg1 = NormalMat (n_mem, n_var, 0.0, 0.01) - private var Uz = rmg1.gen - private var Ur = rmg1.gen - private var Uc = rmg1.gen - - // multiplier for previous s of intermediate variables - private val rmg2 = NormalMat (n_mem, n_mem, 0.0, 0.01) - private var Wz = rmg2.gen - private var Wr = rmg2.gen - private var Wc = rmg2.gen - - // bias terms of intermediate variables - converted to VectorD - private val rvg1 = NormalVec_c (n_mem, 0.0, 0.01) - private var b_z = rvg1.gen - private var b_r = rvg1.gen - private var b_c = rvg1.gen - - // decoder for generating output - private val rmg3 = NormalMat (n_var, n_mem, 0.0, 0.01) - private val rvg3 = NormalVec_c (n_var, 0.0, 0.01) - private var V = rmg3.gen - private var b_V = rvg3.gen // bias of decoder - converted to Vector - - // previous state h of step 1 (t = -1) - private var h_m1 = rvg1.gen // hidden state @ t = -1, converted to vector - - private val L = new 
VectorD (n_seq) // store loss function values - println (s"L = $L") - - private val z = Gate (n_seq, n_mem, n_var) // update gate (z) - private val r = Gate (n_seq, n_mem, n_var) // reset gate (r) - private val c = Gate (n_seq, n_mem, n_var) // candidate state (c) - - private val h = new MatrixD (n_seq, n_mem) // hidden state (h) - private val yp = new MatrixD (n_seq, n_var) // predicted output - - // the partial derivative of weights and biases - private var dh_m1 = new VectorD (h_m1.dim) - - private var db_V: VectorD = null - private var dV = new MatrixD (V.dim, V.dim2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the GRU using simple gradient descent. - */ - def train (): Unit = - - for it <- 1 to max_epochs do - // forward propagate: get the intermediate and output results - forward () - - println (s"train: for epoch $it: loss function L = $L") - println(s"train: for epoch $it: total loss function L.sum = ${L.sum}") - - // back propogate: calculate the gradient (the partial derivatives) - backward () - - // update the parameters (weights and biases) - update_params () - end for - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forward propagate calculates yp, loss and intermediate variables for each step. 
- */ - def forward (): Unit = - for t <- 0 until n_seq do - val h_pre = if t == 0 then h_m1 else h(t-1) // get previous hidden state - z(t) = sigmoid_ (Uz * x(t) + Wz * h_pre + b_z) // update gate - r(t) = sigmoid_ (Ur * x(t) + Wr * h_pre + b_r) // reset gate - c(t) = tanh_ (Uc * x(t) + Wc * (h_pre * r(t)) + b_c) // candidate state - h(t) = z(t) * h_pre + (_1 - z(t)) * c(t) // hidden state - if CLASSIF then - yp(t) = softmax_ (V * h(t) + b_V) // activation: softmax for classification - L(t) = (-y(t) * log_ (yp(t))).sum // cross-entropy loss function - else - yp(t) = V * h(t) + b_V // activation: id for forecasting - L(t) = (y(t) - yp(t)).normSq // sse loss function - end if - end for - end forward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backward propagate to calculate gradients using chain rules in O(n_seq) time. - * FIX - add option of using sse loss function and fix affected partial derivatives - */ - def backward (): Unit = - - println (s"yp.dims = ${yp.dims}, y.dims = ${y.dims}") - val e = yp - y // negative error - db_V = e.sumVr - for t <- 0 until n_seq do dV += outer (e(t), h(t)) // outer vector product - - val dh_single = e * V // n_seq by n_mem matrix - var dh = new VectorD (dh_single.dim2) // partial for hidden state (dh) - - // calculate the derivative contribution of each step and add them up - - for t <- n_seq-1 to 1 by -1 do - dh += dh_single(t) // update partial for hidden state (dh) # time t - val dh_bk = dh // save dh - - // mix for new candidate state (c) - val dIn_c = (dh * (_1 - z(t)) * (_1 - c(t) * c(t))) // input to tanh for candidate (c) - c += (outer (dIn_c, x(t)), outer (dIn_c, (h(t-1) * r(t))), dIn_c) // update partials for c candidate - val dhr = Wc.Ƭ * dIn_c - dh = dhr * r(t) - - // reset gate (r) - val dIn_r = dhr * h(t-1) * r(t) * (_1 - r(t)) // input to sigmoid reset gate r - r += (dIn_r, x(t), h(t-1)) -// r += (outer (dIn_r, x(t)), outer (dIn_r, h(t-1)), dIn_r) // update partials for 
r gate - dh += Wr.Ƭ * dIn_r + dh_bk * z(t) - - // update gate (z) - val dIn_z = dh_bk * (h(t-1) - c(t)) * z(t) * (_1 - z(t)) // input to sigmoid update gate z - z += (dIn_z, x(t), h(t-1)) -// z += (outer (dIn_z, x(t)), outer (dIn_z, h(t-1)), dIn_z) // update partials for z gate - dh += Wz.Ƭ * dIn_z - end for - - // end case @ time t = 0 -> use h_m1 - - dh += dh_single(0) // update partial for hidden state (dh) @ time 0 - - val dIn_c = (dh * (_1 - z(0)) * (_1 - c(0) * c(0))) - c += (outer (dIn_c, x(0)), outer (dIn_c, (h_m1 * r(0))), dIn_c) - val dhr = Wc.Ƭ * dIn_c - dh_m1 += dhr * r(0) - - val dIn_r = dhr * h_m1 * r(0) * (_1 - r(0)) - r += (outer (dIn_r, x(0)), outer (dIn_r, h_m1), dIn_r) - dh_m1 += Wr.Ƭ * dIn_r + dh * z(0) - - val dIn_z = dh * (h_m1 - c(0)) * z(0) * (_1 - z(0)) - z += (outer (dIn_z, x(0)), outer (dIn_z, h_m1), dIn_z) - dh_m1 += Wz.Ƭ * dIn_z - end backward - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on the calculated partial derivatives, update the parameters (weights - * and biases). - */ - def update_params (): Unit = - // update gate (z) - Uz -= z.dU * eta - Wz -= z.dW * eta - b_z -= z.db * eta - - // reset gate (r) - Ur -= r.dU * eta - Wr -= r.dW * eta - b_r -= r.db * eta - - // candidate state (c) - Uc -= c.dU * eta - Wc -= c.dW * eta - b_c -= c.db * eta - - // output layer - V -= dV * eta - b_V -= db_V * eta - end update_params - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `GRU` companion object provides factory methods. - */ -object GRU: - - import ActivationFun._ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `GRU` with automatic rescaling from a data matrix and response matrix. 
- * @param x the input/data matrix - * @param y the output/response matrix - * @param fname the feature/variable names - * @param n_mem the size of the hidden state (dimensionality of memory) - */ - def rescale (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): GRU = - val x_s = rescaleX (x, f_sigmoid) - val y_s = rescaleY (y, f_sigmoid)._1 - -// println (s" scaled: x = $x_s \n scaled y = $y_s") - new GRU (x_s, y_s, fname, n_mem) - end rescale - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Generate a fake sequence dataset: generate only one sentence for training. - * Only for testing. Needs to be changed to read in training data from files. - * The words are one-hot encoded into a column vector. - * @param n_seq the sequence size (number of time points/words) - * @param n_var the number of variables/word encoding size - */ -def genSequenceData (n_seq: Int, n_var: Int): (MatrixD, MatrixD) = - - import scalation.random.Randi - - assert (n_var > 2) // for start and end of sentence symbols - assert (n_seq > 0) - - // define start and end of sentence in the vocabulary - val SENTENCE_START = new VectorD (n_var) // start: [1, 0, 0, ...] - SENTENCE_START(0) = 1 - val SENTENCE_END = new VectorD (n_var) // end: [0, 1, 0, ...] 
- SENTENCE_END(1) = 1 - - println (s"SENTENCE_START = $SENTENCE_START") - println (s"SENTENCE_END = $SENTENCE_END") - - // generate sentence - val i_ran = Randi (0, n_var-3) // random integer generator - val z_t = new MatrixD (n_seq-1, n_var) // leave one slot for SENTENCE START - for t <- 0 until n_seq-1 do - // generate a random word excludes start and end symbol - z_t(t, i_ran.igen+2) = 1 // set a 1 in position to indicate a word - end for - - val x_t = SENTENCE_START +: z_t // training input matrix (prepend vector) - val y_t = z_t :+ SENTENCE_END // training output matrix (append vector) - - (x_t, y_t) -end genSequenceData - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest` main function tests the `GRU` class on randomly generated - * sequence data meant to represent encoded words - * > runMain scalation.modeling.forecasting.gRUTest - */ -@main def gRUTest (): Unit = - - val n_seq = 8 - val n_var = 5 - - val (x_t, y_t) = genSequenceData (n_seq, n_var) - - println (s"x_t = $x_t") - println (s"y_t = $y_t") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x_t, y_t) - mod.train () -// mod.test () - -end gRUTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest2` main function tests the `GRU` class on sequence data read as words - * in a file that encoded and pass into `GRU` - * > runMain scalation.modeling.forecasting.gRUTest2 - */ -@main def gRUTest2 (): Unit = - - println ("read words from a text file") - -// FIX - find example text - -end gRUTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest3` main function tests the `GRU` class on sequence/time series data - * corresponding to the lake level dataset using multiple lags. 
- * > runMain scalation.modeling.forecasting.gRUTest3 - */ -@main def gRUTest3 (): Unit = - - import Example_LakeLevels.y - val lag = 2 // number of lags to include - val hh = 2 // forecasting horizon - FIX - currently lags == hh - - val y_s = scaleV (extreme (y), (-2.0, 2.0))(y) // rescale y to active domain of sigmoid, tanh - - val (x, yy) = RegressionMV4TS.buildMatrix (y_s, lag, hh) // column for each lag - - println (s"x.dims = ${x.dims}, yy.dims = ${yy.dims}") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x, yy) // call constructor - mod.train () -// mod.test () - -end gRUTest3 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak6 b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak6 deleted file mode 100644 index 6f61ff152..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/GRU.scala.bak6 +++ /dev/null @@ -1,393 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Sainagesh Veeravalli - * @version 2.0 - * @date Thu May 11 15:38:07 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @title Model: Gated Recurrent Unit (GRU) for Multivariate Time Series - * - * @see https://www.frontiersin.org/articles/10.3389/fncom.2021.678158/full - * - * Translated from Matlab to Scala - * @see https://www.math.ucla.edu/~minchen/doc/BPTTTutorial.pdf - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/* This Matlab program tests the BPTT process we manually developed for GRU. - * We calculate the gradients of GRU parameters with chain rule, and then - * compare them to the numerical gradients to check whether our chain rule - * derivation is correct. - * - * Here, we provided 2 versions of BPTT, backward_direct() and backward(). 
- * The former one is the direct idea to calculate gradient within each step - * and add them up (O(n_seqˆ2) time) . The latter one is optimized to - * calculate the contribution of each step to the overall gradient, which is - * only O(n_seq) time. - * - * This is very helpful for people who want to implement GRU in Caffe since - * Caffe does not support auto−differentiation. This is also very helpful for - * the people who want to know the details about Back-propagation Through - * Time algorithm in the Recurrent Neural Networks (such as GRU and LSTM) - * and also get a sense on how auto−differentiation is possible. - * - * NOTE: We does not involve SGD training here. With SGD training, this - * program would become a complete implementation of GRU which can be - * trained with sequence data. However, since this is only a CPU serial - * Matlab version of GRU, applying it on large datasets will be dramatically - * slow. - * - * Matlab code: - * by Minchen Li, at The University of British Columbia. 2016−04−21 - */ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Standard RNN (GRU/LSTM) Formats for Multivariate Time Series Data: - * Matlab: - * - * Keras: 3D format expected by GRU/LSTM is [samples, timesteps, features]. - * => indexing [timestamp t, lags k, variable j] - * PyTorch: - */ - -package scalation -package modeling -package forecasting - -import scala.math.log - -import scalation.mathstat.{MatrixD, VectorD} -import scalation.random.{NormalMat, NormalVec_c} - -import ActivationFun.{sigmoid_, softmax_, tanh_} -import MatrixD.outer - -def log_ (x: VectorD): VectorD = x.map (log) - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Gate` case class holds information on the gate's value and its partial derivatives. 
- * @param n_seq the length of the time series - * @param n_mem the size for hidden state (h) (dimensionality of memory) - * @param n_var the number of variables - */ -case class Gate (n_seq: Int, n_mem: Int, n_var: Int): - - val v = new MatrixD (n_seq, n_mem) // gate value: time x state - var dU = new MatrixD (n_mem, n_var) // partial w.r.t. weight matrix U - var dW = new MatrixD (n_mem, n_mem) // partial w.r.t. weight matrix W - var db = new VectorD (n_mem) // partial w.r.t. bias vector b - - def apply (t: Int): VectorD = v(t) - - def update (t: Int, vv: VectorD): Unit = v(t) = vv - - def += (dIn: VectorD, x_t: VectorD, h_tm1: VectorD): Unit = - { dU += outer (dIn, x_t); dW += outer (dIn, h_tm1); db += dIn } - -end Gate - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `GRU` class implements Gated Recurrent Unit (GRU) via Back Propagation Through - * Time (BPTT). At each time point x_t, there is a vector representing several variables - * or the encoding of a word. Intended to work for guessing the next work in a sentence - * or for multi-horizon forecasting. 
- * Time series: (x_t: t = 0, 1, ..., n_seq-1) where n_seq is the number of time points/words - * @param x the input sequence/time series - * @param y the output sequence/time series - * @param fname the feature/variable names - * @param n_mem the size for hidden state (h) (dimensionality of memory) - */ -class GRU (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): - - private val CLASSIF = false // whether to apply classification (e.g., guess next word) or forecast a value - private val max_epochs = 20 // maximum number of iterations - private val eta = 0.02 // the learning rate (0.25 for gRUTest) - - private val n_seq = x.dim // e.g., 20, number of words in a sentence (including start and end symbol) - private val n_var = x.dim2 // e.g., 64, number of variables or distinct words (vocabulary size) - // since we will only use one sentence for training, - // this is also the total steps during training. - - private val _1 = VectorD.one (n_mem) // vector of all ones for e.g., 1 - z - - // initialize parameters (weights and biases) - private val rmg1 = NormalMat (n_mem, n_var, 0.0, 0.01) // random (Normal) matrix generators - private val rmg2 = NormalMat (n_mem, n_mem, 0.0, 0.01) - private val rmg3 = NormalMat (n_var, n_mem, 0.0, 0.01) - private val rvg1 = NormalVec_c (n_mem, 0.0, 0.01) // random (Normal) vector generators - private val rvg3 = NormalVec_c (n_var, 0.0, 0.01) - - private var Uz = rmg1.gen // parameters for update gate z - private var Wz = rmg2.gen - private var b_z = rvg1.gen - - private var Ur = rmg1.gen // parameters for reset gate r - private var Wr = rmg2.gen - private var b_r = rvg1.gen - - private var Uc = rmg1.gen // parameters for candidate state mixin c - private var Wc = rmg2.gen - private var b_c = rvg1.gen - - // decoder for generating output - private var V = rmg3.gen // decoder weight matrix - private var b_V = rvg3.gen // decoder bias vector - - private val z = Gate (n_seq, n_mem, n_var) // update gate z - private val 
r = Gate (n_seq, n_mem, n_var) // reset gate r - private val c = Gate (n_seq, n_mem, n_var) // candidate state mixin c - - private var h_m1 = rvg1.gen // hidden state @ t = -1 (m1 means minus 1) - private val h = new MatrixD (n_seq, n_mem) // hidden state h - private val yp = new MatrixD (n_seq, n_var) // predicted output - private val L = new VectorD (n_seq) // store loss function values - - // the partial derivative of weights and biases (outside gates) - private var dh_m1 = new VectorD (h_m1.dim) - private var db_V: VectorD = null - private var dV = new MatrixD (V.dim, V.dim2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the GRU using simple gradient descent. - */ - def train (): Unit = - for it <- 1 to max_epochs do - forward () // forward propagate: get intermediate and output results - - println (s"train: for epoch $it: loss function L = $L") - println (s"train: for epoch $it: total loss function L.sum = ${L.sum}") - - backward () // back propagate: calculate gradients (partial derivatives) - - update_params () // update parameters (weights and biases) - end for - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forward propagate calculates yp, loss and intermediate variables for each step. 
- */ - def forward (): Unit = - for t <- 0 until n_seq do - val h_pre = if t == 0 then h_m1 else h(t-1) // get previous hidden state - z(t) = sigmoid_ (Uz * x(t) + Wz * h_pre + b_z) // update gate - r(t) = sigmoid_ (Ur * x(t) + Wr * h_pre + b_r) // reset gate - c(t) = tanh_ (Uc * x(t) + Wc * (h_pre * r(t)) + b_c) // candidate state - h(t) = z(t) * h_pre + (_1 - z(t)) * c(t) // hidden state - if CLASSIF then - yp(t) = softmax_ (V * h(t) + b_V) // activation: softmax for classification - L(t) = (-y(t) * log_ (yp(t))).sum // cross-entropy loss function - else - yp(t) = V * h(t) + b_V // activation: id for forecasting - L(t) = (y(t) - yp(t)).normSq // sse loss function - end if - end for - end forward - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Backward propagate to calculate gradients using chain rules in O(n_seq) time. - * FIX - add option of using sse loss function and fix affected partial derivatives - */ - def backward (): Unit = - - import ActivationFun.{sigmoidD, tanhD} - - // start back-propagation with the final/feed-forward (ff) layer (uses id for activation) - - val e = yp - y // negative error matrix - db_V = e.sumVr // vector of row sums - for t <- 0 until n_seq do dV += outer (e(t), h(t)) // outer vector product - val dh_ff = e * V // partial w.r.t. 
h: n_seq by n_mem matrix - var dh = new VectorD (dh_ff.dim2) // hold partial for hidden state (dh) @ time t - var dIn, dhr: VectorD = null - - // calculate the derivative contribution of each step and add them up - - for t <- n_seq-1 to 1 by -1 do // move back in time to t = 1 - dh += dh_ff(t) // update partial for hidden state (dh) @ time t - val dh_bk = dh // save dh - - dIn = dh * (_1 - z(t)) * tanhD (c(t)) // input to tanh for candidate mixin c - c += (dIn, x(t), h(t-1) * r(t)) // update partials for c mixin - dhr = Wc.Ƭ * dIn // Ƭ => matrix transpose - dh = dhr * r(t) - - dIn = dhr * h(t-1) * sigmoidD (r(t)) // input to sigmoid reset gate r - r += (dIn, x(t), h(t-1)) // update partials for r gate - dh += Wr.Ƭ * dIn + dh_bk * z(t) - - dIn = dh_bk * (h(t-1) - c(t)) * sigmoidD (z(t)) // input to sigmoid update gate z - z += (dIn, x(t), h(t-1)) // update partials for z gate - dh += Wz.Ƭ * dIn - end for - - // end case @ time t = 0 -> use h_m1 for hidden state - - dh += dh_ff(0) // update partial for hidden state (dh) @ t = 0 - - dIn = dh * (_1 - z(0)) * tanhD (c(0)) - c += (dIn, x(0), h_m1 * r(0)) // update partials for c mixin @ t = 0 - dhr = Wc.Ƭ * dIn - dh_m1 += dhr * r(0) - - dIn = dhr * h_m1 * sigmoidD (r(0)) - r += (dIn, x(0), h_m1) // update partials for r gate @ t = 0 - dh_m1 += Wr.Ƭ * dIn + dh * z(0) - - dIn = dh * (h_m1 - c(0)) * sigmoidD (z(0)) - z += (dIn, x(0), h_m1) // update partials for z gate @ t = 0 - dh_m1 += Wz.Ƭ * dIn - end backward - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on the calculated partial derivatives, update the parameters (weights - * and biases). 
- */ - def update_params (): Unit = - // update gate (z) - Uz -= z.dU * eta - Wz -= z.dW * eta - b_z -= z.db * eta - - // reset gate (r) - Ur -= r.dU * eta - Wr -= r.dW * eta - b_r -= r.db * eta - - // candidate state (c) - Uc -= c.dU * eta - Wc -= c.dW * eta - b_c -= c.db * eta - - // output layer - V -= dV * eta - b_V -= db_V * eta - end update_params - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `GRU` companion object provides factory methods. - */ -object GRU: - - import ActivationFun._ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `GRU` with automatic rescaling from a data matrix and response matrix. - * @param x the input/data matrix - * @param y the output/response matrix - * @param fname the feature/variable names - * @param n_mem the size of the hidden state (dimensionality of memory) - */ - def rescale (x: MatrixD, y: MatrixD, fname: Array [String] = null, n_mem: Int = 4): GRU = - val x_s = rescaleX (x, f_sigmoid) - val y_s = rescaleY (y, f_sigmoid)._1 - -// println (s" scaled: x = $x_s \n scaled y = $y_s") - new GRU (x_s, y_s, fname, n_mem) - end rescale - -end GRU - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Generate a fake sequence dataset: generate only one sentence for training. - * Only for testing. Needs to be changed to read in training data from files. - * The words are one-hot encoded into a column vector. - * @param n_seq the sequence size (number of time points/words) - * @param n_var the number of variables/word encoding size - */ -def genSequenceData (n_seq: Int, n_var: Int): (MatrixD, MatrixD) = - - import scalation.random.Randi - - assert (n_var > 2) // for start and end of sentence symbols - assert (n_seq > 0) - - // define start and end of sentence in the vocabulary - val SENTENCE_START = new VectorD (n_var) // start: [1, 0, 0, ...] 
- SENTENCE_START(0) = 1 - val SENTENCE_END = new VectorD (n_var) // end: [0, 1, 0, ...] - SENTENCE_END(1) = 1 - - println (s"SENTENCE_START = $SENTENCE_START") - println (s"SENTENCE_END = $SENTENCE_END") - - // generate sentence - val i_ran = Randi (0, n_var-3) // random integer generator - val z_t = new MatrixD (n_seq-1, n_var) // leave one slot for SENTENCE START - for t <- 0 until n_seq-1 do - // generate a random word excludes start and end symbol - z_t(t, i_ran.igen+2) = 1 // set a 1 in position to indicate a word - end for - - val x_t = SENTENCE_START +: z_t // training input matrix (prepend vector) - val y_t = z_t :+ SENTENCE_END // training output matrix (append vector) - - (x_t, y_t) -end genSequenceData - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest` main function tests the `GRU` class on randomly generated - * sequence data meant to represent encoded words - * > runMain scalation.modeling.forecasting.gRUTest - */ -@main def gRUTest (): Unit = - - val n_seq = 8 - val n_var = 5 - - val (x_t, y_t) = genSequenceData (n_seq, n_var) - - println (s"x_t = $x_t") - println (s"y_t = $y_t") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x_t, y_t) - mod.train () -// mod.test () - -end gRUTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest2` main function tests the `GRU` class on sequence data read as words - * in a file that encoded and pass into `GRU` - * > runMain scalation.modeling.forecasting.gRUTest2 - */ -@main def gRUTest2 (): Unit = - - println ("read words from a text file") - -// FIX - find example text - -end gRUTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `gRUTest3` main function tests the `GRU` class on sequence/time series data - * corresponding to the lake level dataset using multiple lags. 
- * > runMain scalation.modeling.forecasting.gRUTest3 - */ -@main def gRUTest3 (): Unit = - - import Example_LakeLevels.y - val lags = 2 // number of lags to include - val hh = 2 // forecasting horizon - FIX - currently lags == hh - - val y_s = scaleV (extreme (y), (-2.0, 2.0))(y) // rescale y to active domain of sigmoid, tanh - - val (x, yy) = buildMatrix4TS (y_s, lags, hh) // column for each lag - - println (s"x.dims = ${x.dims}, yy.dims = ${yy.dims}") - - banner ("Create a Gated Recurrent Unit (GRU)") - val mod = new GRU (x, yy) // call constructor - mod.train () -// mod.test () - -end gRUTest3 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/QuadRegressionMV4TS.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/QuadRegressionMV4TS.scala.bak deleted file mode 100644 index 84dc3db20..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/QuadRegressionMV4TS.scala.bak +++ /dev/null @@ -1,380 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Feb 13 16:22:21 EST 2022 - * @see LICENSE (MIT style license file). - * - * @title Model: Quadratic Multi-Variate Regression for Time Series - */ - -package scalation -package modeling -package forecasting - -import scala.math.max - -import scalation.mathstat._ - -import neuralnet.RegressionMV - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QuadRegressionMV4TS` object supports regression for Time Series data. - * Given a response vector y, and a predictor matrix x is built that consists of - * lagged y vectors. Additional future response vectors are built for training. - * y_t = b dot x - * where x = [y_{t-1}, y_{t-2}, ... y_{t-lag}]. - * Matrix x includes constant, linear and quadratic terms. 
- */ -object QuadRegressionMV4TS: - - private val debug = debugf ("QuadRegressionMV4TS", true) // debug function - private val flaw = flawf ("QuadRegressionMV4TS") // flaw function - private val MISSING = -0.0 // missing value - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionMV` object from a Time Series response vector y. - * The input/data matrix x is formed from the lagged y vectors as columns in matrix x. - * Quadratic terms are added to the model, one for each lag. - * @param y the original un-expanded output/response vector - * @param lag the maximum lag included (inclusive) - * @param h the forecasting horizon (1, 2, ... h) - * @param intercept whether to add a column of all ones to the matrix (intercept) - * @param hparam the hyper-parameters ((use Regression.hp for default) - */ - def apply (y: VectorD, lag: Int, h: Int, intercept: Boolean = true, - hparam: HyperParameter = Regression.hp): RegressionMV = - var (x, yy) = buildMatrix4TS (y, lag, h) // column for each lag - x = x ++^ x~^2 // add quadratic terms - if intercept then x = VectorD.one (yy.dim) +^: x // add first column of all ones - - val mod = new RegressionMV (x, yy, null, hparam) - mod.modelName = s"QuadRegressionMV4TS_$lag" - mod - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionMV` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * In addition, lagged exogenous variables are added. - * @param y the original un-expanded output/response vector - * @param lag the maximum lag included (inclusive) - * @parax ex the input matrix for 1st exogenous variable - * @parax ex2 the input matrix for 2nd exogenous variable (optional) - * @parax ex3 the input matrix for 3rd exogenous variable (optional) - * @param h the forecasting horizon (1, 2, ... 
h) - * @param intercept whether to add a column of all ones to the matrix (intercept) - * @param hparam the hyper-parameters (use Regression.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - def exo (y: VectorD, lag: Int, ex: VectorD, ex2: VectorD = null, ex3: VectorD = null, - h: Int, intercept: Boolean = true, hparam: HyperParameter = Regression.hp) - (elag1: Int = max (1, lag / 5), - elag2: Int = max (1, lag)): RegressionMV = - var (x, yy) = buildMatrix4TS (y, lag, h) // column for each lag - if intercept then x = VectorD.one (yy.dim) +^: x // add first column of all ones - val endoCols = x.dim2 - println (s"endogenous: columns = $endoCols") - - var xx = buildMatrix4TS_exo (ex, lag, elag1, elag2) - x = x ++^ xx // add columns for 1st lagged exo var - if ex2 != null then - val xx2 = buildMatrix4TS_exo (ex2, lag, elag1, elag2) - x = x ++^ xx2 // add columns for 2nd lagged exo var - end if - if ex3 != null then - val xx3 = buildMatrix4TS_exo (ex3, lag, elag1, elag2) - x = x ++^ xx3 // add columns for 2nd lagged exo var - end if - println (s"exogenous: columns = ${x.dim2 - endoCols}") - - println (s"exo: x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// println (s"exo: x = $x \n yy = $yy") - val mod = new RegressionMV (x, yy, null, hparam) - mod.modelName = s"QuadRegressionMV4TS.exo_$lag" - mod - end exo - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionMV` object from a response vector to fit a quadratic - * surface to Time Series data. The input/data matrix x is formed from the - * lagged y vectors as columns in matrix x. - * In addition, lagged exogenous variables are added. - * @param y the original un-expanded output/response vector - * @param lag the maximum lag included (inclusive) - * @parax ex the input matrix for exogenous variables (one per column) - * @param h the forecasting horizon (1, 2, ... 
h) - * @param intercept whether to add a column of all ones to the matrix (intercept) - * @param hparam the hyper-parameters (use Regression.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def exo (y: VectorD, lag: Int, ex: MatrixD, h: Int, - intercept: Boolean = true, hparam: HyperParameter = Regression.hp) - (elag1: Int = max (1, lag / 5), - elag2: Int = max (1, lag)): RegressionMV = - var (x, yy) = buildMatrix4TS (y, lag, h) // column for each lag - x = x ++^ x~^2 // add quadratic terms - if intercept then x = VectorD.one (yy.dim) +^: x // add first column of all ones - val endoCols = x.dim2 - println (s"endogenous: columns = $endoCols") - - x = x ++^ Regression4TS.makeExoCols (lag, ex, elag1, elag2) // add columns for each lagged exo var - println (s"exogenous: columns = ${x.dim2 - endoCols}") - - println (s"exo: x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// println (s"exo: x = $x \n yy = $yy") - val mod = new RegressionMV (x, yy, null, hparam) - mod.modelName = s"QuadRegressionMV4TS.exo$lag" - mod - end exo - -end QuadRegressionMV4TS - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `quadRegressionMV4TSTest` main function tests the `QuadRegressionMV4TS` object. - * This test is used to CHECK that the buildMatrix4TS function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. 
- * > runMain scalation.modeling.forecasting.quadRegressionMV4TSTest - */ -@main def quadRegressionMV4TSTest (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - val h = 3 // the forecasting horizon - - for p <- 5 to 5 do // autoregressive hyper-parameter p - banner (s"Test: QuadRegressionMV4TS with $p lags") - val mod = QuadRegressionMV4TS (y, p, h) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yy = mod.getY - val yp = mod.predict (mod.getX) - for k <- yp.indices2 do - new Plot (null, yy(?, k), yp(?, k), s"yy_$k vs. yp_$k for ${mod.modelName} (h=${k+1}) with $p lags", lines = true) - end for - end for - -end quadRegressionMV4TSTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `quadRegressionMV4TSTest2` main function tests the `RegressionMV4TS` class on real data: - * Forecasting lake levels. Uses quadratic regression. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.quadRegressionMV4TSTest2 - */ -@main def quadRegressionMV4TSTest2 (): Unit = - - import Example_LakeLevels.y - val m = y.dim - val h = 2 // the forecasting horizon - - for p <- 1 to 7 do // autoregressive hyper-parameter p - banner (s"Test: QuadRegressionMV4TS with $p lags") - val mod = QuadRegressionMV4TS (y, p, h) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - banner ("Predictions/Forecasts") // direct forecasting technique - val yy = mod.getY - val yf = mod.predict (mod.getX) - for k <- yf.indices2 do - new Plot (null, yy(?, k), yf(?, k), s"yy_$k vs. 
yf_$k for ${mod.modelName} (h=${k+1}) with $p lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - end for - -end quadRegressionMV4TSTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `quadRegressionMV4TSTest3` main function tests the `RegressionMV4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Uses quadratic regression, In-Sample Testing using - * endogenous variable. - * > runMain scalation.modeling.forecasting.quadRegressionMV4TSTest3 - */ -@main def quadRegressionMV4TSTest3 (): Unit = - - val LAGS = 7 // number of lags - val h = 4 // forecasting horizon - - val exo_vars = Array ("icu_patients") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample QuadRegressionMV4TS on COVID-19 Weekly Data") -// val mod = QuadRegressionMV4TS (y, LAGS, h) // create model for time series data - val mod = QuadRegressionMV4TS.rescale (y, LAGS, h) // create model for time series data - scaling - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(LAGS until y.dim) - - for k <- 0 until h do - new Plot (null, yy_, yp(?, k), s"${mod.modelName}, yy vs. 
yp @ h = $k", lines = true) - end for - - banner (s"Feature Selection Technique: Stepwise") - val (cols, rSq) = mod.stepRegressionAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionMV4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"Stepwise: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end quadRegressionMV4TSTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `quadRegressionMV4TSTest4` main function tests the `RegressionMV4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Uses quadratic regression, In-Sample Testing using endogenous - * and exogeneous variables. - * > runMain scalation.modeling.forecasting.quadRegressionMV4TSTest4 - */ -@main def quadRegressionMV4TSTest4 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample QuadRegressionMV4TS.exo on COVID-19 Weekly Data") - val mod = QuadRegressionMV4TS.exo (y, 10, ex, 4)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(10 until y.dim) - new Plot (null, yy_, yp(?, 0), s"${mod.modelName}, yy vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionMV4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end quadRegressionMV4TSTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `quadRegressionMV4TSTest5` main function tests the `QuadRegressionMV4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Uses Quadratic Regression. Does TnT Testing on endogenous - * and exogenous variables. Determine the terms to include in the model for TnT from using - * Stepwise on In-Sample. 
- * > runMain scalation.modeling.forecasting.quadRegressionMV4TSTest5 - */ -@main def quadRegressionMV4TSTest5 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample QuadRegressionMV4TS.exo on COVID-19 Weekly Data") - val mod = QuadRegressionMV4TS.exo (y, 10, ex, 4)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(10 until y.dim) - new Plot (null, yy_, yp(?, 0), s"${mod.modelName}, yy vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionMV4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run TnT on Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - val (x_, y_, xtest, ytest) = RegressionMV4TS.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) - new Plot (null, ytest(?, 0), 
yptest(?, 0), s"${mod.modelName}, ytest vs. yptest", lines = true) - -end quadRegressionMV4TSTest5 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `quadRegressionMV4TSTest6` main function tests the `QuadRegressionMV4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Uses Quadratic Regression. Does TnT Testing on endogenous - * and exogenous variables. Determine the terms to include in the model for TnT from using - * Stepwise on In-Sample. - * > runMain scalation.modeling.forecasting.quadRegressionMV4TSTest6 - */ -@main def quadRegressionMV4TSTest6 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first day with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample QuadRegressionMV4TS.exo on COVID-19 Weekly Data") - val mod = QuadRegressionMV4TS.exo (y, 10, ex, 4)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - val yy_ = y(10 until y.dim) - new Plot (null, yy_, yp(?, 0), s"${mod.modelName}, yy vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for RegressionMV4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") -// val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run Rolling Validation on QuadRegressionMV4TS Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - RegressionMV4TS.rollValidate (bmod, 1) - -end quadRegressionMV4TSTest6 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/QuadSpline.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/QuadSpline.scala.bak deleted file mode 100644 index 226eef599..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/QuadSpline.scala.bak +++ /dev/null @@ -1,236 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 1.6 - * @date Tue May 11 16:25:40 EDT 2021 - * @see LICENSE (MIT style license file). - * - * @title Model: Quadratic Spline - */ - -package scalation.analytics -package forecaster - -import scalation.linalgebra.{MatriD, MatrixD, VectoD, VectorD} -import scalation.plot.Plot -import scalation.util.banner - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QuadSpline` class fits quadratic splines to time-series data that are equally - * spaced in time. 
A sliding window consisting of three data points is perfectly fit - * to a quadratic curve. - *

    - * y_t = a + bt + ct^2 - *

    - * Note, slope matching and smoothness issues are ignored. - * @see wordsandbuttons.online/quadratic_splines_are_useful_too.html - * Any time point from t = 3 to the end of time series may be forecasted. - * @param y the time-series - * @param hparam the hyper-parameters - */ -class QuadSpline (y: VectoD, hp: HyperParameter = null) - extends ForecasterVec (y, 1) with NoFeatureSelectionF -{ - private val DEBUG = true // debug flag - private val shift = 2 // shift center from y_t or y_{t-2} - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the model name including its current hyper-parameter. - */ - override def modelName: String = "QuadSpline" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Based on three points y_{t-1}, y_t, t_{t+1}, determine values for the - * coefficients 'a', 'b' and 'c'. - * @param t the center time point - */ - def splineFit (t: Int): (Double, Double, Double) = - { - val c = 0.5 * (y(t+1) - 2*y(t) + y(t-1)) - val b = 0.5 * (y(t+1) - y(t-1) - 4*c*t) - val a = y(t) - b*t - c*t*t - (a, b, c) - } // splineFit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Evaluate the spline function at time point 't', given the coefficients 'a', 'b' and 'c'. - * @param t the time - * @param a the constant term - * @param b the linear term coefficient - * @param c the quadratic term coefficient - */ - def spline (t: Double, a: Double, b: Double, c: Double): Double = - { - a + b*t + c*t*t - } // spline - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast a one-step ahead value for 'y_t' based on the quadratic curve fit to - * the previous three vales: y_{t-3}, y_{t-2}, t_{t-1}. 
- * @param t the time at which to forecast y - */ - def forecast1 (t: Int): Double = - { - if (t <= shift) y(t) - else { - val (a, b, c) = splineFit (t-shift) - spline (t, a, b, c) - } // if - } // forecast1 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train/fit a `QuadSpline` model to the times-series data in vector 'y_'. - * Note: for `QuadSpline` there are no parameters to train. - * @param x_null the data/input matrix (ignored) - * @param y_ the response/output vector (currently only works for y) - */ - override def train (x_null: MatriD, y_ : VectoD): QuadSpline = { super.train (null, y_); this } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the parameter vector (its null). - */ - def parameter: VectoD = null - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return a vector that is the predictions of a quad spline model, by making forecasts - * for all values from time 3 to the end of the time-series. Note, y_0, y_1 and y_2 - * can't have forecasts, since they would need a value for y_{-1}. - */ - override def predictAll (): VectoD = - { - val yf = new VectorD (m) // forecasts for all z - for (t <- 0 to shift) yf(t) = y(t) // copy actual value - for (t <- shift+1 until m) yf(t) = forecast1(t) // enter forecasted value - yf // return the vector of predicted values - } // predictAll - - def predictAllz (): VectoD = predictAll () - stats.mu - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all 'm' time points and all horizons (1 through 'h'-steps ahead). - * Record these in the 'yf' matrix, where - *

    - * yf(t, k) = k-steps ahead forecast for y_t - *

    - * Note, 'yf.col(0)' is set to 'y' (the actual time-series values). - * @param h the maximum forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAll (h: Int): MatriD = - { - yf = new MatrixD (m, h+1) // forecasts for all time points t & horizons to h - yf.setCol (0, y) // first column is actual values, horizon 0 - for (k <- 1 to h) { - yf(0, k) = y(0) // copy first actual value - for (t <- 1 until m) { // forecast the rest - yf(t, k) = forecast1 (t) // FIX - implement for other h beyond 1 - } // for - if (DEBUG) println (s"forecastAll: yf.col ($k) = ${yf.col (k)}") - } // for - yf // return matrix of forecasted values - } // forecastAll - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test the curve produced by the multiple splines. - */ - def testCurve (): Unit = - { - println (s"y = $y") - for (i <- 1 until y.dim - 1) { - val (a, b, c) = splineFit(i) - for (j <- -2 to 2) { - val t = i + 0.5 * j - val f_t = spline (t, a, b, c) - print (s"spline($t) = $f_t \t") - } // for - println () - } // for - } // testCurve - -} // QuadSpline class - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QuadSpline` companion object provides factory methods for the `QuadSpline` class. - */ -object QuadSpline -{ - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `QuadSpline` object. - * @param y the response vector (time series data) - * @param hparam the hyper-parameters - */ - def apply (y: VectoD, hparam: HyperParameter = null): QuadSpline = - { - new QuadSpline (y, hparam) - } // apply - -} // RandomWalk object - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QuadSplineTest` object is used to test the `QuadSpline` class. - * Forecasting Fibonacci numbers. 
- * > runMain scalation.analytics.forecaster.QuadSplineTest - */ -object QuadSplineTest extends App -{ - val y = VectorD (1, 2, 3, 5, 8, 13, 21, 34, 55, 89) - - banner ("RandomWalk Model") - val rw = new RandomWalk (y) - rw.train (null, y).eval () - println (rw.report) - val yp = rw.predictAll () - new Plot (null, y, yp, "RandomWalk: y vs. yp", lines = true) - - banner ("QuadSpline Model") - val qs = new QuadSpline (y) - qs.train (null, y).eval () - println (qs.report) - qs.testCurve () - val yf = qs.predictAll () - new Plot (null, y, yf, "QuadSpline: y vs. yf", lines = true) - -} // QuadSplineTest object - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QuadSplineTest2` object is used to test the `QuadSpline` class. - * > runMain scalation.analytics.forecaster.QuadSplineTest2 - */ -object QuadSplineTest2 extends App -{ - // TBD - -} // QuadSplineTest object - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QuadSplineTest3` object is used to test the `QuadSpline` class. - * Forecasting lake levels. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.analytics.forecaster.QuadSplineTest3 - */ -object QuadSplineTest3 extends App -{ - import ForecasterVec.y - - banner ("RandomWalk Model") - val rw = new RandomWalk (y) - rw.train (null, y).eval () - println (rw.report) - val yp = rw.predictAll () - new Plot (null, y, yp, "RandomWalk: y vs. yp", lines = true) - - banner ("QuadSpline Model") - val qs = new QuadSpline (y) - qs.train (null, y).eval () - println (qs.report) - val yf = qs.predictAll () - new Plot (null, y, yf, "QuadSpline: y vs. yf", lines = true) - - val mix = (yp + yf) * 0.5 - new Plot (null, y, mix, "Mix: y vs. 
mix", lines = true) - -} // QuadSplineTest3 object - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak deleted file mode 100644 index 8503d9309..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak +++ /dev/null @@ -1,237 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Feb 13 16:22:21 EST 2022 - * @see LICENSE (MIT style license file). - * - * @title Model: Regression for Time Series - */ - -package scalation -package modeling -package forecasting - -import scalation.mathstat._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Regression4TS` object supports regression for Time Series data. - * Given a response vector y, and a predictor matrix x is built that consists of - * lagged y vectors. - * y_t = b dot x - * where x = [y_{t-1}, y_{t-2}, ... y_{t-lag}]. - */ -object Regression4TS: - - private val debug = debugf ("Regression4TS", true) // debug function - private val flaw = flawf ("Regression4TS") // flaw function - private val MISSING = -0.0 // missing value - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `Regression` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matix x. 
- * @param y the original un-expanded output/response vector - * @param lag the maximum lag included (inclusive) - * @param hparam the hyper-parameters (use Regression.hp for default) - */ - def apply (y: VectorD, lag: Int, - hparam: HyperParameter = Regression.hp): Regression = - var (x, yy) = buildMatrix4TS (y, lag) // column for each lag - x = VectorD.one (yy.dim) +^: x // add first column of all ones - - debug ("apply", s" x = $x \n yy = $yy") - val mod = new Regression (x, yy, null, hparam) - mod.modelName = s"Regression4TS_$lag" - mod - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast h steps ahead using the recursive method, returning forecasts in - * matrix yf with columns: [1-step, 2-steps, ... h-steps]. - * @param yp the predicted response vector (horizon 1 forecasts) - * @param h the forecasting horizon - */ - def forecast (mod: Regression, yp: VectorD, h: Int): MatrixD = - val xx = mod.getX // get the predictor matrix - val b = mod.parameter // get the model parameters - val b_ = b(1 until b.dim) // paramters excluding intercept - - val yf = new MatrixD (yp.dim, h) // matrix to hold forecasts - yf(?, 0) = yp // column 0 is predicted values - for k <- 1 until h do // forecast into future: columns 1 to h-1 - for i <- yf.indices do - val xy = xx(i)(k+1 until xx.dim2) ++ yf(i)(0 until k) // last from xx ++ first from yf -// println (s"xy = $xy") - yf(i, k) = b(0) + (b_ dot xy) // record forecasted value - end for - end for - yf - end forecast - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `Regression` object that uses multiple regression to fit a quadratic - * surface to Time Series data. 
- * @param y the original un-expanded output/response vector - * @param lag the maximum lag included (inclusive) - * @param hparam the hyper-parameters ((use Regression.hp for default) - */ - def quadratic (y: VectorD, lag: Int, - hparam: HyperParameter = Regression.hp): Regression = - var (x, yy) = buildMatrix4TS (y, lag) // column for each lag - val xx = new MatrixD (x.dim, 2*x.dim2+1) - xx(?, 0) = VectorD.one (yy.dim) // add first column of all ones - for j <- x.indices2 do // add terms in an interleaved fashion - xx(?, 2*j+1) = x(?, j) // linear terms - xx(?, 2*j+2) = x(?, j)~^2 // add quadratic terms - end for - - debug ("quadratic", s" xx = $xx \n yy = $yy") - val mod = new Regression (xx, yy, null, hparam) - mod.modelName = s"Regression4TS.quadratic$lag" - mod - end quadratic - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast h steps ahead using the recursive method, returning forecasts in - * matrix yf with columns: [1-step, 2-steps, ... h-steps]. 
- * @param yp the predicted response vector (horizon 1 forecasts) - * @param h the forecasting horizon - */ - def forecastq (mod: Regression, yp: VectorD, h: Int): MatrixD = - val xx = mod.getX // get the predictor matrix - val b = mod.parameter // get the model parameters - val b_ = b(1 until b.dim) // paramters excluding intercept - - val yf = new MatrixD (yp.dim, h) // matrix to hold forecasts - yf(?, 0) = yp // column 0 is predicted values - for k <- 1 until h do // forecast into future: columns 1 to h-1 - for i <- yf.indices do - val xi = xx(i) - val yi = yf(i) - var sum = b(0) - var l = 0 - for j <- 1 until b.dim-1 by 2 do // add terms in an interleaved fashion - if j+k+1 < b.dim then - sum += b(j) * xi(j+k) // linear terms - sum += b(j+1) * xi(j+k+1) // add quadratic terms - else - sum += b(j) * yi(l) - sum += b(j+1) * yi(l)~^2 - l += 1 - end if - end for - yf(i, k) = sum // record forecasted value - end for - end for - yf - end forecastq - -end Regression4TS - -import Regression4TS._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest` main function tests the `Regression4TS` class. - * This test is used to CHECK that the buildMatrix4TS function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. - * > runMain scalation.modeling.forecasting.regression4TSTest - */ -@main def regression4TSTest (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - - for p <- 1 to 10 do // autoregressive hyper-parameter p - banner (s"Test: Regression4TS with $p lags") - val mod = Regression4TS (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yp = mod.predict (mod.getX) - new Plot (null, mod.getY, yp, s"y vs. 
yp for ${mod.modelName} with $p lags", lines = true) - end for - -end regression4TSTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest2` main function tests the `Regression4TS` class on real data: - * Forecasting lake levels. - * Test the test, predictAll, testForecast and forecastAll methods over the whole times-series. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.regression4TSTest2 - */ -@main def regression4TSTest2 (): Unit = - - import Example_LakeLevels.y - val m = y.dim - val h = 3 // the forecasting horizon - - for p <- 1 to 8 do // autoregressive hyper-parameter p - banner (s"Test: Regression4TS with $p lags") - val mod = Regression4TS (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) // parameter/coefficient statistics - - banner ("Predictions") - val yy = mod.getY // trimmed actual response vector - val yp = mod.predict (mod.getX) // predicted response vector - new Plot (null, mod.getY, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - println (s"yp = $yp") - - banner ("Forecasts") - val yf = forecast (mod, yp, h) // forecasted response matrix - for k <- yf.indices2 do - new Plot (null, yy, yf(?, k), s"yy vs. yf_$k for ${mod.modelName} with $p lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0) == yp) // first forecast = predicted values - - println (testForecast (mod, y, yf, p)) // QoF - end for - -end regression4TSTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest3` main function tests the `Regression4TS` class on real data: - * Forecasting lake levels. Uses quadratic regression. - * Test the test, predictAll, testf and forecastAll methods over the whole times-series. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.regression4TSTest3 - */ -@main def regression4TSTest3 (): Unit = - - import Example_LakeLevels.y - val m = y.dim - val h = 3 // the forecasting horizon - - for p <- 1 to 1 do // autoregressive hyper-parameter p - banner (s"Test: Regression4TS with $p lags") - val mod = Regression4TS.quadratic (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) // parameter/coefficient statistics - - banner ("Predictions") - val yy = mod.getY // trimmed actual response vector - val yp = mod.predict (mod.getX) - new Plot (null, mod.getY, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - println (s"yp = $yp") - - banner ("Forecasts") - val yf = forecastq (mod, yp, h) // forecasted response matrix - for k <- yf.indices2 do - new Plot (null, yy, yf(?, k), s"yy vs. yf_$k for ${mod.modelName} with $p lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0) == yp) // first forecast = predicted values - - println (testForecast (mod, y, yf, p)) // QoF - -// println (Fit.fitMap (mod.testf (k, y))) // evaluate k-units ahead forecasts - end for - -end regression4TSTest3 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak2 b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak2 deleted file mode 100644 index 898cc7ebf..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak2 +++ /dev/null @@ -1,529 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Tue Feb 22 23:14:31 EST 2022 - * @see LICENSE (MIT style license file). 
- * - * @title Model: Regression for Time Series - */ - -package scalation -package modeling -package forecasting - -import scala.math.{max, min} - -import scalation.mathstat._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Regression4TS` class supports regression for Time Series data. - * Given a response vector y, a predictor matrix x is built that consists of - * lagged y vectors, - * - * y_t = b dot x - * where x = [y_{t-1}, y_{t-2}, ... y_{t-lags}] - * - * @param x the input/predictor matrix built out of lags of y - * @param yy the output/response vector trimmed to match x.dim - * @param lags the maximum lag included (inclusive) - * @param fname the feature/variable names - * @param hparam the hyper-parameters (use Regression.hp for default) - */ -class Regression4TS (x: MatrixD, yy: VectorD, lags: Int, fname: Array [String] = null, - hparam: HyperParameter = Regression.hp) - extends Regression (x, yy, fname, hparam) - with ForecasterX (x, yy, lags): - - private val debug = debugf ("Regression4TS", true) // debug function - private val flaw = flawf ("Regression4TS") // flaw function - private val MISSING = -0.0 // missing value - - modelName = s"Regression4TS_$lags" - - debug ("init", s"$modelName: x.dims = ${x.dims}, yy.dim = ${yy.dim}") - -// FIX - add methods similar to those in Forecaster - may need another trait - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast h steps ahead using the recursive method, returning forecasts in - * matrix yf with columns: [1-step, 2-steps, ... h-steps]. 
- * @param yp the predicted response vector (horizon 1 forecasts) - * @param h the forecasting horizon - * def forecast (yp: VectorD, h: Int): MatrixD = - - * @param t the time point from which to make forecasts - * @param yf the forecasting matrix for the endogenous variable y (time x horizons) - * @param yx the matrix of endogenous y and exogenous x values - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, yx: MatrixD, h: Int): VectorD = -// val b_ = b(1 until b.dim) // parameters excluding intercept - - yf(?, 0) = yx(?, 1) // yp // column 0 is predicted values - for k <- 1 to h do // forecast into future: columns 1 to h-1 - for i <- yf.indices do - val xy = x(i)(k+1 until x.dim2) ++ yf(i)(0 until k) // last from x ++ first from yf -// println (s"xy = $xy") -// yf(i, k) = b(0) + (b_ dot xy) // record forecasted value - yf(i, k) = b dot xy // record forecasted value - end for - end for - yf(?, h-1) - end forecast - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to forecasting matrix and return h-step ahead forecast. - * For 1-step ahead (h = 1), - * y_t = δ + φ_0 y_t-1 + φ_1 y_t-2 + ... + φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time). 
- * @param yxf the forecasting tensor (time x horizons x variables) - * @param y_ the actual values to use in making forecasts - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * def forecastAt (yxf: TensorD, y_ : VectorD, h: Int): VectorD = - * @param yf the forecasting matrix for the endogenous variable y (time x horizons) - * @param yx the matrix of endogenous y and exogenous x values - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, yx: MatrixD, h: Int): VectorD = - if h < 1 then flaw ("forecastAt", s"horizon h = $h must be at least 1") - for t <- yx.indices do // make forecasts over all time points for horizon h - val t1 = t + h - 1 // time point prior to horizon - yf(t+h, h) = b dot yx(min (t1, yx.dim-1)) // forecast down the diagonal ?? - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - -end Regression4TS - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Regression4TS` companion object provides factory methods. - */ -object Regression4TS: - - private val debug = debugf ("Regression4TS", true) // debug function - private val flaw = flawf ("Regression4TS") // flaw function - - private val TREND = false // include quadratic trend - private val DAY = false // include day of the week effect - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `Regression4TS` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. 
- * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @param hparam the hyper-parameters (use Regression.hp for default) - */ - def apply (y: VectorD, lags: Int, - hparam: HyperParameter = Regression.hp): Regression4TS = - var (x, yy) = buildMatrix4TS (y, lags) // column for each lag - x = VectorD.one (yy.dim) +^: x // add first column of all ones - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - end if - if DAY then - val day = VectorI (for t <- yy.indices yield t % 7) - x = day.toDouble +^: x // add DAY of week as ordinal var - -// val dum = Variable.dummyVars (day) -// x = x ++^ dum // add DAY of week as dummy vars - end if - -// println (s"apply: x = $x \n yy = $yy") - new Regression4TS (x, yy, lags, null, hparam) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Make a matrix whose columns are lagged exogenous variables to be added to a data matrix. - * @param lags the maximum lag included (inclusive) for checking purposes - * @param ex the matrix of data for the exogenous variables - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def makeExoCols (lags: Int, ex: MatrixD, elag1: Int, elag2: Int): MatrixD = - var xx: MatrixD = buildMatrix4TS_exo (ex(?, 0), lags, elag1, elag2) - for j <- 1 until ex.dim2 do - xx = xx ++^ buildMatrix4TS_exo (ex(?, j), lags, elag1, elag2) - end for - println (s"addExoVars: collects lags of $ex.dim2 exo variables into #xx.dim2 columns") - xx - end makeExoCols - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `Regression4TS` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * In addition, lagged exogenous variables are added. 
- * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @parax ex the input matrix for exogenous variables (one per column) - * @param hparam the hyper-parameters (use Regression.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def exo (y: VectorD, lags: Int, ex: MatrixD, hparam: HyperParameter = Regression.hp) - (elag1: Int = max (1, lags / 5), - elag2: Int = max (1, lags)): Regression4TS = - var (x, yy) = buildMatrix4TS (y, lags) // column for each lag - x = VectorD.one (yy.dim) +^: x // add first column of all ones - val endoCols = x.dim2 - println (s"endogenous: columns = $endoCols") - - x = x ++^ makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var - println (s"exogenous: columns = ${x.dim2 - endoCols}") - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - end if - if DAY then - val day = VectorI (for t <- yy.indices yield t % 7) - val dum = Variable.dummyVars (day) - x = x ++^ dum // add DAY of week as dummy vars - end if - - println (s"exo: x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// println (s"exo: x = $x \n yy = $yy") - new Regression4TS (x, yy, lags, null, hparam) - end exo - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split the x matrix and y vector into training and testing sets. 
- * @param x the x data/input matrix - * @param y the y response/output vector - * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) - */ - def split_TnT (x: MatrixD, y: VectorD, ratio: Double = 0.30): (MatrixD, VectorD, MatrixD, VectorD) = - val n = x.dim - val tr_size = (n * (1.0 - ratio)).toInt - println (s"Regression4TS.split_TnT: tr_size = $tr_size, te_size = ${n - tr_size}") - (x(0 until tr_size), y(0 until tr_size), x(tr_size until n), y(tr_size until n)) - end split_TnT - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TESTING SET (tr) and a TRAINING SET (te) - * as follows: [ <-- tr_size --> | <-- te_size --> ] - * This version calls predict for one-step ahead out-of-sample forecasts. - * @see `RollingValidation` - * @param mod the forecasting model being used (e.g., `Regression4TS`) - * @param rc the retraining cycle (number of forecasts until retraining occurs) - */ - def rollValidate (mod: Predictor & Fit, rc: Int): Unit = - val x = mod.getX // get data/input matrix - val y = mod.getY // get response/output vector - val tr_size = RollingValidation.trSize (y.dim) // size of initial training set - val te_size = y.dim - tr_size // size of testing set - debug ("rollValidate", s"train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") - - val yp = new VectorD (te_size) // y-predicted over testing set - for i <- 0 until te_size do // iterate through testing set - val t = tr_size + i // next time point to forecast - if i % rc == 0 then mod.train (x(0 until t), y(0 until t)) // retrain on sliding training set - yp(i) = mod.predict (x(t-1)) // predict the next value - end for - - val (t, yy) = RollingValidation.align (tr_size, y) // align vectors - val df = max (0, mod.parameter.size - 1) // degrees of freedom for model - mod.resetDF (df, te_size - df) // reset degrees of freedom 
- new Plot (t, yy, yp, "Plot yy, yp vs. t", lines = true) - println (FitM.fitMap (mod.diagnose (yy, yp), QoF.values.map (_.toString))) - end rollValidate - -end Regression4TS - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest` main function tests the `Regression4TS` class. - * This test is used to CHECK that the buildMatrix4TS function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. - * > runMain scalation.modeling.forecasting.regression4TSTest - */ -@main def regression4TSTest (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - - for p <- 1 to 10 do // autoregressive hyper-parameter p - banner (s"Test: Regression4TS with $p lags") - val mod = Regression4TS (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yp = mod.predict (mod.getX) - new Plot (null, mod.getY, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - end for - -end regression4TSTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest2` main function tests the `Regression4TS` class on real data: - * Forecasting lake levels. 
- * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.regression4TSTest2 - */ -@main def regression4TSTest2 (): Unit = - - import Example_LakeLevels.y - val m = y.dim - val h = 2 // the forecasting horizon - - for p <- 1 to 8 do // autoregressive hyper-parameter p - banner (s"Test: Regression4TS with $p lags") - val mod = Regression4TS (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) // parameter/coefficient statistics - - banner ("Predictions") - val yy = mod.getY // trimmed actual response vector - val xx = mod.getX - val yp = mod.predict (xx) // predicted response vector - new Plot (null, yy, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - println (s"yp = $yp") - - banner ("Forecasts") -// val yf = mod.forecast (yp, h) // forecasted response matrix - val yf = mod.forecastAll (yy, xx, h) // forecasted response matrix - for k <- yf.indices2 do - new Plot (null, yy, yf(?, k), s"yy vs. yf_$k for ${mod.modelName} with $p lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0) == yp) // first forecast = predicted values -/* - banner ("Forecast QoF") - println (testForecast (mod, y, yf, p)) // QoF -// println (Fit.fitMap (mod.testf (k, y))) // evaluate k-units ahead forecasts -*/ - end for - -end regression4TSTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest3` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Daily Data. Does In-Sample Testing on Endogenous variable. 
- * > runMain scalation.modeling.forecasting.regression4TSTest3 - */ -@main def regression4TSTest3 (): Unit = - - val LAGS = 5 // number of lags of y - val h = 2 // forecasting horizon - - val exo_vars = Array ("icu_patients") // no exogenous variables in this case - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample Regression4TS on COVID-19 Weekly Data") - val mod = Regression4TS (y, LAGS) // create model for time series data - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - - banner (s"Multi-horizon forecasting using the recursive method") - val yf = mod.forecastAll (y, mod.getX, h) // forecasted response matrix - for k <- 0 to h do - new Plot (null, y, yf(?, k), s"y vs. 
yf_$k for ${mod.modelName} with $LAGS lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}, y.dim = ${y.dim}, yp.dim = ${yp.dim}") - val yf0 = yf(?, 0)(0 until y.dim) - val yf1 = yf(?, 1)(1 until y.dim) - Forecaster.differ (yf0, y) - Forecaster.differ (yf1, yp) - assert (yf0 =~ y) // zeroth forecast = actual values - assert (yf1 =~ yp) // first forecast = predicted values - -/* - banner (s"Feature Selection Technique: stepRegression") - val (cols, rSq) = mod.stepRegressionAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for Regression4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"Stepwise: rSq = $rSq") - val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") -*/ - -end regression4TSTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest4` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Does In-Sample Testing on endogenous and exogenous variables. 
- * > runMain scalation.modeling.forecasting.regression4TSTest4 - */ -@main def regression4TSTest4 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample Regression4TS.exo on COVID-19 Weekly Data") - val mod = Regression4TS.exo (y, 10, ex)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for Regression4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") - val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end regression4TSTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest5` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Does TnT Testing on endogenous and exogenous variables. - * Determine the terms to include in the model using Stepwise on In-Sample. 
- * > runMain scalation.modeling.forecasting.regression4TSTest5 - */ -@main def regression4TSTest5 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample Regression4TS.exo on COVID-19 Weekly Data") - val mod = Regression4TS.exo (y, 10, ex)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for Regression4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") - val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run TnT on Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - val (x_, y_, xtest, ytest) = Regression4TS.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) - new Plot (null, ytest, yptest, s"${mod.modelName}, ytest vs. 
yptest", lines = true) - -end regression4TSTest5 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest6` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Does Rolling Validation on endogenous and exogenous - * variables. Determine the terms to include in the model using Stepwise on In-Sample. - * > runMain scalation.modeling.forecasting.regression4TSTest6 - */ -@main def regression4TSTest6 (): Unit = - - val LAGS = 7 - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample Regression4TS.exo on COVID-19 Weekly Data") - val mod = Regression4TS.exo (y, LAGS, ex)(1, LAGS+1) // create model for time series data with exo - - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for Regression4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") - val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run Rolling Validation on Regression4TS Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - Regression4TS.rollValidate (bmod, 1) - -end regression4TSTest6 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest7` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Preliminary investigation of Symbolic Regression. - * > runMain scalation.modeling.forecasting.regression4TSTest7 - */ -@main def regression4TSTest7 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - - banner ("Plot Variables on COVID-19 Weekly Data") - - for lag <- 0 to 4 do - val xx_ = ex(lag until y.dim) - val yy_ = y(0 until y.dim - lag) -// new Plot (xx_, yy_, null, s"deaths vs. 
exo-vars @ lag = $lag") - - val mod = SymbolicRegression (xx_, yy_, null, collection.mutable.Set (1.0), cross = false) - mod.trainNtest ()() - println (mod.summary ()) - end for - -end regression4TSTest7 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak3 b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak3 deleted file mode 100644 index 9f95a3d63..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/Regression4TS.scala.bak3 +++ /dev/null @@ -1,535 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Tue Feb 22 23:14:31 EST 2022 - * @see LICENSE (MIT style license file). - * - * @title Model: Regression for Time Series - */ - -package scalation -package modeling -package forecasting - -import scala.math.{max, min} - -import scalation.mathstat._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Regression4TS` class supports regression for Time Series data. - * Given a response vector y, a predictor matrix x is built that consists of - * lagged y vectors, - * - * y_t = b dot x - * where x = [1, y_{t-1}, y_{t-2}, ... 
y_{t-lags}] - * - * @param x the input/predictor matrix built out of lags of y - * @param yy the output/response vector trimmed to match x.dim - * @param lags the maximum lag included (inclusive) - * @param fname the feature/variable names - * @param hparam the hyper-parameters (use Regression.hp for default) - */ -class Regression4TS (x: MatrixD, yy: VectorD, lags: Int, fname: Array [String] = null, - hparam: HyperParameter = Regression.hp) - extends Regression (x, yy, fname, hparam) - with ForecasterX (x, yy, lags): - - private val debug = debugf ("Regression4TS", true) // debug function - private val flaw = flawf ("Regression4TS") // flaw function - private val MISSING = -0.0 // missing value - - modelName = s"Regression4TS_$lags" - - debug ("init", s"$modelName: x.dims = ${x.dims}, yy.dim = ${yy.dim}") - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a vector of size h, of 1 through h-steps ahead forecasts for the model. - * forecast the following time points: t+1, ..., t-1+h. - * Note, must create the yf matrix before calling the forecast method. - * Intended to work with rolling validation (analog of predict method) - * Must call `forecastAll` first. - * @param t the time point from which to make forecasts - * @param yf the forecasting matrix (time x horizons) - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecast (t: Int, yf: MatrixD, h: Int): VectorD = - if h < 1 then flaw ("forecast", s"horizon h = $h must be at least 1") - VectorD (for k <- 1 to h yield yf(t+k, k)) // get yf diagonal from time t - end forecast - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Forecast values for all y_.dim time points at horizon h (h-steps ahead). - * Assign to forecasting matrix and return h-step ahead forecast. - * For 1-step ahead (h = 1), - * y_t = δ + φ_0 y_t-1 + φ_1 y_t-2 + ... 
+ φ_p-1 y_t-p - * When k < 0 let y_k = y_0 (i.e., assume first value repeats back in time). - * @param yf the forecasting matrix for the endogenous variable y (time x horizons) - * @param yx the matrix of endogenous y and exogenous x values - * @param h the forecasting horizon, number of steps ahead to produce forecasts - */ - def forecastAt (yf: MatrixD, yx: MatrixD, h: Int): VectorD = - if h < 1 then flaw ("forecastAt", s"horizon h = $h must be at least 1") - for t <- yx.indices do // make forecasts over all time points for horizon h - val t1 = t + h - 1 // time point prior to horizon - yf(t+h, h) = b dot yx(min (t1, yx.dim-1)) // forecast down the diagonal ?? - end for - yf(?, h) // return the h-step ahead forecast vector - end forecastAt - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test FORECASTS of a `Regression4TS` forecasting model y_ = f(x) + e - * and return its forecasts and QoF vector. Testing may be in-sample - * (on the training set) or out-of-sample (on the testing set) as determined - * by the parameters passed in. Note: must call train and forecastAll before testF. - * @param h the forecasting horizon, number of steps ahead to produce forecasts - * @param y_ the testing/full response/output vector - * @param yx the matrix of endogenous y and exogenous x values - */ - def testF (h: Int, y_ : VectorD, yx: MatrixD): (VectorD, VectorD) = - val (yy, yfh) = testSetupF (y_, yx, h) // get and align actual and forecasted values - val params = x.dim2 - resetDF (params, yy.dim - params) // reset the degrees of freedom - println (s"testF: yy.dim = ${yy.dim}, yfh.dim = ${yfh.dim}") -// differ (yy, yfh) // uncomment for debugging - (yfh, diagnose (yy, yfh)) // return predictions and QoF vector - end testF - -end Regression4TS - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Regression4TS` companion object provides factory methods. 
- */ -object Regression4TS: - - private val debug = debugf ("Regression4TS", true) // debug function - private val flaw = flawf ("Regression4TS") // flaw function - - private val TREND = false // include quadratic trend - private val DAY = false // include day of the week effect - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `Regression4TS` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @param hparam the hyper-parameters (use Regression.hp for default) - */ - def apply (y: VectorD, lags: Int, - hparam: HyperParameter = Regression.hp): Regression4TS = - var (x, yy) = buildMatrix4TS (y, lags) // column for each lag - x = VectorD.one (yy.dim) +^: x // add first column of all ones - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - end if - if DAY then - val day = VectorI (for t <- yy.indices yield t % 7) - x = day.toDouble +^: x // add DAY of week as ordinal var - -// val dum = Variable.dummyVars (day) -// x = x ++^ dum // add DAY of week as dummy vars - end if - - println (s"apply: x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// println (s"apply: x = $x \n yy = $yy") - new Regression4TS (x, yy, lags, null, hparam) - end apply - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Make a matrix whose columns are lagged exogenous variables to be added to a data matrix. 
- * @param lags the maximum lag included (inclusive) for checking purposes - * @param ex the matrix of data for the exogenous variables - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def makeExoCols (lags: Int, ex: MatrixD, elag1: Int, elag2: Int): MatrixD = - var xx: MatrixD = buildMatrix4TS_exo (ex(?, 0), lags, elag1, elag2) - for j <- 1 until ex.dim2 do - xx = xx ++^ buildMatrix4TS_exo (ex(?, j), lags, elag1, elag2) - end for - println (s"addExoVars: collects lags of ${ex.dim2} exo variables into #xx.dim2 columns") - xx - end makeExoCols - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `Regression4TS` object from a response vector. The input/data matrix - * x is formed from the lagged y vectors as columns in matrix x. - * In addition, lagged exogenous variables are added. - * @param y the original un-expanded output/response vector - * @param lags the maximum lag included (inclusive) - * @parax ex the input matrix for exogenous variables (one per column) - * @param hparam the hyper-parameters (use Regression.hp for default) - * @param elag1 the minimum exo lag included (inclusive) - * @param elag2 the maximum exo lag included (inclusive) - */ - def exo (y: VectorD, lags: Int, ex: MatrixD, hparam: HyperParameter = Regression.hp) - (elag1: Int = max (1, lags / 5), - elag2: Int = max (1, lags)): Regression4TS = - var (x, yy) = buildMatrix4TS (y, lags) // column for each lag - x = VectorD.one (yy.dim) +^: x // add first column of all ones - val endoCols = x.dim2 - println (s"endogenous: columns = $endoCols") - - x = x ++^ makeExoCols (lags, ex, elag1, elag2) // add columns for each lagged exo var - println (s"exogenous: columns = ${x.dim2 - endoCols}") - - if TREND then - x = VectorD.range (0, yy.dim) +^: x // add trend/time - x = VectorD.range (0, yy.dim)~^2 +^: x // add quadratic trend/time - end if - if DAY then - val day = VectorI (for t <- 
yy.indices yield t % 7) - val dum = Variable.dummyVars (day) - x = x ++^ dum // add DAY of week as dummy vars - end if - - println (s"exo: x.dims = ${x.dims}, yy.dim = ${yy.dim}") -// println (s"exo: x = $x \n yy = $yy") - new Regression4TS (x, yy, lags, null, hparam) - end exo - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split the x matrix and y vector into training and testing sets. - * @param x the x data/input matrix - * @param y the y response/output vector - * @param ratio the ratio of the TESTING set to the full dataset (most common 70-30, 80-20) - */ - def split_TnT (x: MatrixD, y: VectorD, ratio: Double = 0.30): (MatrixD, VectorD, MatrixD, VectorD) = - val n = x.dim - val tr_size = (n * (1.0 - ratio)).toInt - println (s"Regression4TS.split_TnT: tr_size = $tr_size, te_size = ${n - tr_size}") - (x(0 until tr_size), y(0 until tr_size), x(tr_size until n), y(tr_size until n)) - end split_TnT - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TESTING SET (tr) and a TRAINING SET (te) - * as follows: [ <-- tr_size --> | <-- te_size --> ] - * This version calls predict for one-step ahead out-of-sample forecasts. 
- * @see `RollingValidation` - * @param mod the forecasting model being used (e.g., `Regression4TS`) - * @param rc the retraining cycle (number of forecasts until retraining occurs) - */ - def rollValidate (mod: Predictor & Fit, rc: Int): Unit = - val x = mod.getX // get data/input matrix - val y = mod.getY // get response/output vector - val tr_size = RollingValidation.trSize (y.dim) // size of initial training set - val te_size = y.dim - tr_size // size of testing set - debug ("rollValidate", s"train: tr_size = $tr_size; test: te_size = $te_size, rc = $rc") - - val yp = new VectorD (te_size) // y-predicted over testing set - for i <- 0 until te_size do // iterate through testing set - val t = tr_size + i // next time point to forecast - if i % rc == 0 then mod.train (x(0 until t), y(0 until t)) // retrain on sliding training set - yp(i) = mod.predict (x(t-1)) // predict the next value - end for - - val (t, yy) = RollingValidation.align (tr_size, y) // align vectors - val df = max (0, mod.parameter.size - 1) // degrees of freedom for model - mod.resetDF (df, te_size - df) // reset degrees of freedom - new Plot (t, yy, yp, "Plot yy, yp vs. t", lines = true) - println (FitM.fitMap (mod.diagnose (yy, yp), QoF.values.map (_.toString))) - end rollValidate - -end Regression4TS - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest` main function tests the `Regression4TS` class. - * This test is used to CHECK that the buildMatrix4TS function is working correctly. - * May get NaN for some maximum lags (p) due to multi-collinearity. 
- * > runMain scalation.modeling.forecasting.regression4TSTest - */ -@main def regression4TSTest (): Unit = - - val m = 30 - val y = VectorD.range (1, m) // used to CHECK the buildMatrix4TS function - - for p <- 1 to 10 do // autoregressive hyper-parameter p - banner (s"Test: Regression4TS with $p lags") - val mod = Regression4TS (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) - - val yp = mod.predict (mod.getX) - new Plot (null, mod.getY, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - end for - -end regression4TSTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest2` main function tests the `Regression4TS` class on real data: - * Forecasting lake levels. - * @see cran.r-project.org/web/packages/fpp/fpp.pdf - * > runMain scalation.modeling.forecasting.regression4TSTest2 - */ -@main def regression4TSTest2 (): Unit = - - import Example_LakeLevels.y - val m = y.dim - val h = 2 // the forecasting horizon - - for p <- 1 to 8 do // autoregressive hyper-parameter p - banner (s"Test: Regression4TS with $p lags") - val mod = Regression4TS (y, p) // create model for time series data - mod.trainNtest ()() // train the model on full dataset - println (mod.summary) // parameter/coefficient statistics - - banner ("Predictions") - val yy = mod.getY // trimmed actual response vector - val xx = mod.getX - val yp = mod.predict (xx) // predicted response vector - new Plot (null, yy, yp, s"y vs. yp for ${mod.modelName} with $p lags", lines = true) - println (s"yp = $yp") - - banner ("Forecasts") -// val yf = mod.forecast (yp, h) // forecasted response matrix - val yf = mod.forecastAll (yy, xx, h) // forecasted response matrix - for k <- yf.indices2 do - new Plot (null, yy, yf(?, k), s"yy vs. 
yf_$k for ${mod.modelName} with $p lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}") - assert (yf(?, 0) == yp) // first forecast = predicted values -/* - banner ("Forecast QoF") - println (testForecast (mod, y, yf, p)) // QoF -// println (Fit.fitMap (mod.testf (k, y))) // evaluate k-units ahead forecasts -*/ - end for - -end regression4TSTest2 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest3` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Daily Data. Does In-Sample Testing on Endogenous variable. - * > runMain scalation.modeling.forecasting.regression4TSTest3 - */ -@main def regression4TSTest3 (): Unit = - - val LAGS = 5 // number of lags of y - val h = 2 // forecasting horizon - - val exo_vars = Array.ofDim [String] (0) // no exogenous variables in this case - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample Regression4TS on COVID-19 Weekly Data") - val mod = Regression4TS (y, LAGS) // create model for time series data - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - - banner (s"Multi-horizon forecasting using the recursive method") - val yx = mod.getX - val yf = mod.forecastAll (y, yx, h) // forecasted response matrix - for k <- 0 to h do - new Plot (null, y, yf(?, k), s"y vs. 
yf_$k for ${mod.modelName} with $LAGS lags", lines = true) - end for - println (s"yf = $yf") - println (s"yf.dims = ${yf.dims}, y.dim = ${y.dim}, yp.dim = ${yp.dim}") - val yf0 = yf(?, 0)(0 until y.dim) - val yf1 = yf(?, 1)(1 until y.dim) - Forecaster.differ (yf0, y) - Forecaster.differ (yf1, yp) - assert (yf0 =~ y) // zeroth forecast = actual values - assert (yf1 =~ yp) // first forecast = predicted values - - for k <- 1 to h do - val (yfh, qof) = mod.testF (k, y, yx) // k-steps ahead forecast and its QoF - println (s"Evaluate QoF for horizon $k:") - println (FitM.fitMap (qof, QoF.values.map (_.toString))) // evaluate k-steps ahead forecasts - end for - - banner (s"Feature Selection Technique: stepRegression") - val (cols, rSq) = mod.stepRegressionAll (cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for Regression4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"Stepwise: rSq = $rSq") - val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end regression4TSTest3 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest4` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Does In-Sample Testing on endogenous and exogenous variables. 
- * > runMain scalation.modeling.forecasting.regression4TSTest4 - */ -@main def regression4TSTest4 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample Regression4TS.exo on COVID-19 Weekly Data") - val mod = Regression4TS.exo (y, 10, ex)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train the model on full dataset - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for Regression4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") - val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - -end regression4TSTest4 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest5` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Does TnT Testing on endogenous and exogenous variables. - * Determine the terms to include in the model using Stepwise on In-Sample. 
- * > runMain scalation.modeling.forecasting.regression4TSTest5 - */ -@main def regression4TSTest5 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample Regression4TS.exo on COVID-19 Weekly Data") - val mod = Regression4TS.exo (y, 10, ex)(1, 11) // create model for time series data with exo - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for Regression4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") - val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run TnT on Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - val (x_, y_, xtest, ytest) = Regression4TS.split_TnT (bmod.getX, bmod.getY) - val (yptest, qoftest) = bmod.trainNtest (x_, y_)(xtest, ytest) // train on (x_, y_) and test on (xtest, ytest) - new Plot (null, ytest, yptest, s"${mod.modelName}, ytest vs. 
yptest", lines = true) - -end regression4TSTest5 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest6` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Does Rolling Validation on endogenous and exogenous - * variables. Determine the terms to include in the model using Stepwise on In-Sample. - * > runMain scalation.modeling.forecasting.regression4TSTest6 - */ -@main def regression4TSTest6 (): Unit = - - val LAGS = 7 - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - println (s"ex.dims = ${ex.dims}, y.dim = ${y.dim}") - - banner ("Test In-Sample Regression4TS.exo on COVID-19 Weekly Data") - val mod = Regression4TS.exo (y, LAGS, ex)(1, LAGS+1) // create model for time series data with exo - - val (yp, qof) = mod.trainNtest ()() // train on full and test on full - new Plot (null, mod.getY, yp, s"${mod.modelName}, y vs. 
yp", lines = true) - -// val tech = SelectionTech.Forward // pick one feature selection technique -// val tech = SelectionTech.Backward - val tech = SelectionTech.Stepwise - - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech, cross = false) // R^2, R^2 bar, sMAPE, NA - val k = cols.size - println (s"k = $k, n = ${mod.getX.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "sMAPE", "NA"), - s"R^2 vs n for Regression4TS with tech", lines = true) - println (mod.summary ()) - - banner ("Feature Importance") - println (s"$tech: rSq = $rSq") - val imp = mod.importance (cols.toArray, rSq) -// for (c, r) <- imp do println (s"col = $c, \t ${header(c)}, \t importance = $r") - - banner ("Run Rolling Validation on Regression4TS Best model") - val bmod = mod.getBest._3 // get the best model from feature selection - Regression4TS.rollValidate (bmod, 1) - -end regression4TSTest6 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regression4TSTest7` main function tests the `Regression4TS` class on real data: - * Forecasting COVID-19 Weekly Data. Preliminary investigation of Symbolic Regression. - * > runMain scalation.modeling.forecasting.regression4TSTest7 - */ -@main def regression4TSTest7 (): Unit = - - val exo_vars = Array ("icu_patients", "hosp_patients", "new_tests", "people_vaccinated") - val (xx, yy) = Example_Covid.loadData (exo_vars, "new_deaths") - val iskip = yy.indexWhere (_ >= 6.0) // find day with at least 6 deaths - println (s"iskip = $iskip is first week with at least 6 deaths") - - val ex = xx(iskip until xx.dim) // trim away the first iskip rows - val y = yy(iskip until yy.dim) - - banner ("Plot Variables on COVID-19 Weekly Data") - - for lag <- 0 to 4 do - val xx_ = ex(lag until y.dim) - val yy_ = y(0 until y.dim - lag) -// new Plot (xx_, yy_, null, s"deaths vs. 
exo-vars @ lag = $lag") - - val mod = SymbolicRegression (xx_, yy_, null, collection.mutable.Set (1.0), cross = false) - mod.trainNtest ()() - println (mod.summary ()) - end for - -end regression4TSTest7 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/RollingValidation.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/RollingValidation.scala.bak deleted file mode 100644 index 6480ab5d3..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/RollingValidation.scala.bak +++ /dev/null @@ -1,478 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Wed Jun 17 12:08:35 EDT 2020 - * @see LICENSE (MIT style license file). - * - * @title Model Framework: Rolling Validation for Forecasters - */ - -package scalation -package modeling -package forecasting - -import scalation.mathstat._ - -import Fit._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Chop the testing te and training tr datasets out of the full dataset - * for rolling validation where the training set is before the testing set. 
- * @param x the full data/input matrix - * @param y the full response/output vector - * @param te the start (inclusive) of the testing region - * @param te_size the size of the testing region - * @param tr_size the size of the training region - */ -def chopr (x: MatrixD, y: VectorD, te: Int, te_size: Int, tr_size: Int): - (MatrixD, VectorD, MatrixD, VectorD) = - val DEBUG = false // debug flag - - val te2 = te + te_size // end (exclusive) of testing region - val tr = te - tr_size // start of training region - - val x_e = x(te until te2) // testing data/input matrix - val y_e = y(te until te2) // testing response/output vector - val x_ = x(tr until te) // training data/input matrix - val y_ = y(tr until te) // training response/output vector - - if DEBUG then - println (s"test: x_e($te .. ${te2 - 1})") - println (s"test: y_e($te .. ${te2 - 1})") - println (s"train: x_($tr .. ${te - 1})") - println (s"train: y_($tr .. ${te - 1})") - end if - - (x_e, y_e, x_, y_) -end chopr - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Chop the testing te and training tr datasets out of the full dataset - * for rolling validation where the training set is before the testing set. - * This version works for models without an x componenet, only y. - * @param y the full response/output vector - * @param te the start (inclusive) of the testing region - * @param te_size the size of the testing region - * @param tr_size the size of the training region - */ -def chopr (y: VectorD, te: Int, te_size: Int, tr_size: Int): - (VectorD, VectorD) = - val DEBUG = false // debug flag - - val te2 = te + te_size // end (exclusive) of testing region - val tr = te - tr_size // start of training region - - val y_e = y(te until te2) // testing response/output vector - val y_ = y(tr until te) // training response/output vector - - if DEBUG then - println (s"test: y_e($te .. ${te2 - 1})") - println (s"train: y_($tr .. 
${te - 1})") - end if - - (y_e, y_) -end chopr - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Shift the training dataset right by d2 = xy2._2.dim instances, filling in from - * the testing dataset. Used to update the training dataset before retraining, - * e.g., in rolling validation. - * @param xy1 the training dataset (matrix, vector) - * @param xy2 the portion of the testing dataset to be shifted in (matrix, vector) - */ -def shiftr (xy1: (MatrixD, VectorD), xy2: (MatrixD, VectorD)): (MatrixD, VectorD) = - val d1 = xy1._2.dim // number of training instances - val d2 = xy2._2.dim // number of testing instances to shift in - val gap = d1 - d2 // gap from training to be keep - if xy1._1.dim != d1 then println ("shiftr: dimension mismatch between matrix and vector in xy1") - if xy2._1.dim != d2 then println ("shiftr: dimension mismatch between matrix and vector in xy2") - if gap < 1 then println ("shiftr: no gap => nothing needed from training set") - - val x = new MatrixD (d1, xy1._1.dim2) - val y = new VectorD (d1) - for i <- y.indices do - if i < gap then - for j <- x.indices2 do x(i, j) = xy1._1(i+d2, j) - y(i) = xy1._2(i+d2) - else - for j <- x.indices2 do x(i, j) = xy2._1(i-gap, j) - y(i) = xy2._2(i-gap) - end if - end for - (x, y) -end shiftr - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Shift the training dataset right by d2 = y2.dim instances, filling in from - * the testing dataset. Used to update the training dataset before retraining, - * e.g., in rolling validation. - * This version works for models without an x componenet, only y. 
- * @param y1 the training dataset (vector) - * @param y2 the portion of the testing dataset to be shifted in (vector) - */ -def shiftr (y1: VectorD, y2: VectorD): VectorD = - val d1 = y1.dim // number of training instances - val d2 = y2.dim // number of testing instances to shift in - val gap = d1 - d2 // gap from training to be keep - if gap < 1 then println ("shiftr: no gap => nothing needed from training set") - - val y = new VectorD (d1) - for i <- y.indices do y(i) = if i < gap then y1(i+d2) else y2(i-gap) - y -end shiftr - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RollingValidation` object provides 1-fold rolling validations, e.g., - * for m = 1200 and k = 1, kt = 5: - * 1: tr(ain) 0 until 600, te(st) 600 until 1200 - * In rolling validation for this case, each retraining dataset has 600 instances, - * while the testing dataset has 600. Re-training occurs before every kt = 2 - * forecasts are made. - */ -object RollingValidation: - - private val debug = debugf ("RollingValidation", true) // debug function - private val flaw = flawf ("RollingValidation") // debug function - private val DEBUG2 = false // verbose debug flag - private val TR_RATIO = 0.5 // min ratio train to full datasets - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Calculate the size (number of instances) for a training dataset. - * @param m the size of the full dataset - */ - def trSize (m: Int): Int = (m * TR_RATIO).toInt - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling 1-fold cross-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a test dataset and a training dataset. - * The test dataset is defined by a range of indices (test start until start + te_size) - * and tr_size of the data before this is the training dataset. 
- *------------------------------------------------------------------------- - * This version is for models that have an x component and y component, e.g., `Regression4TS`. - * @see analytics.package.scala for chopr and shiftr methods - * @param mod the forecastering model being used (e.g., `QuadRegression4TS`) - * @param kt_ the frequency of re-training (number of forecasts to make before re-training) (defaults to 5) - * @param h the forecasting horizon, number of steps ahead to produce forecasts (defaults to 1) - */ - def crossValidate (mod: Regression4TS, kt_ : Int = 5, h: Int = 1): Array [Statistic] = - val x = mod.getX // get the (opt. expanded) data/input matrix - val y = mod.getY // get the (opt. expanded) response/output vector - val m = y.dim // number of instances in full dataset - val tr_size = trSize (m) // size of each training dataset - val te_size = m - tr_size // size of each testing dataset - val kt = if (kt_ < 0) te_size else kt_ // given size or size of testing dataset - - debug ("crossValidate", s"m = $m, tr_size = $tr_size, te_size = $te_size, kt = $kt, h = $h") - - if kt < h then flaw ("crossValidate", s"kt = $kt must be at least h = $h") - - val stats = qofStatTable // table of statistics for QoF measures - var te = tr_size // start of initial testing region - - banner (s"crossValidate: iteration 0: test start te = $te") - val (x_e, y_e, x_, y_) = chopr (x, y, te, te_size, tr_size) // chop out testing and training regions - - var xy = (x_, y_) // initial training dataset (matrix, vector) - var ym = xy._2.mean // mean of actual training response - val yf = new VectorD (y_e.dim) // vector to hold forecasts - var rt = 0 // re-training counter - - for i <- y_e.indices do // iterate thru testing instances - if i % kt == 0 then // trigger re-training every kt-th iteration - rt += 1 - if i > 0 then - xy = shiftr (xy, (x_e(i-kt until i), y_e(i-kt until i))) // update training dataset by shifting -// ym = xy._2.mean // update training mean - end if - 
mod.train (xy._1, xy._2) // periodically re-train model on updated training dataset - if (DEBUG2) println (s"crossValidate: rt = $rt, parameter = ${mod.parameter}") - end if -// yf(i) = mod.predict (x_e(i)) // save i-th forecasted value for h = 1 - yf(i) = mod.forecast (x_e(i), h)(i, h) // save i-th forecasted value - FIX - end for - -// FIX - what should the mean be: ym (from tr) or ym2 (from te)? -// val ym2 = y_e.mean -// mod.eval (ym, y_e, yf) // evaluate model on testing dataset - - val qof = mod.diagnose (y_e, yf) // get Quality of Fit (QoF) measures - tallyQof (stats, qof) - debug ("crossValidate", s"number of re-trainings rt = $rt \nqof = " + qof) - debug ("crossValidate", mod.report (qof) + "\n" + mod.summary) - new Plot (null, y_e, yf, s"crossValidate (h = $h): ${mod.modelName} fold 0", lines = true) - // plot actual test response against forecasted test response - stats // return the statistics table - end crossValidate - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling 1-fold cross-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a test dataset and a training dataset. - * The test dataset is defined by a range of indices (test start until start + te_size) - * and tr_size of the data before this is the training dataset. - *------------------------------------------------------------------------- - * This version is for models that have no x component, only the y component, e.g., `AR`. - * @see analytics.package.scala for chopr and shiftr methods - * @param mod the forecastering model being used (e.g., `ARIMA`) - * @param kt_ the frequency of re-training (number of forecasts to make before re-training) (defaults to 5) - * @param h the forecasting horizon, number of steps ahead to produce forecasts (defaults to 1) - */ - def crossValidate2 (mod: Forecaster & Fit, kt_ : Int = 5, h: Int = 1): Array [Statistic] = - val y = mod.getY // get the (opt. 
expanded) response/output vector - val m = y.dim // number of instances in full dataset - val tr_size = trSize (m) // size of each training dataset - val te_size = m - tr_size // size of each testing dataset - val kt = if kt_ < 0 then te_size else kt_ // given size or size of testing dataset - - debug ("crossValidate2", s"m = $m, tr_size = $tr_size, te_size = $te_size, kt = $kt, h = $h") - - if kt < h then flaw ("crossValidate2", s"kt = $kt must be at least h = $h") - - val stats = qofStatTable // table of statistics for QoF measures - var te = tr_size // start of initial testing region - - banner (s"crossValidate2: iteration 0: test start te = $te") - val (y_e, y_) = chopr (y, te, te_size, tr_size) // chop out testing and training regions - - var yy = y_ // initial training dataset (vector) - var ym = yy.mean // mean of actual training response - val yf = new VectorD (y_e.dim) // vector to hold forecasts - var rt = 0 // re-training counter - -// for i <- y_e.indices do // iterate thru testing instances - for i <- 0 until yf.dim-h+1 do // iterate thru testing instances - if i % kt == 0 then // trigger re-training every kt-th iteration - rt += 1 - if i > 0 then - yy = shiftr (yy, y_e(i-kt until i)) // update training dataset by shifting -// ym = yy.mean // update training mean - end if - mod.train (null, yy) // periodically re-train model on updated training dataset - if (DEBUG2) println (s"crossValidate2: rt = $rt, parameter = ${mod.parameter}") - end if - // use time t = tr_size + i to adjust the index with respect to the original y - yf(i+h-1) = mod.forecastX (y, tr_size + i, h) // , i % kt) // save i-th forecasted value - end for - - for i <- 0 until h-1 do yf(i) = y_e(i) // when h > 1, fill in initial blanks in yf with actual y values - -// FIX - what should the mean be: ym (from tr) or ym2 (from te)? 
-// val ym2 = y_e.mean -// mod.eval (ym, y_e, yf) // evaluate model on testing dataset -// val e = y_e - yf // must create local e since the original e may be required for MA models -// mod.diagnose (e, y_e, yf) -// mod.evalf (y_e, yf) - - val qof = mod.diagnose (y_e, yf) // get Quality of Fit (QoF) measures - tallyQof (stats, qof) - debug ("crossValidate2", s"number of re-trainings rt = $rt \nqof = " + qof) - debug ("crossValidate2", mod.report (qof)) - new Plot (null, y_e, yf, s"crossValidate2 (h = $h): ${mod.modelName} fold 0", lines = true) - // plot actual test response against forecasted test response - stats // return the statistics table - end crossValidate2 - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling 1-fold cross-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a test dataset and a training dataset. - * The test dataset is defined by a range of indices (test start until start + te_size) - * and tr_size of the data before this is the training dataset. - *------------------------------------------------------------------------- - * This version is for models that have no x component, only the y component, e.g., `AR`. - * @see analytics.package.scala for chopr and shiftr methods - * @param mod the forecastering model being used (e.g., `ARIMA`) - * @param kt_ the frequency of re-training (number of forecasts to make before re-training) (defaults to 5) - * @param h the forecasting horizon, number of steps ahead to produce forecasts (defaults to 1) - * - def crossValidate2S (mod: SARIMA, kt_ : Int = 5, h: Int = 1): Array [Statistic] = - val y = mod.getY // get the (opt. 
expanded) response/output vector - val m = y.dim // number of instances in full dataset - val tr_size = trSize (m) // size of each training dataset - val te_size = m - tr_size // size of each testing dataset - val kt = if kt_ < 0 then te_size else kt_ // given size or size of testing dataset - - debug ("crossValidate2S", s"m = $m, tr_size = $tr_size, te_size = $te_size, kt = $kt, h = $h") - - if kt < h then flaw ("crossValidate2S", s"kt = $kt must be at least h = $h") - - val stats = qofStatTable // table of statistics for QoF measures - val te = tr_size // start of initial testing region - - banner (s"crossValidate2S: iteration 0: test start te = $te") - val (y_e, y_) = chopr (y, te, te_size, tr_size) // chop out testing and training regions - - var yy = y_ // initial training dataset (vector) - var ym = yy.mean // mean of actual training response - val yf = new VectorD (y_e.dim) // vector to hold forecasts - var rt = 0 // re-training counter - -// for i <- y_e.indices do // iterate thru testing instances - for i <- 0 until yf.dim-h+1 do // iterate thru testing instances - yy = y(i until i+tr_size) - mod.setTS (yy) - - if i % kt == 0 then // trigger re-training every kt-th iteration - rt += 1 - mod.train () // periodically re-train model on updated training dataset - if (DEBUG2) println (s"crossValidate2: rt = $rt") //, parameter = ${mod.parameter}") - else mod.updateFittedValues() // update the fitted values without retraining - - - // use time t = tr_size + i to adjust the index with respect to the original y - yf(i+h-1) = mod.forecast (yy.dim, h).last // save i-th forecasted value - end for - - for i <- 0 until h-1 do yf(i) = y_e(i) // when h > 1, fill in initial blanks in yf with actual y values - -// FIX - what should the mean be: ym (from tr) or ym2 (from te)? 
-// val ym2 = y_e.mean -// mod.eval (ym, y_e, yf) // evaluate model on testing dataset -// val e = y_e - yf // must create local e since the original e may be required for MA models -// mod.diagnose (e, y_e, yf) -// mod.eval (y_e, yf) - - val (yp, qof) = mod.test (y_e, yf) // get Quality of Fit (QoF) measures - tallyQof (stats, qof) - debug ("crossValidate2S", s"number of re-trainings rt = $rt \nqof = " + qof) - debug (crossValidate2S", mod.fitMap) - new Plot (null, y_e, yf, s"crossValidate2S (h = $h): ${mod.modelName} fold 0", lines = true) - // plot actual test response against forecasted test response - stats // return the statistics table - end crossValidate2S - */ - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling 1-fold cross-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a test dataset and a training dataset. - * The test dataset is defined by a range of indices (test start until start + te_size) - * and tr_size of the data before this is the training dataset. - *------------------------------------------------------------------------- - * This version is for models that have an x component and y component, e.g., `NeuralNet_3L1_4TS`. - * @see analytics.package.scala for chopr and shiftr methods - * @param mod the forecastering model being used (e.g., `NeuralNet_3L1_4TS`) - * @param kt_ the frequency of re-training (number of forecasts to make before re-training) (defaults to 50) - * @param h the forecasting horizon, number of steps ahead to produce forecasts (defaults to 1) - * - def crossValidate3 (mod: Forecaster, kt_ : Int = 50, h: Int = 1): Array [Statistic] = - val x = mod.getX // get the (opt. expanded) data/input matrix - val y = mod.getY // get the (opt. 
expanded) response/output vector - val m = y.dim // number of instances in full dataset - val tr_size = trSize (m) // size of each training dataset - val te_size = m - tr_size // size of each testing dataset - val kt = if (kt_ < 0) te_size else kt_ // given size or size of testing dataset - - debug("crossValidate3", s"m = $m, tr_size = $tr_size, te_size = $te_size, kt = $kt, h = $h") - - if kt < h then flaw ("crossValidate3", s"kt = $kt must be at least h = $h") - - val stats = qofStatTable // table of statistics for QoF measures - var te = tr_size // start of initial testing region - - banner (s"crossValidate3: iteration 0: test start te = $te") - val (x_e, y_e, x_, y_) = chopr (x, y, te, te_size, tr_size) // chop out testing and training regions - - var xy = (x_, y_) // initial training dataset (matrix, vector) - var ym = xy._2.mean // mean of actual training response - val yf = new VectorD (y_e.dim) // vector to hold forecasts - var rt = 0 // re-training counter - - for i <- y_e.indices do // iterate thru testing instances - if i % kt == 0 then // trigger re-training every kt-th iteration - rt += 1 - if i > 0 then - xy = shiftr (xy, (x_e(i-kt until i), y_e(i-kt until i))) // update training dataset by shifting -// ym = xy._2.mean // update training mean - end if - mod.train (xy._1, xy._2) // periodically re-train model on updated training dataset - if DEBUG2 then println (s"crossValidate3: rt = $rt, parameter = ${mod.parameter}") - end if -// yf(i) = mod.predict (x_e(i)) // save i-th forecasted value for h = 1 - yf(i) = mod.forecast (x_e, i, h) // save i-th forecasted value - end for - -// FIX - what should the mean be: ym (from tr) or ym2 (from te)? 
-// val ym2 = y_e.mean - mod.eval (ym, y_e, yf) // evaluate model on testing dataset - - val qof = mod.fitA(0).fit // get Quality of Fit (QoF) measures - tallyQof (stats, qof) - debug ("crossValidate3", s"number of re-trainings rt = $rt \nqof = " + qof) -// debug ("crossValidate3", mod.report (qof) + "\n" + mod.summary) - new Plot (null, y_e, yf, s"crossValidate3 (h = $h): ${mod.modelName} fold 0", lines = true) - // plot actual test response against forecasted test response - stats // return the statistics table - end crossValidate3 - */ - -end RollingValidation - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rollingValidationTest` object is used to test the crossValidate method - * in the `RollingValidation` object. - * > runMain scalation.analytics.forecaster.rollingValidationTest - */ -@main def rollingValidationTest (): Unit = - - import scalation.random.Normal - - val m = 1200 // number of instances - val x = new MatrixD (m, 2) // data/input matrix - val y = new VectorD (m) // response/output vector - val e = Normal (0, 20000000) // noise - - for i <- y.indices do - val j = i + 1 - x(i, 0) = 0.0000001 * (j - m/2)~^3 * - 5 * j - x(i, 1) = 10 * j - 0.0001 * j~^2 - y(i) = 10.0 + 3 * x(i, 0) + 2 * x(i, 1) + e.gen - end for - - val h = 1 // forecasting horizon, try changing - banner (s"Regression4TS full dataset results at forecasting horizon h = $h") - val mod = new Regression4TS (x, y, 3) - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - banner (s"Regression4TS rolling validation results at forecasting horizon h = $h") - FitM.showQofStatTable (RollingValidation.crossValidate (mod, h = h)) - -end rollingValidationTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rollingValidationTest2` object is used to test the 
crossValidate2 method - * in the `RollingValidation` object. - * > runMain scalation.analytics.forecaster.rollingValidationTest2 - */ -@main def rollingValidationTest2 (): Unit = - - import scalation.random.Normal - - val m = 1200 // number of instances - val y = new VectorD (m) // response/output vector - val e = Normal (0, 100) // noise - - y(0) = 50.0 - for i <- 1 until y.dim do y(i) = 0.8 * y(i-1) + e.gen - - println (s"y.min = ${y.min}, y.max = ${y.max}") - - val h = 2 // forecasting horizon, try changing - banner (s"AR full dataset results at forecasting horizon h = $h") - ARMA.hp("p") = 2 - val mod = new AR (y) // create an AR(p) model - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - banner (s"AR rolling validation validation results at forecasting horizon h = $h") - FitM.showQofStatTable (RollingValidation.crossValidate2 (mod, h = h)) - -end rollingValidationTest2 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/RollingValidation.scala.bak2 b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/RollingValidation.scala.bak2 deleted file mode 100644 index bd55a1148..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/old/RollingValidation.scala.bak2 +++ /dev/null @@ -1,343 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Wed Jun 17 12:08:35 EDT 2020 - * @see LICENSE (MIT style license file). 
- * - * @title Model Framework: Rolling Validation for Forecasters - */ - -package scalation -package modeling -package forecasting - -import scalation.mathstat._ - -import Fit._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Chop the testing te and training tr datasets out of the full dataset - * for rolling validation where the training set is before the testing set. - * @param x the full data/input matrix - * @param y the full response/output vector - * @param te the start (inclusive) of the testing region - * @param te_size the size of the testing region - * @param tr_size the size of the training region - */ -def chopr (x: MatrixD, y: VectorD, te: Int, te_size: Int, tr_size: Int): - (MatrixD, VectorD, MatrixD, VectorD) = - val DEBUG = true // debug flag - - val te2 = te + te_size // end (exclusive) of testing region - val tr = te - tr_size // start of training region - - val x_e = x(te until te2) // testing data/input matrix - val y_e = y(te until te2) // testing response/output vector - val x_ = x(tr until te) // training data/input matrix - val y_ = y(tr until te) // training response/output vector - - if DEBUG then - println (s"chopr:test: x_e($te .. ${te2 - 1})") - println (s"chopr:test: y_e($te .. ${te2 - 1})") - println (s"chopr:train: x_($tr .. ${te - 1})") - println (s"chopr:train: y_($tr .. ${te - 1})") - end if - - (x_e, y_e, x_, y_) -end chopr - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Chop the testing te and training tr datasets out of the full dataset - * for rolling validation where the training set is before the testing set. - * This version works for models without an x componenet, only y. 
- * @param y the full response/output vector - * @param te the start (inclusive) of the testing region - * @param te_size the size of the testing region - * @param tr_size the size of the training region - */ -def chopr (y: VectorD, te: Int, te_size: Int, tr_size: Int): - (VectorD, VectorD) = - val DEBUG = true // debug flag - - val te2 = te + te_size // end (exclusive) of testing region - val tr = te - tr_size // start of training region - - val y_e = y(te until te2) // testing response/output vector - val y_ = y(tr until te) // training response/output vector - - if DEBUG then - println (s"chopr:test: y_e($te .. ${te2 - 1})") - println (s"chopr:train: y_($tr .. ${te - 1})") - end if - - (y_e, y_) -end chopr - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RollingValidation` object provides k-fold rolling-validation, e.g., - * for TR_RATIO = 0.5, m = 1000 and k = 10 the folds are defined as follows: - * 0: tr(ain) 0 until 500, te(st) 500 until 550 - * 1: tr(ain) 50 until 550, te(st) 550 until 600 - * 2: tr(ain) 100 until 600, te(st) 600 until 650 - * 3: tr(ain) 150 until 650, te(st) 650 until 700 - * 4: tr(ain) 200 until 700, te(st) 700 until 750 - * 5: tr(ain) 250 until 750, te(st) 750 until 800 - * 6: tr(ain) 300 until 800, te(st) 800 until 850 - * 7: tr(ain) 350 until 850, te(st) 850 until 900 - * 8: tr(ain) 400 until 900, te(st) 900 until 950 - * 9: tr(ain) 450 until 950, te(st) 950 until 1000 - * In rolling validation for this case, each re-training set has 500 instances, - * and the testing set has 500 as well, with folds of length 50. - * Re-training occurs for every fold. 
- */ -object RollingValidation: - - private val debug = debugf ("RollingValidation", true) // debug function - private val flaw = flawf ("RollingValidation") // debug function - private val DEBUG2 = false // verbose debug flag - private val TR_RATIO = 0.5 // min ratio train to full datasets - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Calculate the size (number of instances) for a training dataset. - * @param m the size of the full dataset - */ - def trSize (m: Int): Int = (m * TR_RATIO).toInt - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TESTING SET and a TRAINING SET. - * The test dataset is defined by a range of indices (test start until - * start + te_size) * and the data before this is the training dataset. - * This version calls predict for one-step ahead out-of-sample forecasts. - * @param mod the forecastering model being used (e.g., `ARIMA`) - * @param rc the retraining cycle (number of forecasting until retraining occurs) - */ - def rollValidate (mod: Forecaster & Fit, rc: Int): Unit = - val y = mod.getY // get the (opt. 
expanded) response/output vector - val m = y.dim // number of instances in full dataset - val tr_size = trSize (m) // size of each training set - val te_size = m - tr_size // size of each testing set - - debug ("rollValidate", s"m = $m, tr_size = $tr_size, te_size = $te_size, rc = $rc") - - val yp = new VectorD (te_size) // y-predicted over testing set - - for i <- 0 until te_size do // iterate through testing set - val t = tr_size + i // next time point to forecast - if i % rc == 0 then // retrain 0 until t, every rc forecasts - val y_ = y(0 until t) // slice out training set - mod.train (null, y_) // train on training set - end if - yp(i) = mod.predict (t-1, y) // predict the next value - end for - - val t = VectorD.range (tr_size, m) // relevant time range - val yy = y(tr_size until m) // actual response vector sliced - val df = mod.parameter.size - 1 // degrees of freedom for model - mod.resetDF (df, te_size - df) // reset degrees of freedom - new Plot (t, yy, yp, "Plot yy, yp vs. t", lines = true) - println (FitM.fitMap (mod.diagnose (yy, yp), QoF.values.map (_.toString))) - end rollValidate - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TESTING SET and a TRAINING SET. - * The test dataset is defined by a range of indices (test start until - * start + te_size) and the data before this is the training dataset. - * This version calls forecast for h-steps ahead out-of-sample forecasts. - * @param mod the forecastering model being used (e.g., `ARIMA`) - * @param rc the retraining cycle (number of forecasting until retraining occurs) - * @param h the forecasting horizon (h-steps ahead) - */ - def rollValidate (mod: Forecaster & Fit, rc: Int, h: Int): Unit = - val y = mod.getY // get the (opt. 
expanded) response/output vector - val yf = mod.forecastAll (y, h) // get the in-sample forecasting matrix - val m = y.dim // number of instances in full dataset - val tr_size = trSize (m) // size of each training set - val te_size = m - tr_size // size of each testing set - - debug ("rollValidate", s"m = $m, tr_size = $tr_size, te_size = $te_size, rc = $rc, h = $h") - - val yp = new VectorD (te_size) // y-predicted over testing set (only for h=1) - - for i <- 0 until te_size do // iterate through testing set - val t = tr_size + i // next time point to forecast - if i % rc == 0 then // retrain 0 until t, every rc forecasts - val y_ = y(0 until t) // slice out training set - mod.train (null, y_) // train on training set - end if - yp(i) = mod.predict (t-1, y) // predict the next value (only for h=1) - val yd = mod.forecast (t-1, yf, y, h) // forecast the next h-values - // yf is updated down its diagonals - debug ("rollValidate", s"for (i, t) = ($i, $t): yp($i) = ${yp(i)}, yd = $yd") - assert (yp(i) =~ yd(0)) // make sure h=1 forecasts agree with predictions - end for // yf is updated down its diagonals - - val t = VectorD.range (tr_size, m) // relevant time ranmge - val yy = y(tr_size until m) // actual response vector trimed - val df = mod.parameter.size - 1 // degrees of freedom for model - mod.resetDF (df, te_size - df) // reset degrees of freedom - new Plot (t, yy, yp, s"Plot yy, yp vs. t (h = 1)", lines = true) - - for k <- 1 to h do - val yfh = yf(tr_size until m, k) - new Plot (t, yy, yfh, s"Plot yy, yfh vs. 
t (h = $k)", lines = true) - println (FitM.fitMap (mod.diagnose (yy, yfh), QoF.values.map (_.toString))) - end for - - end rollValidate - -/* - val cp = mod.cap // maximum lag (how far into the past) - val st = te_size - cp // size of shift from original y -// val yf = new MatrixD (te_size+cp+h, h+2) // extend before and after - val yf = new MatrixD (y.dim+h, h+2) // extend before and after - for t <- 0 until te_size + cp do yf(t, 0) = y(st+t) // first column is the timestep (e.g., logical day) -// for t <- yf.indices do yf(t, h+1) = te_size + t // last column is time (logical day) - for t <- yf.indices do yf(t, h+1) = t // last column is time (logical day) -*/ - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use kf-fold rolling-validation to compute test Quality of Fit (QoF) measures - * by dividing the dataset into a TESTING SET and a TRAINING SET. - * The test dataset is defined by a range of indices (test start until start + te_size) - * and tr_size of the data before this is the training dataset. - * Further, divide the testing set into k FOLDS and let the training set be the - * tr_size elements before each fold. Call train, test and testF for each fold. - * @param mod the forecastering model being used (e.g., `ARIMA`) - * @param kf the number of testing folds - * @param h the forecasting horizon, number of steps ahead to produce forecasts (defaults to 1) - */ - def rollValidatek (mod: Forecaster & Fit, kf: Int = 10, h: Int = 1): Array [Statistic] = - val y = mod.getY // get the (opt. 
expanded) response/output vector - val m = y.dim // number of instances in full dataset - val tr_size = trSize (m) // size of each training set - val te_size = m - tr_size // size of each testing set - val t1_size = te_size / kf // size of each testing set fold - - debug ("rollValidatek", s"m = $m, tr_size = $tr_size, te_size = $te_size, kf = $kf, h = $h") - - if t1_size < h then flaw ("rollValidate", s"t1_size = $t1_size must be at least h = $h") - - val stats = qofStatTable // table of statistics for QoF measures - var te = tr_size // start of initial testing region - var tef = te // test start for fold 0 - - for fold <- 0 until kf do // iterate over each fold - banner (s"rollValidatek: fold $fold test start tef = $tef") - val (y_e, y_) = chopr (y, tef, t1_size, tr_size) // chop out testing and training regions - mod.train (null, y_) // train on training set - val (yp, qof) = mod.test (null, y_) // test predictions on in-sample testing - println (mod.report (qof)) // report prediction Quality of Fit (QoF) - - val (yfh, qofh) = mod.testF (h, y_e) // test forecasts on testing set fold - println (mod.report (qofh)) // report forecast Quality of Fit (QoF) - tallyQof (stats, qofh) - tef += t1_size // start test start for next fold - end for - - stats // return the statistics table - end rollValidatek - -end RollingValidation - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rollingValidationTest` main function is used to test the rollValidate method - * in the `RollingValidation` object. 
- * > runMain scalation.modeling.forecasting.rollingValidationTest - */ -@main def rollingValidationTest (): Unit = - - import scalation.random.Normal - - val m = 1200 // number of instances - val y = new VectorD (m) // response/output vector - val e = Normal (0, 100) // noise - - y(0) = 50.0 - for i <- 1 until y.dim do y(i) = 0.8 * y(i-1) + e.gen - - val p = 3 // order of the model - val h = 2 // forecasting horizon, try changing - println (s"y.min = ${y.min}, y.max = ${y.max}") - - banner (s"AR full dataset results at forecasting horizon h = $h") - - ARMA.hp("p") = p -// val mod = new AR (y) // create an AR(p) model - val mod = new ARMA (y) // create an ARMA(p, 0) model - - mod.train (null, y) // train the model on full dataset - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - - banner (s"AR rolling validation validation results at forecasting horizon h = $h") - FitM.showQofStatTable (RollingValidation.rollValidatek (mod, h = h)) - -end rollingValidationTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rollingValidationTest2` main function is used to test the rollValidate method - * in the `RollingValidation` object. 
- * > runMain scalation.modeling.forecasting.rollingValidationTest2 - */ -@main def rollingValidationTest2 (): Unit = - - import Example_LakeLevels.y - - val p = 3 // order of the model - val h = 2 // forecasting horizon, try changing - println (s"y.min = ${y.min}, y.max = ${y.max}") - - banner (s"AR full dataset results at forecasting horizon h = $h") - - ARMA.hp("p") = p -// val mod = new AR (y) // create an AR(p) model - val mod = new ARMA (y) // create an ARMA(p, 0) model - - val (yp, qof) = mod.trainNtest ()() // train-test model on full dataset - - val t = VectorD.range (49 until 97) // note original y must be shifted - new Plot (t, y(50 until 98), yp(49 until 97), "y, yp vs t 2nd half", lines = true) - - val rc = 2 // retrain cycle - banner (s"AR($p) one-step ahead rolling validation results") - RollingValidation.rollValidate (mod, rc) - - banner (s"AR($p) $h-steps rolling validation results") - RollingValidation.rollValidate (mod, rc, h) - -end rollingValidationTest2 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `rollingValidationTest3` main function is used to test the rollValidate method - * in the `RollingValidation` object. 
- * > runMain scalation.modeling.forecasting.rollingValidationTest3 - */ -@main def rollingValidationTest3 (): Unit = - - val y = VectorD.range (1, 25) - - val h = 2 // forecasting horizon, try changing - banner (s"RW full dataset results at forecasting horizon h = $h") - val mod = new RandomWalk (y) // create an RW model - mod.train (null, y) // train the model on full dataset - - val (yp, qof) = mod.test (null, y) // test the model on full dataset - println (mod.report (qof)) // report on Quality of Fit (QoF) - println (s"yp = $yp") // print prediction matrix - - val yf = mod.forecastAll (y, h) // produce all foreacts up horizon h - println (s"yf = $yf") // print forecast matrix - - banner (s"RW rolling validation validation results at forecasting horizon h = $h") - FitM.showQofStatTable (RollingValidation.rollValidatek (mod, 3, h)) - -end rollingValidationTest3 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest.class deleted file mode 100644 index 97b1af11c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest.tasty deleted file mode 100644 index 60f0c4129..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest2.class deleted file mode 100644 index 1fcea2d2e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest2.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest2.tasty deleted file mode 100644 index f6196ac97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest3.class deleted file mode 100644 index 470a049db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest3.tasty deleted file mode 100644 index d4d80c05c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest4.class deleted file mode 100644 index 216a146ea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest4.tasty deleted file mode 100644 index 62602341f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/quadSplineTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest.class deleted file mode 100644 index 02a2d6070..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest.tasty deleted file mode 100644 index c8320bff9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest2.class deleted file mode 100644 index 96ff20b5c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest2.tasty deleted file mode 100644 index 461486fd9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest3.class deleted file mode 100644 index 06a69cac0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest3.tasty deleted file mode 100644 index ef2648e2c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest4.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest4.class deleted file mode 100644 index 650410004..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest4.tasty deleted file mode 100644 index e0587bb5a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/randomWalkTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test.class deleted file mode 100644 index 6aba5a7eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test.tasty deleted file mode 100644 index 50351542d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test2.class deleted file mode 100644 index 11132381f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test2.tasty deleted file mode 100644 index 
86ff99569..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test3.class deleted file mode 100644 index 02b7045ec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test3.tasty deleted file mode 100644 index 9a0cac5ce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test4.class deleted file mode 100644 index 5e3d6e811..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test4.tasty deleted file mode 100644 index 697a76138..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test5.class deleted file mode 100644 index 70a1b062a..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test5.tasty deleted file mode 100644 index 3b7ad426b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test6.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test6.class deleted file mode 100644 index b86b14056..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test6.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test6.tasty deleted file mode 100644 index bb6a298a6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TS2Test6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest.class deleted file mode 100644 index c58f6d400..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest.tasty deleted file mode 100644 index 14a41a70d..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest2.class deleted file mode 100644 index 112b7e07a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest2.tasty deleted file mode 100644 index 0ccd1a59e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest3.class deleted file mode 100644 index 0c38a65b7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest3.tasty deleted file mode 100644 index e1c2a9b8e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest4.class deleted file mode 100644 index bc16fba8b..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest4.tasty deleted file mode 100644 index cc5b24b47..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest5.class deleted file mode 100644 index e63696bc4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest5.tasty deleted file mode 100644 index 709a7828a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest6.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest6.class deleted file mode 100644 index 4100b16de..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest6.tasty deleted file mode 100644 index 8daa58476..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest7.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest7.class deleted file mode 100644 index 0ae745caf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest7.tasty deleted file mode 100644 index 449a6cec3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeGB4TSTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest.class deleted file mode 100644 index 6241a81aa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest.tasty deleted file mode 100644 index 90f307af9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest2.class deleted file mode 100644 index 0dfd23d21..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest2.class 
and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest2.tasty deleted file mode 100644 index 311252abd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest3.class deleted file mode 100644 index 8bb325501..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest3.tasty deleted file mode 100644 index 586b1452e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest4.class deleted file mode 100644 index f6f639d22..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest4.tasty deleted file mode 100644 index 8eb8ac3f3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest4.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest5.class deleted file mode 100644 index 33df2e153..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest5.tasty deleted file mode 100644 index e38770b76..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest6.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest6.class deleted file mode 100644 index 5127cfea9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest6.tasty deleted file mode 100644 index 39798b5d3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest7.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest7.class deleted file mode 100644 index 149ca82de..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest7.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest7.tasty deleted file mode 100644 index e18eca1bf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeMT4TSTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest.class deleted file mode 100644 index dafa0559f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest.tasty deleted file mode 100644 index f079cd90a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest2.class deleted file mode 100644 index 9f89aff68..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest2.tasty deleted file mode 100644 index 1d7c2307b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest3.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest3.class deleted file mode 100644 index 661b9dc22..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest3.tasty deleted file mode 100644 index a3312db0a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest4.class deleted file mode 100644 index c6f44f20b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest4.tasty deleted file mode 100644 index cfaa2ea25..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest5.class deleted file mode 100644 index df31f7f92..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest5.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest5.tasty deleted file mode 100644 index 2dcb1f1c9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest6.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest6.class deleted file mode 100644 index bf825abfb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest6.tasty deleted file mode 100644 index 7cd16e7db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest7.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest7.class deleted file mode 100644 index 3c8bd9b9a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest7.tasty deleted file mode 100644 index d9cf76ae5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF4TSTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest.class deleted file mode 100644 index d11f3d773..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest.tasty deleted file mode 100644 index 16ad17490..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest2.class deleted file mode 100644 index 228478ed0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest2.tasty deleted file mode 100644 index 3f1e40dcb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest3.class deleted file mode 100644 index a35be0f5c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest3.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest3.tasty deleted file mode 100644 index 211102cc9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest4.class deleted file mode 100644 index fe44a7aeb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest4.tasty deleted file mode 100644 index 145dcbebb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest5.class deleted file mode 100644 index 73c4c3e49..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest5.tasty deleted file mode 100644 index 5e2899049..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest6.class 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest6.class deleted file mode 100644 index 41e439887..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest6.tasty deleted file mode 100644 index 1a895be5f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest7.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest7.class deleted file mode 100644 index cadd6ac9e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest7.tasty deleted file mode 100644 index 96bacede7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/regressionTreeRF_MT4TSTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest.class deleted file mode 100644 index a80bac1c9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest.tasty deleted file mode 100644 index 0bf30971d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest2.class deleted file mode 100644 index d3044be6d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest2.tasty deleted file mode 100644 index cbe9ab415..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest3.class deleted file mode 100644 index f47ae7a29..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest3.tasty deleted file mode 100644 index cd65cc52e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest4.class deleted file mode 100644 
index fc35fa327..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest4.tasty deleted file mode 100644 index 44bad5784..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/rollingValidationTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest.class deleted file mode 100644 index e14e30c2f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest.tasty deleted file mode 100644 index c7d36b4cf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest2.class deleted file mode 100644 index d47d671c9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest2.tasty deleted file mode 100644 index f1373b9b6..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest3.class deleted file mode 100644 index 7f489abfb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest3.tasty deleted file mode 100644 index 018165a77..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest4.class deleted file mode 100644 index c586a8c8e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest4.tasty deleted file mode 100644 index 2d2f709f8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest5.class deleted file mode 100644 index 6ee78e7c4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest5.class and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest5.tasty deleted file mode 100644 index 4dec74ab2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleExpSmoothingTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest.class deleted file mode 100644 index a49b98f31..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest.tasty deleted file mode 100644 index b2ab8dca6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest2.class deleted file mode 100644 index b2dac95d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest2.tasty deleted file mode 100644 index 69c999cd2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest2.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest3.class deleted file mode 100644 index a372bebd6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest3.tasty deleted file mode 100644 index 6e6eb41e8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest4.class deleted file mode 100644 index 828ae020c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest4.tasty deleted file mode 100644 index 9a6ef0f2d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest5.class deleted file mode 100644 index 6fe7ce824..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest5.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest5.tasty deleted file mode 100644 index 013e0958d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/simpleMovingAverageTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest.class deleted file mode 100644 index d5568dfb4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest.tasty deleted file mode 100644 index ae5a43259..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest2.class deleted file mode 100644 index 61c73ab3b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest2.tasty deleted file mode 100644 index 1ee5e7df7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest3.class deleted file mode 100644 index 
e349f0719..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest3.tasty deleted file mode 100644 index af1a950bf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/stationaryTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/tensor_forecast_matrix.txt b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/tensor_forecast_matrix.txt deleted file mode 100644 index f08f0c7bf..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/tensor_forecast_matrix.txt +++ /dev/null @@ -1,45 +0,0 @@ - -/*---------------------------------------------------------------------------- - -The FORECASTING TENSOR yxf: Example Calculation for AR(3) - move back the diagonal -and up after reaching column 0. - -yxf | h=0 h=1 h=2 ------------------------ -t=0 | [1.0] 0.0 0.0 - | \ \ -t=1 | [2.0] 1.1 0.0 - | \ \ -t=2 | 3.0 [1.9] 0.9 - | \ \ -t=3 | 4.0 3.1 [2.1] - | \ \ -t=4 | 5.0 3.9 2.9 - | \ \ -t=4 | 6.0 5.1 2.9 - -yf(3, 2, 0) = a + rdot = a + b(0) * yxf(2, 1, 0) + b(1) * yxf(1, 0, 0) + b(2) * yxf(0, 0, 0) - -Each sheet represents a variable (n1 endogenous (y) and n2 exogenous (x)), -e.g., endogenous: new_deaths, new_deaths^2 - exogenous: icu_patients, hosp_patients, new_tests, people_vaccinated, people_vaccinated^2 - -TensorD: time x horizon x variable - 170 4 7 - -Model: max lags per variable; selected lags per variable - -Note: 'a' is the constant term and rdot multiplies the parameter vector 'b' times -elements in a diagonal in reverse. Also, the upper right triangle is unknowable -unless back-casting is used. 
- -Column h = 0: zeroth horizon forecasts are the actual (e.g., today's known) values in the time series -Column h = 1: horizon one forecasts are the one-step ahead (e.g., tomorrow's) forecasts -Column h = 2: horizon two forecasts are the two-steps ahead (e.g., day after tomorrow's) forecasts - -Row time t = 3: yxf(3, 0, 0) = 4.0 = the actual value for day 3, - yxf(3, 1, 0) = 3.1 = the one-step ahead forecast for day 3, made yesterday - yxf(3, 2, 0) = 2.1 = the two-steps ahead forecast for day 3, made two days ago - -----------------------------------------------------------------------------*/ - diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest.class deleted file mode 100644 index 57827b4e0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest.tasty deleted file mode 100644 index 708726f5c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest2.class deleted file mode 100644 index 4d87eab7a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest2.tasty deleted file mode 100644 index 99fd773a6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest2.tasty 
and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest3.class deleted file mode 100644 index ef35d440e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest3.tasty deleted file mode 100644 index 3d1928b4d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/trendModelTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest.class deleted file mode 100644 index 57c15511f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest.tasty deleted file mode 100644 index 30a19ce9e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest2.class deleted file mode 100644 index 13aeb9338..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest2.tasty deleted file mode 100644 index b4b0c2313..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest3.class deleted file mode 100644 index e00ce2759..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest3.tasty deleted file mode 100644 index 1d5db2587..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest4.class deleted file mode 100644 index 03b3ec348..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest4.tasty deleted file mode 100644 index 2abcf087d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest5.class deleted file mode 100644 index 8122e0001..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest5.tasty deleted file mode 100644 index 5875e4efa..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest6.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest6.class deleted file mode 100644 index 6ebb93828..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest6.tasty deleted file mode 100644 index 265566f49..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/varTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest.class deleted file mode 100644 index 13745aaf7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest.tasty deleted file mode 100644 index a233d79d2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest2.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest2.class deleted file mode 100644 index 497b64298..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest2.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest2.tasty deleted file mode 100644 index fced92b25..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest3.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest3.class deleted file mode 100644 index e919d130b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest3.tasty deleted file mode 100644 index 90517a603..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest4.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest4.class deleted file mode 100644 index e7b965d2d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest4.tasty deleted file mode 100644 index 2131a3eeb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest4.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest5.class b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest5.class deleted file mode 100644 index daee51764..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest5.tasty deleted file mode 100644 index 9451a6099..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/forecasting_old/weightedMovingAverageTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/imputationTest.class b/target/scala-3.6.4/classes/scalation/modeling/imputationTest.class deleted file mode 100644 index 1040ce3f5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/imputationTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/imputationTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/imputationTest.tasty deleted file mode 100644 index 705fad98b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/imputationTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/imputationTest2.class b/target/scala-3.6.4/classes/scalation/modeling/imputationTest2.class deleted file mode 100644 index 3bdabf061..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/imputationTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/imputationTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/imputationTest2.tasty deleted file mode 100644 index 63d4d1d1d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/imputationTest2.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/index.html b/target/scala-3.6.4/classes/scalation/modeling/index.html deleted file mode 100644 index 11c5ce2a4..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/index.html +++ /dev/null @@ -1,57 +0,0 @@ - - -

    Source files in modeling Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest.class deleted file mode 100644 index d12e7f6fa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest.tasty deleted file mode 100644 index 99cb2c523..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest2.class deleted file mode 100644 index 7282c195d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest2.tasty deleted file mode 100644 index 1c3c75118..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest3.class b/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest3.class deleted file mode 100644 index f64c8f360..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest3.tasty deleted file mode 100644 index e28d6f3b7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/kNN_RegressionTest3.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest.class deleted file mode 100644 index 6fad85b49..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest.tasty deleted file mode 100644 index 23f3fad5e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest2.class deleted file mode 100644 index c18a6a079..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest2.tasty deleted file mode 100644 index 4f09a62af..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest3.class b/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest3.class deleted file mode 100644 index b950b6806..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest3.tasty deleted file mode 100644 index 8014b544c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/lassoRegressionTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/matrixTransformTest.class 
b/target/scala-3.6.4/classes/scalation/modeling/matrixTransformTest.class deleted file mode 100644 index a600a4a49..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/matrixTransformTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/matrixTransformTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/matrixTransformTest.tasty deleted file mode 100644 index a6bd9af10..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/matrixTransformTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/matrixTransformTest2.class b/target/scala-3.6.4/classes/scalation/modeling/matrixTransformTest2.class deleted file mode 100644 index 15df31853..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/matrixTransformTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/matrixTransformTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/matrixTransformTest2.tasty deleted file mode 100644 index f07e600e0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/matrixTransformTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D$.class deleted file mode 100644 index f4b11a957..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D$package$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D$package$.class deleted file mode 100644 index 2fbc85505..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D$package.class 
b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D$package.class deleted file mode 100644 index 2a544e93b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D$package.tasty deleted file mode 100644 index 92374a268..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D.class deleted file mode 100644 index 3b2e50d98..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D.tasty deleted file mode 100644 index b07448a81..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_1D.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D$.class deleted file mode 100644 index 337c831d6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D$package$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D$package$.class deleted file mode 100644 index da0e0d6fa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D$package.class 
b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D$package.class deleted file mode 100644 index 5336389a8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D$package.tasty deleted file mode 100644 index ac9ff1c37..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D.class deleted file mode 100644 index 512b10c1c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D.tasty deleted file mode 100644 index 918b05c92..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CNN_2D.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D$.class deleted file mode 100644 index 907191b10..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D$package$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D$package$.class deleted file mode 100644 index e8f01e3f1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D$package.class 
b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D$package.class deleted file mode 100644 index 01bad85f5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D$package.tasty deleted file mode 100644 index ad49b250a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D.class deleted file mode 100644 index 35c784f84..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D.tasty deleted file mode 100644 index fcbdacdaf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_1D.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D$.class deleted file mode 100644 index e19adb42b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D$package$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D$package$.class deleted file mode 100644 index e507306c1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D$package$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D$package.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D$package.class deleted file mode 100644 index 37b1c1735..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D$package.tasty deleted file mode 100644 index 503c9ed87..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D.class deleted file mode 100644 index c7bb5e0a5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D.scala.bak deleted file mode 100644 index cd78876b4..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D.scala.bak +++ /dev/null @@ -1,214 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sat Mar 27 14:40:57 EDT 2021 - * @see LICENSE (MIT style license file). 
- * - * @note Model Part: Convolutional Filter (tow dimensional) - * - * @see leonardoaraujosantos.gitbook.io/artificial-inteligence/machine_learning/deep_learning/convolution - * @see e2eml.school/convolution_one_d.html - */ - -package scalation -package modeling -package neuralnet - -import scala.math.min - -import scalation.mathstat._ -import scalation.random.RandomMatD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `CoFilter_2D` class provides a convolution filter (cofilter) for - * taking a weighted average over a window of an input matrix. - * @param width the width of the cofilter - */ -class CoFilter_2D (width: Int = 5): - - private val rmg = RandomMatD (width, width, 2.0) // random matrix genertor - private var mat = rmg.gen // the filter's matrix - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the parameters, i.e., the filter's matrix. - * @param vec_ the new matrix parameters - */ - def update (mat_ : MatrixD): Unit = mat = mat_ - -end CoFilter_2D - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `CoFilter_2D` object provides the convolution and pooling operators. - * @see `mathstat.MatrixD` for infix implementations of - * conv (*+) -- valid convolution, no reversal - * convs (*~+) -- same convolution, no reversal - */ -object CoFilter_2D: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the 'full' (less common) convolution of cofilter c and input matrix x. 
- * @param c the cofilter matrix of coefficients - * @param x the input/data matrix - */ - def convf (c: MatrixD, x: MatrixD): MatrixD = - val y = new MatrixD (c.dim + x.dim - 1, c.dim2 + x.dim2 - 1) - for k <- y.indices; l <- y.indices2 do - var sum = 0.0 - for i <- 0 until min (k+1, c.dim); j <- 0 until min (l+1, c.dim2) do - if k - i < x.dim && l - j < x.dim2 then sum += c(i, j) * x(k - i, l - j) - y(k, l) = sum - end for - y - end convf - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the 'same' convolution of cofilter c and input matrix x. - * Same means that the size of the result is the same as the input (via padding). - * @param c the cofilter matrix of coefficients - * @param x the input/data matrix - */ - def convs (c: MatrixD, x: MatrixD): MatrixD = - val y = new MatrixD (x.dim, x.dim2) - val off = (c.dim / 2, c.dim2 / 2) - for k <- off._1 until y.dim + off._1; l <- off._2 until y.dim2 + off._2 do - var sum = 0.0 - for i <- 0 until min (k+1, c.dim); j <- 0 until min (l+1, c.dim2) do - if k - i < x.dim && l - j < x.dim2 then sum += c(i, j) * x(k - i, l - j) - y(k-off._1, l-off._2) = sum - end for - y - end convs - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the 'valid' (no padding) convolution of cofilter c and input matrix x. - * Caveat: does not include reversal. - * @param c the cofilter matrix of coefficients - * @param x the input/data matrix - */ - def conv (c: MatrixD, x: MatrixD): MatrixD = - val y = new MatrixD (x.dim - c.dim + 1, x.dim2 - c.dim2 + 1) - for k <- y.indices; l <- y.indices2 do - y(k, l) = (x(k until k + c.dim, l until l + c.dim2) *~ c).sum - y - end conv - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the convolution over all data instances. 
- * @param c the cofilter matrix of coefficient - * @param x the input/data matrix - * - def conv (c: MatrixD, x: MatrixD): MatrixD = - MatrixD (for i <- x.indices yield conv (c, x(i))) - end conv - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the max-pooling results over all pooling windows. - * @param x the input/data matrix - * @param s the size (s x s) of the pooling window - */ - def pool (x: MatrixD, s: Int = 2): MatrixD = - val p = new MatrixD (x.dim / s, x.dim2 / s) - for j <- p.indices; k <- p.indices2 do - val (jj, kk) = (s * j, s * k) - p(j, k) = x(jj until jj+s, kk until kk+s).mmax - p - end pool - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the pooling results over all data instances. - * @param x the input/data matrix - * @param s the the size of the pooling window - * - def pool (x: MatrixD, s: Int): MatrixD = - MatrixD (for i <- x.indices yield pool (x(i), s)) - end pool - */ - -end CoFilter_2D - -import CoFilter_2D._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `coFilter_2DTest` main function is used to test the `CoFilter_2D` class. - * Test using the simple example from CNN_2D section of the ScalaTion textbook. - * > runMain scalation.modeling.neuralnet.coFilter_2DTest - * -@main def coFilter_2DTest (): Unit = - - val x = MatrixD ((2, 5), 1, 2, 3, 4, 5, - 6, 7, 8, 9, 10) - val c = MatrixD (0.5, 1, 0.5) - val φ = conv (c, x) - val z = pool (φ, 3) - - println (s"input x = $x") - println (s"filter c = $c") - println (s"feature map φ = $φ") - println (s"pooled z = $z") - -end coFilter_2DTest - */ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `coFilter_2DTest2` main function is used to test the `CoFilter_2D` class. - * Test using the Example Calculation from the 2D CNN section of the ScalaTion textbook. 
- * > runMain scalation.modeling.neuralnet.coFilter_2DTest2 - */ -@main def coFilter_2DTest2 (): Unit = - - import ActivationFun.f_reLU - - val x = MatrixD ((5, 5), 0, 0, 2, 1, 0, - 0, 0, 0, 1, 2, - 1, 2, 2, 0, 2, - 2, 0, 0, 0, 1, - 2, 2, 2, 0, 1) - - val c = MatrixD ((2, 2), 1, 1, - 0, 1) - val φ = conv (c, x) - val φa = f_reLU.fM (φ) - val p = pool (φa, 2) - val z = p.flatten - - println (s"input matrix x = $x") - println (s"conv. filter c = $c") - println (s"feature map φ = $φ") - println (s"feature map-a φa = $φa") - println (s"pooled map p = $p") - println (s"hidden vector z = $z") - -end coFilter_2DTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `coFilter_2DTest3` main function is used to test the `CoFilter_2D` class's - * convolutional operator. - * > runMain scalation.modeling.neuralnet.coFilter_2DTest3 - * -@main def coFilter_2DTest3 (): Unit = - - val c = MatrixD (1, 2, 3, 4, 5) - val x = MatrixD (1, 2, 3, 4, 5, 6, 7) - - banner (s"c convolution x") - println (s"c = $c") - println (s"x = $x") - - banner ("Full Convolution: convf (c, x)") - println (s"y = ${convf (c, x)}") - banner ("Same Convolution: convs (c, x)") - println (s"y = ${convs (c, x)}") - banner ("Valid Convolution: conv (c.reverse, x)") - println (s"y = ${conv (c.reverse, x)}") - - banner ("Valid Convolution: conv (c, x)") // without expected reversal - println (s"y = ${conv (c, x)}") - -end coFilter_2DTest3 - */ - diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D.tasty deleted file mode 100644 index 148186d58..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/CoFilter_2D.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1$.class deleted file mode 100644 index a525faeac..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1$package$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1$package$.class deleted file mode 100644 index 1d565b9df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1$package.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1$package.class deleted file mode 100644 index 2e2f2281f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1$package.tasty deleted file mode 100644 index e7153301b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1.class deleted file mode 100644 index 743efa05a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1.tasty deleted file mode 100644 index 63b452e83..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/ELM_3L1.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete$.class deleted file mode 100644 index 4224a9125..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete$package$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete$package$.class deleted file mode 100644 index ff3f73eb7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete$package.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete$package.class deleted file mode 100644 index ff08e501a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete$package.tasty deleted file mode 100644 index 832b2075e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete.class deleted file mode 100644 index 81c7ccfda..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete.tasty deleted file mode 100644 index da451cc96..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Example_Concrete.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam$.class 
b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam$.class deleted file mode 100644 index d8496288d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam$package$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam$package$.class deleted file mode 100644 index 4002e096c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam$package.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam$package.class deleted file mode 100644 index c414d66a8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam$package.tasty deleted file mode 100644 index 5618193f7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam.class deleted file mode 100644 index 068335a56..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam.tasty deleted file mode 100644 index c04344fd0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NetParam.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L$.class 
b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L$.class deleted file mode 100644 index 190f91684..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L$package$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L$package$.class deleted file mode 100644 index dfafcb45f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L$package.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L$package.class deleted file mode 100644 index f606117ce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L$package.tasty deleted file mode 100644 index af02eb503..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L.class deleted file mode 100644 index dce5854f1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L.tasty deleted file mode 100644 index 145f7e65c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L_Ck$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L_Ck$.class deleted file mode 100644 index a1d084f42..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L_Ck$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L_Ck.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L_Ck.class deleted file mode 100644 index 6de054106..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L_Ck.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L_Ck.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L_Ck.tasty deleted file mode 100644 index 463a42a8d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_2L_Ck.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L$.class deleted file mode 100644 index 27a5b9232..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L$package$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L$package$.class deleted file mode 100644 index 9afb633ed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L$package.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L$package.class deleted file mode 100644 index 53dc74280..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L$package.tasty deleted file mode 100644 index a60301ca3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L.class deleted file mode 100644 index 305f13216..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L.tasty deleted file mode 100644 index bc04d2c87..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2$.class deleted file mode 100644 index a013ce2ba..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2$package$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2$package$.class deleted file mode 100644 index d6386a137..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2$package.class 
b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2$package.class deleted file mode 100644 index 882e4ad57..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2$package.tasty deleted file mode 100644 index 40dde4199..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2.class deleted file mode 100644 index 45a96c5e8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2.tasty deleted file mode 100644 index 8e1ed90c4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_C2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_Ck$package$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_Ck$package$.class deleted file mode 100644 index 6fbbe9001..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_Ck$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_Ck$package.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_Ck$package.class deleted file mode 100644 index f89e808b2..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_Ck$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_Ck$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_Ck$package.tasty deleted file mode 100644 index 17b2e7793..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_3L_Ck$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL$.class deleted file mode 100644 index e31031f38..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL$package$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL$package$.class deleted file mode 100644 index ff0cc9ff3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL$package.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL$package.class deleted file mode 100644 index bdb0e59ec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL$package.tasty deleted file mode 100644 index e16610922..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL.class 
b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL.class deleted file mode 100644 index 27f6599c8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL.tasty deleted file mode 100644 index 3db056d8d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XL.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT$.class deleted file mode 100644 index a04a6475d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT$package$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT$package$.class deleted file mode 100644 index d7cdd0a23..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT$package.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT$package.class deleted file mode 100644 index d0d28597d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT$package.tasty deleted file mode 100644 index 05e25c19e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT.class deleted file mode 100644 index d91df8d94..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT.tasty deleted file mode 100644 index d90735c1c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/NeuralNet_XLT.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer$.class deleted file mode 100644 index 15a35d7cc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer.class deleted file mode 100644 index 6d2e9bca0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer.tasty deleted file mode 100644 index 7dd7fd0d6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_Adam.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_Adam.class deleted file mode 100644 index 13cc6f215..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_Adam.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_Adam.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_Adam.scala.bak deleted file mode 100644 index 0db1344b8..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_Adam.scala.bak +++ /dev/null @@ -1,264 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Hao Peng - * @version 2.0 - * @date Sat Mar 5 22:38:03 EST 2022 - * @see LICENSE (MIT style license file). - * - * @note Optimization: ADAptive Moment estimation (Adam) Optimizer - */ - -// U N D E R D E V E L O P M E N T - -package scalation -package modeling -package neuralnet - -import scala.math.min - -import scalation.mathstat._ - -import Optimizer._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Optimizer_Adam` class provides functions to optimize the parameters (weights - * and biases) of Neural Networks with various numbers of layers. - * This optimizer implements a - * @see https://arxiv.org/pdf/1412.6980.pdf - */ -class Optimizer_Adam extends Optimizer: - - private val debug = debugf ("Optimizer_Adam", true) // debug function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given training data x and y for a 2-layer, multi-output Neural Network, fit - * the parameter/weight matrix b. Iterate over several epochs, where each epoch - * divides the training set into nB batches. Each batch is used to update the - * the parameter's weights. 
- * @param x the m-by-n input matrix (training data consisting of m input vectors) - * @param y the m-by-ny output matrix (training data consisting of m output vectors) - * @param bb the array of parameters (weights & biases) between every two adjacent layers - * @param eta_ the initial learning/convergence rate - * @param ff the array of activation function family for every two adjacent layers - */ - def optimize2 (x: MatrixD, y: MatrixD, - bb: NetParams, eta_ : Double, ff: Array [AFF]): (Double, Int) = - val permGen = permGenerator (x.dim) // permutation vector generator - val b = bb(0) // net-parameters: weight matrix and bias vector - val f = ff(0) // activation function - val bSize = min (hp("bSize").toInt, x.dim) // batch size - val maxEpochs = hp("maxEpochs").toInt // maximum number of epochs - val upLimit = hp("upLimit").toInt // limit on increasing lose - val β1 = hp("beta").toDouble // momentum hyper-parameter - val β2 = hp("beta2").toDouble // second momentum hyper-parameter - val nB = x.dim / bSize // the number of batches - var eta = eta_ // set initial learning rate - - var mt = new MatrixD (b.w.dim, b.w.dim2) // first moment estimate - var vt = new MatrixD (b.w.dim, b.w.dim2) // second raw moment estimate - - println (s"optimize2: bSize = $bSize, nB = $nB") - - var sse_best_ = -0.0 - var (go, epoch) = (true, 1) - cfor (go && epoch <= maxEpochs, epoch += 1) { // iterate over each epoch - val batches = permGen.igen.chop (nB) // permute indices & split into nB batches - - for ib <- batches do b -= updateWeight (x(ib), y(ib), epoch) // iteratively update weight matrix b - - val sse = (y - f.fM (b * x)).normFSq // recompute sum of squared errors - collectLoss (sse) // collect loss per epoch - debug ("optimize2", s"parameters for $epoch th epoch: sse = $sse") - val (b_best, sse_best) = stopWhen (Array (b), sse) - if b_best != null then - b.set (b_best (0)) - sse_best_ = sse_best // save best in sse_best_ - go = false - else - if epoch % ADJUST_PERIOD == 0 
then eta *= ADJUST_FACTOR // adjust the learning rate - end if - } // cfor - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Update the parameter/weight matrix b based on the current batch. - * Take a step in the direction opposite to the gradient. - * @see https://arxiv.org/pdf/1412.6980.pdf - * @param x the input matrix for the current batch - * @param y the output matrix for the current batch - */ - inline def updateWeight (x: MatrixD, y: MatrixD, t: Int): MatrixD = - val yp = f.fM (b * x) // Yp = f(XB) - val ee = yp - y // negative of the error matrix - val gt = f.dM (yp) *~ ee // delta matrix for y - - mt = mt * β1 + gt * (1 - β1) // update biased first moment estimate) - vt = vt * β2 + gt~^2 * (1 - β2) // update biased second raw moment estimate) - val mht = mt / (1 - β1~^t) // compute bias-corrected first moment estimate) - val vht = vt / (1 - β2~^t) // compute bias-corrected second raw moment estimate) - val d = mht / (vht~^0.5 + EPSILON) // parameter update correction matrix (corrected delta) - - val eta_o_sz = eta / x.dim // eta over the current batch size - x.transpose * d * eta_o_sz // gradient-based change in input-output weights (bup) - end updateWeight - - debug ("optimize2", s"parameters b = $b") - if go then ((y - f.fM (b * x)).normFSq, maxEpochs) // return best and number of epochs - else (sse_best_, epoch - upLimit) - end optimize2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given training data x and y for a 3-layer Neural Network, fit the parameters - * (weights and biases) a & b. Iterate over several epochs, where each epoch divides - * the training set into nB batches. Each batch is used to update the weights. 
- * @param x the m-by-n input matrix (training data consisting of m input vectors) - * @param y the m-by-ny output matrix (training data consisting of m output vectors) - * @param bb the array of parameters (weights & biases) between every two adjacent layers - * @param eta_ the initial learning/convergence rate - * @param ff the array of activation function family for every two adjacent layers - */ - def optimize3 (x: MatrixD, y: MatrixD, - bb: NetParams, eta_ : Double, ff: Array [AFF]): (Double, Int) = - val permGen = permGenerator (x.dim) // permutation vector generator - val (a, b) = (bb(0), bb(1)) // two sets of net-parameters - val (f, f1) = (ff(0), ff(1)) // two activation functions - val bSize = min (hp("bSize").toInt, x.dim) // batch size - val maxEpochs = hp("maxEpochs").toInt // maximum number of epochs - val upLimit = hp("upLimit").toInt // limit on increasing lose - val beta = hp("beta").toDouble // momentum hyper-parameter - val nB = x.dim / bSize // the number of batches - var eta = eta_ // counter for number of times moving up - var moa = new MatrixD (a.w.dim, a.w.dim2) // momentum matrix a - var mob = new MatrixD (b.w.dim, b.w.dim2) // momentum matrix b - - println (s"optimize3: bSize = $bSize, nB = $nB") - - var sse_best_ = -0.0 - var (go, epoch) = (true, 1) - cfor (go && epoch <= maxEpochs, epoch += 1) { // iterate over each epoch - val batches = permGen.igen.chop (nB) // permute indices & split into nB batches - - for ib <- batches do - val ab = updateWeight (x(ib), y(ib)) // iteratively update weight matrices a & b - a -= ab._1; b -= ab._2 - end for - - val sse = (y - b * f1.fM (f.fM (a * x))).normFSq - collectLoss (sse) // collect the loss per epoch -// debug ("optimize3", s"parameters for $epoch th epoch: sse = $sse") - val (b_best, sse_best) = stopWhen (Array (a, b), sse) - if b_best != null then - a.set (b_best(0)) - b.set (b_best(1)) - sse_best_ = sse_best // save best in sse_best_ - go = false - else - if epoch % ADJUST_PERIOD == 0 then 
eta *= ADJUST_FACTOR // adjust the learning rate - end if - } // cfor - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Compute the parameter/weight matrices a and b updates based on the current batch. - * A step in the direction opposite to the gradient. - * @param x the input matrix for the current batch - * @param y the output matrix for the current batch - */ - inline def updateWeight (x: MatrixD, y: MatrixD): (NetParam, NetParam) = - val z = f.fM (a * x) // Z = f(XA) - val yp = f1.fM (b * z) // Yp = f(ZB) - val ee = yp - y // negative of the error matrix - val d1 = f1.dM (yp) *~ ee // delta matrix for y - val d0 = f.dM (z) *~ (d1 * b.w.transpose) // delta matrix for z - - val eta_o_sz = eta / x.dim // eta over current batch size - moa = moa * beta + x.transpose * d0 * eta_o_sz // update momentum a - mob = mob * beta + z.transpose * d1 * eta_o_sz // update momentum b - (NetParam (moa, d0.mean * eta), // change to a parameters (weights and biases) - NetParam (mob, d1.mean * eta)) // change to b parameters (weights and biases) - end updateWeight - - debug ("optimize3", s"parameters a = $a \n b = $b") - if go then ((y - b * f1.fM (f.fM (a * x))).normFSq, maxEpochs) // return best and number of epochs - else (sse_best_, epoch - upLimit) - end optimize3 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given training data x and y, fit the parameter/weight matrices bw and - * bias vectors bi. Iterate over several epochs, where each epoch divides the - * training set into nB batches. Each batch is used to update the weights. 
- * @param x the m-by-n input matrix (training data consisting of m input vectors) - * @param y the m-by-ny output matrix (training data consisting of m output vectors) - * @param b the array of parameters (weights & biases) between every two adjacent layers - * @param eta_ the initial learning/convergence rate - * @param f the array of activation function family for every two adjacent layers - */ - def optimize (x: MatrixD, y: MatrixD, - b: NetParams, eta_ : Double, f: Array [AFF]): (Double, Int) = - val permGen = permGenerator (x.dim) // permutation vector generator - val bSize = min (hp("bSize").toInt, x.dim) // batch size - val maxEpochs = hp("maxEpochs").toInt // maximum number of epochs - val upLimit = hp("upLimit").toInt // limit on increasing lose - val beta = hp("beta").toDouble // momentum hyper-parameter - val nB = x.dim / bSize // the number of batches - var eta = eta_ // counter for number of times moving up - var sse = 0.0 // stores accumulated sse over batches for epoch - println (s"optimize: bSize = $bSize, nB = $nB") - - val nl = f.size // number of layers - val layers = 0 until nl // range for layers - val z = Array.ofDim [MatrixD] (nl+1) // array to store activations, layer by layer - val d = Array.ofDim [MatrixD] (nl) // array to store all deltas - val mo = Array.ofDim [MatrixD] (nl) // momentum array - for l <- layers do mo(l) = new MatrixD (b(l).w.dim, b(l).w.dim2) - - var sse_best_ = -0.0 - var (go, epoch) = (true, 1) - cfor (go && epoch <= maxEpochs, epoch += 1) { // iterate over each epoch - sse = 0.0 - val batches = permGen.igen.chop (nB) // permute indices &split into nB batches - - for ib <- batches do sse += updateWeight (x(ib), y(ib)) // update parameter array b - - collectLoss (sse) // collect the loss per epoch -// debug ("optimize", s" parameters for $epoch th epoch: b = $b, sse = $sse") - val (b_best, sse_best) = stopWhen (b, sse) - if b_best != null then - for l <- b.indices do b(l).set (b_best(l)) - sse_best_ = sse_best // save 
best in sse_best_ - go = false - else - if epoch % ADJUST_PERIOD == 0 then eta *= ADJUST_FACTOR // adjust the learning rate - end if - } // cfor - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Compute the parameter array b updates based on the current batch. - * A step in the direction opposite to the gradient. - * @param x the input matrix for the current batch - * @param y the output matrix for the current batch - */ - inline def updateWeight (x: MatrixD, y: MatrixD): Double = - z(0) = x // initial activation, which is the input matrix - for l <- layers do z(l+1) = f(l).fM (b(l) * z(l)) // feedforward and store all activations - - val yp = z.last // predicted value of y - val ee = yp - y // negative of the error matrix - d(nl-1) = f.last.dM (yp) *~ ee // delta for the last layer before output - for l <- nl-2 to 0 by -1 do - d(l) = f(l).dM (z(l+1)) *~ (d(l+1) * b(l+1).w.transpose) // deltas for all previous hidden layers - - val eta_o_sz = eta / x.dim // learning rate divided by size of mini-batch - for l <- layers do -// b(l).w *= 1.0 - eta * (lambda / x.dim) // regularization factor, weight decay - mo(l) = mo(l) * beta + z(l).transpose * d(l) * eta_o_sz // update l-th momentum - b(l) -= (mo(l), d(l).mean * eta) // update l-th parameter (weights and biases) - end for - - ee.normFSq // return the sse of this batch - end updateWeight - - debug ("optimize", s"parameters b = $b") - if go then (sse, maxEpochs) // return best and number of epochs - else (sse_best_, epoch - upLimit) - end optimize -end Optimizer_Adam - diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_Adam.scala.bak2 b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_Adam.scala.bak2 deleted file mode 100644 index 02244e2e5..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_Adam.scala.bak2 +++ /dev/null @@ -1,283 +0,0 @@ - 
-//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Hao Peng, Yousef Fekri Dabanloo - * @version 2.0 - * @date Sat Mar 5 22:38:03 EST 2022 - * @see LICENSE (MIT style license file). - * - * @note Optimization: ADAptive Moment estimation (Adam) Optimizer - */ - -// U N D E R D E V E L O P M E N T - -package scalation -package modeling -package neuralnet - -import scala.math.min - -import scalation.mathstat._ - -import Optimizer._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Optimizer_Adam` class provides functions to optimize the parameters (weights - * and biases) of Neural Networks with various numbers of layers. - * This optimizer implements a - * @see https://arxiv.org/pdf/1412.6980.pdf - */ -class Optimizer_Adam extends Optimizer: - - private val debug = debugf ("Optimizer_Adam", true) // debug function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given training data x and y for a 2-layer, multi-output Neural Network, fit - * the parameter/weight matrix b. Iterate over several epochs, where each epoch - * divides the training set into nB batches. Each batch is used to update the - * the parameter's weights. 
- * @param x the m-by-n input matrix (training data consisting of m input vectors) - * @param y the m-by-ny output matrix (training data consisting of m output vectors) - * @param bb the array of parameters (weights & biases) between every two adjacent layers - * @param eta_ the initial learning/convergence rate - * @param ff the array of activation function family for every two adjacent layers - */ - def optimize2 (x: MatrixD, y: MatrixD, - bb: NetParams, eta_ : Double, ff: Array [AFF]): (Double, Int) = - val permGen = permGenerator (x.dim) // permutation vector generator - val b = bb(0) // net-parameters: weight matrix and bias vector - val f = ff(0) // activation function - val bSize = min (hp("bSize").toInt, x.dim) // batch size - val maxEpochs = hp("maxEpochs").toInt // maximum number of epochs - val upLimit = hp("upLimit").toInt // limit on increasing lose - val β1 = hp("beta").toDouble // momentum hyper-parameter - val β2 = hp("beta2").toDouble // second momentum hyper-parameter - val nB = x.dim / bSize // the number of batches - var eta = eta_ // set initial learning rate - val η = eta -// val ν = hp("nu").toDouble // 0 => SGD, 1 => (normalized) SHB - var p = new MatrixD (b.w.dim, b.w.dim2) // momentum matrix (first moment estimate) - var v = new MatrixD (b.w.dim, b.w.dim2) // second raw moment estimate - println (s"optimize2: bSize = $bSize, nB = $nB") - - var sse_best_ = -0.0 - var (go, epoch) = (true, 1) - cfor (go && epoch <= maxEpochs, epoch += 1) { // iterate over each epoch - val batches = permGen.igen.chop (nB) // permute indices & split into nB batches - - for ib <- batches do b -= updateWeight (x(ib), y(ib), epoch) // iteratively update weight matrix b - - val sse = (y - f.fM (b * x)).normFSq // recompute sum of squared errors - collectLoss (sse) // collect loss per epoch - debug ("optimize2", s"parameters for $epoch th epoch: sse = $sse") - val (b_best, sse_best) = stopWhen (Array (b), sse) - if b_best != null then - b.set (b_best (0)) - 
sse_best_ = sse_best // save best in sse_best_ - go = false - else - if epoch % ADJUST_PERIOD == 0 then eta *= ADJUST_FACTOR // adjust the learning rate - end if - } // cfor - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Update the parameter/weight matrix b based on the current batch. - * Take a step in the direction opposite to the gradient. - * @see https://arxiv.org/pdf/1412.6980.pdf - * @param x the input matrix for the current batch - * @param y the output matrix for the current batch - * @param t the t-th power based on epoch - * - inline def updateWeight (x: MatrixD, y: MatrixD, t: Int): MatrixD = - val yp = f.fM (b * x) // Yp = f(XB) - val ee = yp - y // negative of the error matrix - val gt = f.dM (yp) *~ ee // delta matrix for y - - mt = mt * β1 + gt * (1 - β1) // update biased first moment estimate) - vt = vt * β2 + gt~^2 * (1 - β2) // update biased second raw moment estimate) - val mht = mt / (1 - β1~^t) // compute bias-corrected first moment estimate) - val vht = vt / (1 - β2~^t) // compute bias-corrected second raw moment estimate) - val d = mht / (vht~^0.5 + EPSILON) // parameter update correction matrix (corrected delta) - - val eta_o_sz = eta / x.dim // eta over the current batch size - x.transpose * d * eta_o_sz // gradient-based change in input-output weights (bup) - end updateWeight - */ - - inline def updateWeight (x: MatrixD, y: MatrixD, t: Int): MatrixD = - val α = η / x.dim // eta over the current batch size - val yp = f.fM (b * x) // prediction: Yp = f(XB) - val ε = yp - y // negative of error matrix - val δ = f.dM (yp) ⊙ ε // delta matrix for y - val g = x.Ƭ * δ // + b.w * l // gradient matrix - - p = g * (1 - β1) + p * β1 // update biased first moment estimate - v = v * β2 + g ~^ 2 * (1 - β2) // update biased second raw moment estimate - - val pH = p / (1 - β1 ~^ t) // compute bias-corrected first moment estimate - val vH = v / (1 - β2 ~^ t) // compute bias-corrected second raw moment estimate - 
- (pH / (vH ~^ 0.5 + EPSILON)) * α // gradient-based change in input-output weights (bup) - // + b.w * l is for AdamW - end updateWeight - - debug ("optimize2", s"parameters b = $b") - if go then ((y - f.fM (b * x)).normFSq, maxEpochs) // return best and number of epochs - else (sse_best_, epoch - upLimit) - end optimize2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given training data x and y for a 3-layer Neural Network, fit the parameters - * (weights and biases) a & b. Iterate over several epochs, where each epoch divides - * the training set into nB batches. Each batch is used to update the weights. - * @param x the m-by-n input matrix (training data consisting of m input vectors) - * @param y the m-by-ny output matrix (training data consisting of m output vectors) - * @param bb the array of parameters (weights & biases) between every two adjacent layers - * @param eta_ the initial learning/convergence rate - * @param ff the array of activation function family for every two adjacent layers - */ - def optimize3 (x: MatrixD, y: MatrixD, - bb: NetParams, eta_ : Double, ff: Array [AFF]): (Double, Int) = - val permGen = permGenerator (x.dim) // permutation vector generator - val (a, b) = (bb(0), bb(1)) // two sets of net-parameters - val (f, f1) = (ff(0), ff(1)) // two activation functions - val bSize = min (hp("bSize").toInt, x.dim) // batch size - val maxEpochs = hp("maxEpochs").toInt // maximum number of epochs - val upLimit = hp("upLimit").toInt // limit on increasing lose - val beta = hp("beta").toDouble // momentum hyper-parameter - val nB = x.dim / bSize // the number of batches - var eta = eta_ // counter for number of times moving up - var moa = new MatrixD (a.w.dim, a.w.dim2) // momentum matrix a - var mob = new MatrixD (b.w.dim, b.w.dim2) // momentum matrix b - - println (s"optimize3: bSize = $bSize, nB = $nB") - - var sse_best_ = -0.0 - var (go, epoch) = (true, 1) - cfor (go && epoch <= maxEpochs, epoch += 
1) { // iterate over each epoch - val batches = permGen.igen.chop (nB) // permute indices & split into nB batches - - for ib <- batches do - val ab = updateWeight (x(ib), y(ib)) // iteratively update weight matrices a & b - a -= ab._1; b -= ab._2 - end for - - val sse = (y - b * f1.fM (f.fM (a * x))).normFSq - collectLoss (sse) // collect the loss per epoch -// debug ("optimize3", s"parameters for $epoch th epoch: sse = $sse") - val (b_best, sse_best) = stopWhen (Array (a, b), sse) - if b_best != null then - a.set (b_best(0)) - b.set (b_best(1)) - sse_best_ = sse_best // save best in sse_best_ - go = false - else - if epoch % ADJUST_PERIOD == 0 then eta *= ADJUST_FACTOR // adjust the learning rate - end if - } // cfor - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Compute the parameter/weight matrices a and b updates based on the current batch. - * A step in the direction opposite to the gradient. - * @param x the input matrix for the current batch - * @param y the output matrix for the current batch - */ - inline def updateWeight (x: MatrixD, y: MatrixD): (NetParam, NetParam) = - val z = f.fM (a * x) // Z = f(XA) - val yp = f1.fM (b * z) // Yp = f(ZB) - val ee = yp - y // negative of the error matrix - val d1 = f1.dM (yp) *~ ee // delta matrix for y - val d0 = f.dM (z) *~ (d1 * b.w.transpose) // delta matrix for z - - val eta_o_sz = eta / x.dim // eta over current batch size - moa = moa * beta + x.transpose * d0 * eta_o_sz // update momentum a - mob = mob * beta + z.transpose * d1 * eta_o_sz // update momentum b - (NetParam (moa, d0.mean * eta), // change to a parameters (weights and biases) - NetParam (mob, d1.mean * eta)) // change to b parameters (weights and biases) - end updateWeight - - debug ("optimize3", s"parameters a = $a \n b = $b") - if go then ((y - b * f1.fM (f.fM (a * x))).normFSq, maxEpochs) // return best and number of epochs - else (sse_best_, epoch - upLimit) - end optimize3 - - 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given training data x and y, fit the parameter/weight matrices bw and - * bias vectors bi. Iterate over several epochs, where each epoch divides the - * training set into nB batches. Each batch is used to update the weights. - * @param x the m-by-n input matrix (training data consisting of m input vectors) - * @param y the m-by-ny output matrix (training data consisting of m output vectors) - * @param b the array of parameters (weights & biases) between every two adjacent layers - * @param eta_ the initial learning/convergence rate - * @param f the array of activation function family for every two adjacent layers - */ - def optimize (x: MatrixD, y: MatrixD, - b: NetParams, eta_ : Double, f: Array [AFF]): (Double, Int) = - val permGen = permGenerator (x.dim) // permutation vector generator - val bSize = min (hp("bSize").toInt, x.dim) // batch size - val maxEpochs = hp("maxEpochs").toInt // maximum number of epochs - val upLimit = hp("upLimit").toInt // limit on increasing lose - val beta = hp("beta").toDouble // momentum hyper-parameter - val nB = x.dim / bSize // the number of batches - var eta = eta_ // counter for number of times moving up - var sse = 0.0 // stores accumulated sse over batches for epoch - println (s"optimize: bSize = $bSize, nB = $nB") - - val nl = f.size // number of layers - val layers = 0 until nl // range for layers - val z = Array.ofDim [MatrixD] (nl+1) // array to store activations, layer by layer - val d = Array.ofDim [MatrixD] (nl) // array to store all deltas - val mo = Array.ofDim [MatrixD] (nl) // momentum array - for l <- layers do mo(l) = new MatrixD (b(l).w.dim, b(l).w.dim2) - - var sse_best_ = -0.0 - var (go, epoch) = (true, 1) - cfor (go && epoch <= maxEpochs, epoch += 1) { // iterate over each epoch - sse = 0.0 - val batches = permGen.igen.chop (nB) // permute indices &split into nB batches - - for ib <- batches do sse += updateWeight (x(ib), 
y(ib)) // update parameter array b - - collectLoss (sse) // collect the loss per epoch -// debug ("optimize", s" parameters for $epoch th epoch: b = $b, sse = $sse") - val (b_best, sse_best) = stopWhen (b, sse) - if b_best != null then - for l <- b.indices do b(l).set (b_best(l)) - sse_best_ = sse_best // save best in sse_best_ - go = false - else - if epoch % ADJUST_PERIOD == 0 then eta *= ADJUST_FACTOR // adjust the learning rate - end if - } // cfor - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Compute the parameter array b updates based on the current batch. - * A step in the direction opposite to the gradient. - * @param x the input matrix for the current batch - * @param y the output matrix for the current batch - */ - inline def updateWeight (x: MatrixD, y: MatrixD): Double = - z(0) = x // initial activation, which is the input matrix - for l <- layers do z(l+1) = f(l).fM (b(l) * z(l)) // feedforward and store all activations - - val yp = z.last // predicted value of y - val ee = yp - y // negative of the error matrix - d(nl-1) = f.last.dM (yp) *~ ee // delta for the last layer before output - for l <- nl-2 to 0 by -1 do - d(l) = f(l).dM (z(l+1)) *~ (d(l+1) * b(l+1).w.transpose) // deltas for all previous hidden layers - - val eta_o_sz = eta / x.dim // learning rate divided by size of mini-batch - for l <- layers do -// b(l).w *= 1.0 - eta * (lambda / x.dim) // regularization factor, weight decay - mo(l) = mo(l) * beta + z(l).transpose * d(l) * eta_o_sz // update l-th momentum - b(l) -= (mo(l), d(l).mean * eta) // update l-th parameter (weights and biases) - end for - - ee.normFSq // return the sse of this batch - end updateWeight - - debug ("optimize", s"parameters b = $b") - if go then (sse, maxEpochs) // return best and number of epochs - else (sse_best_, epoch - upLimit) - end optimize -end Optimizer_Adam - diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_Adam.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_Adam.tasty deleted file mode 100644 index 5f49f6a6a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_Adam.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_SGD.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_SGD.class deleted file mode 100644 index c64794b45..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_SGD.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_SGD.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_SGD.tasty deleted file mode 100644 index cd2241177..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_SGD.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_SGDM.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_SGDM.class deleted file mode 100644 index c2f3070af..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_SGDM.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_SGDM.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_SGDM.tasty deleted file mode 100644 index 5a2de4eec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/Optimizer_SGDM.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV$.class deleted file mode 100644 index 311f38a79..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV$package$.class 
b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV$package$.class deleted file mode 100644 index a0ffde087..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV$package.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV$package.class deleted file mode 100644 index 3887a7a70..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV$package.tasty deleted file mode 100644 index d2c7691a6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV.class deleted file mode 100644 index e26dcf5a4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV.tasty deleted file mode 100644 index de91fd4b1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/PredictorMV.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV$.class deleted file mode 100644 index e1a8e08d9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV$package$.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV$package$.class deleted file mode 100644 index beea8ea18..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV$package.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV$package.class deleted file mode 100644 index 3ed99b994..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV$package.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV$package.tasty deleted file mode 100644 index ee98675ef..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV.class deleted file mode 100644 index d4e7fc2ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV.tasty deleted file mode 100644 index 02474e801..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/RegressionMV.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/StoppingRule.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/StoppingRule.class deleted file mode 100644 index fe37122df..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/StoppingRule.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/StoppingRule.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/StoppingRule.tasty deleted file mode 100644 index a2297be8a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/StoppingRule.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest.class deleted file mode 100644 index d7c465f7f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest.tasty deleted file mode 100644 index 4fab07486..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest2.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest2.class deleted file mode 100644 index f3fbe7055..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest2.tasty deleted file mode 100644 index 82078053e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest3.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest3.class deleted file mode 100644 index a7b1c71e3..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest3.tasty deleted file mode 100644 index bf8e86ce8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_1DTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_2DTest2.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_2DTest2.class deleted file mode 100644 index af2e18af2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_2DTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_2DTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_2DTest2.tasty deleted file mode 100644 index 524ce3292..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_2DTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_2DTest3.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_2DTest3.class deleted file mode 100644 index 412cf6d1b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_2DTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_2DTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_2DTest3.tasty deleted file mode 100644 index 2a0074610..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/cNN_2DTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest.class deleted file mode 100644 index 5068f96b0..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest.tasty deleted file mode 100644 index ad6ce4acb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest2.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest2.class deleted file mode 100644 index f3b2c7e09..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest2.tasty deleted file mode 100644 index 1993f09dd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest3.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest3.class deleted file mode 100644 index c3bef5de0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest3.tasty deleted file mode 100644 index 4680e5ebf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_1DTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest.class deleted file mode 100644 index 
084aa62c0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest.tasty deleted file mode 100644 index 086c2a882..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest2.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest2.class deleted file mode 100644 index 816158556..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest2.tasty deleted file mode 100644 index 26566d10b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest3.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest3.class deleted file mode 100644 index 9ba907b37..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest3.tasty deleted file mode 100644 index 354e8ce22..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/coFilter_2DTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test.class 
deleted file mode 100644 index 8c7939b23..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test.tasty deleted file mode 100644 index 32d7939e9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test2.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test2.class deleted file mode 100644 index 60a18eaa4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test2.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test2.tasty deleted file mode 100644 index 1137fde56..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test3.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test3.class deleted file mode 100644 index 86babd3a4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test3.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test3.tasty deleted file mode 100644 index b00d6450e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test4.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test4.class deleted file mode 100644 index 
d3bf63600..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test4.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test4.tasty deleted file mode 100644 index 729058324..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test5.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test5.class deleted file mode 100644 index 65a2b7459..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test5.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test5.tasty deleted file mode 100644 index 912b59680..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test6.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test6.class deleted file mode 100644 index 992baea5d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test6.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test6.tasty deleted file mode 100644 index a01c908d4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/eLM_3L1Test6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest.class deleted file mode 100644 index 
5d343d674..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest.tasty deleted file mode 100644 index 833aed7dd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest2.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest2.class deleted file mode 100644 index baea87a7e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest2.tasty deleted file mode 100644 index 7ce02e880..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest3.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest3.class deleted file mode 100644 index e0e598dd3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest3.tasty deleted file mode 100644 index ec6055d06..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/example_ConcreteTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/index.html 
b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/index.html deleted file mode 100644 index d9c19ef1c..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/index.html +++ /dev/null @@ -1,23 +0,0 @@ - - -

    Source files in neuralnet Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest.class deleted file mode 100644 index 5e61c4c3b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest.tasty deleted file mode 100644 index 7b46f4edf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest2.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest2.class deleted file mode 100644 index aa474e47c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest2.tasty deleted file mode 100644 index 6563f72c2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest3.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest3.class deleted file mode 100644 index 8c34bbbd0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest3.tasty deleted file mode 100644 index c8536ed01..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest4.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest4.class deleted file mode 100644 index 7dfc4a269..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest4.tasty deleted file mode 100644 index ed4a9fd00..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest5.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest5.class deleted file mode 100644 index bd56a714c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest5.tasty deleted file mode 100644 index cb086b3e4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest6.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest6.class deleted file mode 100644 index a8f094b7b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest6.tasty 
deleted file mode 100644 index e685890e3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest7.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest7.class deleted file mode 100644 index 1c935a63f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest7.tasty deleted file mode 100644 index 9d1e05106..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest8.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest8.class deleted file mode 100644 index c75ae2de9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest8.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest8.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest8.tasty deleted file mode 100644 index 6fcd4bc8d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest8.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest9.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest9.class deleted file mode 100644 index 67c0e2584..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest9.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest9.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest9.tasty deleted file mode 100644 index a30833cbe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2LTest9.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2L_CkTest.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2L_CkTest.class deleted file mode 100644 index d305f53ff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2L_CkTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2L_CkTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2L_CkTest.tasty deleted file mode 100644 index 2c365a374..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_2L_CkTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest.class deleted file mode 100644 index fbf442e72..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest.tasty deleted file mode 100644 index f086c4dbe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest10.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest10.class deleted file mode 100644 index 057c47fd4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest10.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest10.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest10.tasty deleted file mode 100644 index 6da8d5fb4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest10.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest11.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest11.class deleted file mode 100644 index 987f1ea0e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest11.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest11.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest11.tasty deleted file mode 100644 index 0fdffab80..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest11.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest12.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest12.class deleted file mode 100644 index dd9d397fb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest12.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest12.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest12.tasty deleted file mode 100644 index cc90a5925..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest12.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest2.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest2.class deleted file mode 100644 index a1f576d58..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest2.tasty deleted file mode 100644 index a908f6468..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest3.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest3.class deleted file mode 100644 index 1ceea2240..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest3.tasty deleted file mode 100644 index 053258ca1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest4.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest4.class deleted file mode 100644 index f28d99225..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest4.tasty deleted file mode 100644 index ebeba883d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest5.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest5.class 
deleted file mode 100644 index 829b2e49b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest5.tasty deleted file mode 100644 index b93613c8b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest6.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest6.class deleted file mode 100644 index 6b60455a6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest6.tasty deleted file mode 100644 index b273df156..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest7.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest7.class deleted file mode 100644 index 7410141c8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest7.tasty deleted file mode 100644 index ed0147cd8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest8.class 
b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest8.class deleted file mode 100644 index 016efc3d2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest8.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest8.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest8.tasty deleted file mode 100644 index 03e2f8d41..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest8.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest9.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest9.class deleted file mode 100644 index 54cfbd02e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest9.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest9.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest9.tasty deleted file mode 100644 index e05c4261e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3LTest9.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3L_C2Test.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3L_C2Test.class deleted file mode 100644 index 9b2a5af34..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3L_C2Test.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3L_C2Test.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3L_C2Test.tasty deleted file mode 100644 index cafcd5755..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_3L_C2Test.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTTest.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTTest.class deleted file mode 100644 index 386720e0e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTTest.tasty deleted file mode 100644 index 84736c70a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTTest2.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTTest2.class deleted file mode 100644 index 52491a5ce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTTest2.tasty deleted file mode 100644 index 238c15b15..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest.class deleted file mode 100644 index 14aa441a9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest.tasty deleted file mode 100644 index 3c703391c..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest2.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest2.class deleted file mode 100644 index ac2660dc5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest2.tasty deleted file mode 100644 index 807e3da28..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest3.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest3.class deleted file mode 100644 index 67061ed72..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest3.tasty deleted file mode 100644 index 4c0f84912..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest4.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest4.class deleted file mode 100644 index 94d53620e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest4.tasty deleted 
file mode 100644 index d3cabf20d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest5.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest5.class deleted file mode 100644 index 5a2eb77f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest5.tasty deleted file mode 100644 index 3b04f008a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest6.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest6.class deleted file mode 100644 index 02881ee2e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest6.tasty deleted file mode 100644 index 56d9afee2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest7.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest7.class deleted file mode 100644 index ee3941965..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest7.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest7.tasty deleted file mode 100644 index 5e9012a8d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/neuralNet_XLTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/old/NeuralNet_3L.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/old/NeuralNet_3L.scala.bak deleted file mode 100644 index 89c1b492d..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/old/NeuralNet_3L.scala.bak +++ /dev/null @@ -1,607 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Fri Mar 16 15:13:38 EDT 2018 - * @see LICENSE (MIT style license file). - * - * @title Model: Neural Network with 3 Layers (input, hidden and output layers) - * - * @see hebb.mit.edu/courses/9.641/2002/lectures/lecture03.pdf - */ - -package scalation -package modeling -package neuralnet - -import scala.runtime.ScalaRunTime.stringOf - -import scala.math.exp -import scalation.mathstat._ - -import ActivationFun._ -import Initializer._ -import Optimizer._ - -// FIX - make sure bb vs. a & b are in agreement => maybe should remove NeuralNet_3L and just XL instead - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `NeuralNet_3L` class supports multi-output, 3-layer (input, hidden and output) - * Neural-Networks. It can be used for both classification and prediction, - * depending on the activation functions used. Given several input vectors and output - * vectors (training data), fit the parameters a and b connecting the layers, - * so that for a new input vector v, the net can predict the output value, i.e., - * yp = f1 (b * f (a * v)) - * where f and f1 are the activation functions and the parameter a and b - * are the parameters between input-hidden and hidden-output layers. 
- * Unlike `NeuralNet_2L` which adds input x0 = 1 to account for the intercept/bias, - * `NeuralNet_3L` explicitly adds bias. - * @param x the m-by-n input/data matrix (training data consisting of m input vectors) - * @param y the m-by-ny output/response matrix (training data consisting of m output vectors) - * @param fname_ the feature/variable names (if null, use x_j's) - * @param nz the number of nodes in hidden layer (-1 => use default formula) - * @param hparam the hyper-parameters for the model/network - * @param f the activation function family for layers 1->2 (input to output) - * @param f1 the activation function family for layers 2->3 (hidden to output) - * @param itran the inverse transformation function returns response matrix to original scale - */ -class NeuralNet_3L (x: MatrixD, y: MatrixD, fname_ : Array [String] = null, - private var nz: Int = -1, hparam: HyperParameter = Optimizer.hp, - f: AFF = f_sigmoid, f1: AFF = f_id, - val itran: FunctionM2M = null) - extends PredictorMV (x, y, fname_, hparam) - with Fit (dfm = x.dim2, df = x.dim - x.dim2): // under-estimate of degrees of freedom - - private val debug = debugf ("NeuralNet_3L", false) // debug function - private val eta = hp("eta").toDouble // learning rate - private val bSize = hp("bSize").toInt // batch size - private val maxEpochs = hp("maxEpochs").toInt // maximum number of training epochs/iterations -// val opti = new Optimizer_SGD () // parameter optimizer SGD - val opti = new Optimizer_SGDM () // parameter optimizer SGDM - - // Guidelines for setting the number of nodes in hidden layer: - if nz < 1 then nz = 2 * x.dim2 + 1 // [1] default number of nodes for hidden layer -// if nz < 1 then nz = 2 * x.dim2 + y.dim2 // [2] default number of nodes for hidden layer - - private val (n, ny) = (x.dim2, y.dim2) - private val a = new NetParam (weightMat (n, nz), new VectorD (nz)) // parameters (weights & biases) in to hid - b = new NetParam (weightMat (nz, ny), new VectorD (ny)) // parameters 
(weights & biases) hid to out - bb = Array (a, b.asInstanceOf [NetParam]) // inside array - - modelName = s"NeuralNet_3L_${f.name}_${f1.name}" - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given training data x_ and y_, fit the parameters bb. - * Minimize the error in the prediction by adjusting the parameters bb. - * @param x_ the training/full data/input matrix - * @param y_ the training/full response/output matrix - */ - def train (x_ : MatrixD = x, y_ : MatrixD = y): Unit = - val epochs = opti.optimize3 (x_, y_, bb, eta, Array (f, f1)) // optimize parameters bb - println (s"ending epoch = $epochs") - estat.tally (epochs._2) - assert (bb(0) =~ a) - assert (bb(1) =~ b.asInstanceOf [NetParam]) - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given training data x_ and y_, fit the parameters bb. - * Minimize the error in the prediction by adjusting the parameters bb. - * This version preforms an interval search for the best eta value. - * @param x_ the training/full data/input matrix - * @param y_ the training/full response/output matrix - */ - override def train2 (x_ : MatrixD = x, y_ : MatrixD = y): Unit = - val etaI = (0.25 * eta, 4.0 * eta) // quarter to four times eta - val epochs = opti.auto_optimize (x_, y_, bb, etaI, Array (f, f1), opti.optimize3) // optimize parameters bb - println (s"ending epoch = $epochs") - estat.tally (epochs._2) - end train2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test a predictive model y_ = f(x_) + e and return its QoF vector. - * Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. 
- * @param x_ the testing/full data/input matrix (defaults to full x) - * @param y_ the testing/full response/output matrix (defaults to full y) - */ - def test (x_ : MatrixD = x, y_ : MatrixD = y): (MatrixD, MatrixD) = - println (s"T E S T: bb = ${stringOf (bb)}") - val yp = predict (x_) // make predictions - val yy = if itran == null then y_ else itran (y_) // undo scaling, if used - e = yy - yp // RECORD the residuals/errors (@see `Predictor`) - val qof = MatrixD (for k <- yy.indices2 yield diagnose (yy(?, k), yp(?, k))).transpose - (yp, qof) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Make plots for each output/response variable (column of matrix y). - * Overriden as the response matrix may be transformed or rescaled. - * @param yy_ the testing/full actual response/output matrix (defaults to full y) - * @param yp the testing/full predicted response/output matrix (defaults to full y) - */ - override def makePlots (yy_ : MatrixD, yp: MatrixD): Unit = - val yy = if itran == null then yy_ else itran (yy_) // undo scaling, if used - val (ryy, ryp) = orderByYY (yy, yp) // order by yy - for k <- ryy.indices2 do - new Plot (null, ryy(?, k), ryp(?, k), s"$modelName: y$k black/actual vs. red/predicted") - end for - end makePlots - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given a new input vector v, predict the output/response vector f(v). 
- * @param v the new input vector - */ - def predict (v: VectorD): VectorD = -// val yp = f1.f_ (b dot f.f_ (a dot v)) // scaled prediction - val yp = f1.f_ (bb(1) dot f.f_ (bb(0) dot v)) // scaled prediction - if itran == null then yp - else - debug ("predict", "vector has itran") - itran (MatrixD (yp))(0) // back to original scale - end if - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given an input matrix v, predict the output/response matrix f(v). - * @param v the input matrix - */ - override def predict (v: MatrixD = x): MatrixD = -// val yp = f1.fM (b * (f.fM (a * v))) // scaled predictions - val yp = f1.fM (bb(1) * (f.fM (bb(0) * v))) // scaled predictions - if itran == null then yp - else - debug ("predict", "matrix has itran") - itran (yp) // back to original scale - end if - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build a sub-model that is restricted to the given columns of the data matrix. - * @param x_cols the columns that the new model is restricted to - */ - def buildModel (x_cols: MatrixD): NeuralNet_3L = - new NeuralNet_3L (x_cols, y, null, -1, hparam, f, f1, itran) - end buildModel - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Produce a QoF summary for a model with diagnostics for each predictor x_j - * and the overall Quality of Fit (QoF). 
- * FIX - only known to be valid for id activation function - * @see https://community.wolfram.com/groups/-/m/t/1319745 - * @param x_ the testing/full data/input matrix - * @param fname_ the array of feature/variable names - * @param b_ the parameters/coefficients for the model - */ - def summary2 (x_ : MatrixD = getX, fname_ : Array [String] = fname, - b_ : MatrixD = parameter): String = -// summary (x_, fname_, b_(?, 0), null) // summary from `Fit` - "summary2 not implemented yet" - end summary2 - -end NeuralNet_3L - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `NeuralNet_3L` companion object provides factory methods for creating three-layer - * (one hidden layer) neural networks. Note, 'scale' is defined in `Scaling`. - */ -object NeuralNet_3L extends Scaling: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `NeuralNet_3L` with automatic rescaling from a combined data matrix. - * @param xy the combined input and output matrix - * @param fname the feature/variable names - * @param nz the number of nodes in hidden layer (-1 => use default formula) - * @param hparam the hyper-parameters - * @param f the activation function family for layers 1->2 (input to output) - * @param f1 the activation function family for layers 2->3 (hidden to output) - * @param col the first designated response column (defaults to the last column) - */ - def apply (xy: MatrixD, fname: Array [String] = null, - nz: Int = -1, hparam: HyperParameter = Optimizer.hp, - f: AFF = f_sigmoid, f1: AFF = f_id) - (col: Int = xy.dim2 - 1): NeuralNet_3L = - var itran: FunctionM2M = null // inverse transform -> original scale - val (x, y) = (xy(?, 0 until col), xy(?, col until xy.dim2)) - - val x_s = if scale then rescaleX (x, f) - else x - val y_s = if f1.bounds != null then { val y_i = rescaleY (y, f1); itran = y_i._2; y_i._1 } - else y - -// println (s" scaled: x = $x_s \n scaled y = $y_s") - new 
NeuralNet_3L (x_s, y_s, fname, nz, hparam, f, f1, itran) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `NeuralNet_3L` with automatic rescaling from a data matrix and response vector. - * @param x the input/data matrix - * @param y the output/response matrix - * @param fname the feature/variable names - * @param nz the number of nodes in hidden layer (-1 => use default formula) - * @param hparam the hyper-parameters - * @param f the activation function family for layers 1->2 (input to output) - * @param f1 the activation function family for layers 2->3 (hidden to output) - */ - def rescale (x: MatrixD, y: MatrixD, fname: Array [String] = null, - nz: Int = -1, hparam: HyperParameter = Optimizer.hp, - f: AFF = f_sigmoid, f1: AFF = f_id): NeuralNet_3L = - var itran: FunctionM2M = null // inverse transform -> original scale - - val x_s = if scale then rescaleX (x, f) - else x - val y_s = if f1.bounds != null then { val y_i = rescaleY (y, f1); itran = y_i._2; y_i._1 } - else y - -// println (s" scaled: x = $x_s \n scaled y = $y_s") - new NeuralNet_3L (x_s, y_s, fname, nz, hparam, f, f1, itran) - end rescale - -end NeuralNet_3L - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `neuralNet_3LTest` main function is used to test the `NeuralNet_3L` class. - * Try changing the eta and bSize hyper-parameters, as well as the activation function. 
- * > runMain scalation.modeling.neuralnet.neuralNet_3LTest - */ -@main def neuralNet_3LTest (): Unit = - - val x = MatrixD ((12, 3), 1.0, 0.2, 0.3, // training data - input matrix (m=12 vectors) - 1.0, 0.2, 0.5, - 1.0, 0.2, 0.7, - 1.0, 0.3, 0.3, - 1.0, 0.3, 0.5, - 1.0, 0.3, 0.7, - - 1.0, 0.4, 0.3, - 1.0, 0.4, 0.3, - 1.0, 0.4, 0.7, - 1.0, 0.5, 0.5, - 1.0, 0.5, 0.3, - 1.0, 0.5, 0.7) - - val y0 = x.map (x_i => sigmoid (VectorD (2.0, 1.0, 2.0) dot (x_i))) - val y1 = x.map (x_i => sigmoid (VectorD (2.0, 2.0, 2.0) dot (x_i))) - val y = MatrixD (y0, y1).transpose - - println (s"input matrix x = $x") - println (s"output matrix y = $y") - - Optimizer.hp("eta") = 3.0 // set the learning rate (large for small dataset) - Optimizer.hp("bSize") = 6.0 // set the batch size (small for small dataset) -// val mod = new NeuralNet_3L (x, y) // create NeuralNet_3L model with sigmoid (default) - val mod = new NeuralNet_3L (x, y, f = f_tanh) // create NeuralNet_3L model with tanh - - banner ("Small Example - NeuralNet_3L: trainNtest") - mod.trainNtest ()() // train and test the model - mod.opti.plotLoss ("NeuralNet_3L") // loss function vs epochs - - banner ("Small Example - NeuralNet_3L: trainNtest2") - mod.trainNtest2 ()() // train and test the model - with auto-tuning - mod.opti.plotLoss ("NeuralNet_3L") // loss function vs epochs - println (mod.summary2 ()) // parameter/coefficient statistics - - banner ("neuralNet_3LTest: Compare with Linear Regression - first column of y") - val rg0 = new Regression (x, y0) // create a Regression model - rg0.trainNtest ()() // train and test the model - println (rg0.summary ()) // parameter/coefficient statistics - - banner ("neuralNet_3LTest: Compare with Linear Regression - second column of y") - val rg1 = new Regression (x, y1) // create a Regression model - rg1.trainNtest ()() // train and test the model - println (rg1.summary ()) // parameter/coefficient statistics - -end neuralNet_3LTest - - 
-//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `neuralNet_3LTest2` main function tests the `NeuralNet_3L` class using the - * Concrete dataset. It has three outputs/response variables. - * There are two ways to create the model: - * new NeuralNet_3L (x, y, x_fname) - depending on act. function user must rescale - * NeuralNet_3L.rescale (x, y, x_fname) - automatically rescales, assumes matrix response - * > runMain scalation.modeling.neuralnet.neuralNet_3LTest2 - */ -@main def neuralNet_3LTest2 (): Unit = - - import Example_Concrete.{x, y, x_fname} // don't include intercept, uses biases instead - -// println (s"x = $x") -// println (s"y = $y") - println (s"x_fname = ${stringOf (x_fname)}") - -// val mod = new NeuralNet_3L (x, y, x_fname) // create model without intercept - val mod = NeuralNet_3L.rescale (x, y, x_fname) // create model without intercept- rescales - - banner ("Concrete - NeuralNet_3L: trainNtest") - mod.trainNtest ()() // train and test the model - mod.opti.plotLoss ("NeuralNet_3L") // loss function vs epochs - - banner ("Concrete - NeuralNet_3L: trainNtest2") - mod.trainNtest2 ()() // train and test the model - with auto-tuning - mod.opti.plotLoss ("NeuralNet_3L") // loss function vs epochs - println (mod.summary2 ()) // parameter/coefficient statistics - - banner ("Concrete - NeuralNet_3L: validate") - println (FitM.showFitMap (mod.validate ()(), QoF.values.map (_.toString))) - - banner ("Concrete - NeuralNet_3L: crossValidate") - val stats = mod.crossValidate () - FitM.showQofStatTable (stats) - -end neuralNet_3LTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `neuralNet_3LTest3` main function tests the `NeuralNet_3L` class using the - * AutoMPG dataset. There are two ways to create the model: - * new NeuralNet_3L (x, yy, x_fname) - depending on act. 
function user must rescale - * NeuralNet_3L.rescale (x, yy, x_fname) - automatically rescales, assumes matrix response - * > runMain scalation.modeling.neuralnet.neuralNet_3LTest3 - */ -@main def neuralNet_3LTest3 (): Unit = - - import Example_AutoMPG.{x, yy, x_fname} // don't include intercept, uses biases instead - -// println (s"x = $x") -// println (s"yy = $yy") - println (s"x_fname = ${stringOf (x_fname)}") - - Optimizer.hp("eta") = 5.0 -// val mod = new NeuralNet_3L (x, yy, x_fname) // create model without intercept - val mod = NeuralNet_3L.rescale (x, yy, x_fname) // create model without intercept - rescales - - banner ("AutoMPG - NeuralNet_3L: trainNtest") - mod.trainNtest ()() // train and test the model - mod.opti.plotLoss ("NeuralNet_3L") // loss function vs epochs - - banner ("AutoMPG - NeuralNet_3L: trainNtest2") - mod.trainNtest2 ()() // train and test the model - with auto-tuning -// println (mod.summary2 ()) // parameter/coefficient statistics - mod.opti.plotLoss ("NeuralNet_3L") // loss function vs epochs - - banner ("AutoMPG - NeuralNet_3L: validate") - println (FitM.showFitMap (mod.validate ()(), QoF.values.map (_.toString))) - -/* - banner ("AutoMPG - NeuralNet_3L: crossValidate") - val stats = mod.crossValidate () - FitM.showQofStatTable (stats) -*/ - -end neuralNet_3LTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `neuralNet_3LTest4` main function tests the `NeuralNet_3L` class using the - * AutoMPG dataset. It tests forward selection. 
- * > runMain scalation.modeling.neuralnet.neuralNet_3LTest4 - */ -@main def neuralNet_3LTest4 (): Unit = - - import Example_AutoMPG.{x, yy, x_fname} // don't include intercept, uses biases instead - -// println (s"x = $x") -// println (s"yy = $yy") - println (s"x_fname = ${stringOf (x_fname)}") - - banner ("AutoMPG NeuralNet_3L") -// val mod = new NeuralNet_3L (x, yy, x_fname) // create model without intercept - val mod = NeuralNet_3L.rescale (x, yy, x_fname) // create model without intercept - rescales -// mod.trainNtest ()() // train and test the model - mod.trainNtest2 ()() // train and test the model - with auto-tuning - println (mod.summary2 ()) // parameter/coefficient statistics - - banner ("Feature Selection Technique: Forward") - val (cols, rSq) = mod.forwardSelAll () // R^2, R^2 bar, smape, R^2 cv -// val (cols, rSq) = mod.backwardElimAll () // R^2, R^2 bar, smape, R^2 cv - val k = cols.size - println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "smape", "R^2 cv"), - s"R^2 vs n for ${mod.modelName}", lines = true) - println (s"rSq = $rSq") - -end neuralNet_3LTest4 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `neuralNet_3LTest5` main function tests the `NeuralNet_3L` class using the AutoMPG - * dataset. It tests forward, backward and stepwise selection. 
- * > runMain scalation.modeling.neuralnet.neuralNet_3LTest5 - */ -@main def neuralNet_3LTest5 (): Unit = - - import Example_AutoMPG.{x, yy, x_fname} // don't include intercept, uses biases instead - -// println (s"x = $x") -// println (s"yy = $yy") - - banner ("AutoMPG NeuralNet_3L") -// val mod = new NeuralNet_3L (x, yy, x_fname) // create model without intercept - val mod = NeuralNet_3L.rescale (x, yy, x_fname) // create model without intercept - rescales -// mod.trainNtest ()() // train and test the model - mod.trainNtest2 ()() // train and test the model - with auto-tuning - println (mod.summary2 ()) // parameter/coefficient statistics - - banner ("Cross-Validation") - FitM.showQofStatTable (mod.crossValidate ()) - - println (s"x_fname = ${stringOf (x_fname)}") - - for tech <- SelectionTech.values do - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, smape, R^2 cv - val k = cols.size - println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "smape", "R^2 cv"), - s"R^2 vs n for ${mod.modelName} with $tech", lines = true) - println (s"$tech: rSq = $rSq") - end for - -end neuralNet_3LTest5 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `neuralNet_3LTest6` main function tests the `NeuralNet_3L` class using the - * AutoMPG dataset. It tries all activation functions of the form (f, id), - * Ideally, eta should be initialized separately for each activation function. 
- * > runMain scalation.modeling.neuralnet.neuralNet_3LTest6 - */ -@main def neuralNet_3LTest6 (): Unit = - - import Example_AutoMPG.{x, yy, x_fname} // don't include intercept, uses biases instead - -// println (s"x = $x") -// println (s"yy = $yy") - println (s"x_fname = ${stringOf (x_fname)}") - - Optimizer.hp ("eta") = 0.025 // some activation functions need smaller eta - for f <- f_aff do // try all activation functions for first layer - banner (s"AutoMPG NeuralNet_3L with ${f.name}") - val mod = NeuralNet_3L.rescale (x, yy, x_fname, f = f) // create model without intercept - rescales - mod.trainNtest2 ()() // train and test the model - with auto-tuning - - banner ("AutoMPG Validation Test") - println (FitM.showFitMap (mod.validate ()(), QoF.values.map (_.toString))) - end for - -end neuralNet_3LTest6 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `neuralNet_3LTest7` main function tests the `NeuralNet_3L` class using the - * AutoMPG dataset. It uses the best combination of two features weight and modelyear. 
- * > runMain scalation.modeling.neuralnet.neuralNet_3LTest7 - */ -@main def neuralNet_3LTest7 (): Unit = - - import Example_AutoMPG.{x46, y, yy, x46_fname} // don't include intercept, uses biases instead - - val xs = rescaleX (x46, f_sigmoid) // ActivationFun rescale the X matrix to active range of sigmoid - val (mn, mx) = (x46.min, x46.max) -// val xs = scale ((mn, mx), (-2, 2))(x46) // MatrixTranform scale the X matrix to (-2, 2) - -/* - val xs = new MatrixD (x46.dim, x46.dim2) // low-level rescaling approach to (-2, 2) - for j <- x46.indices2 do - val scale = 4.0 / (mx(j) - mn(j)) - xs(?, j) = (x46(?, j) - mn(j)) * scale - 2 // shift and scale - end for -*/ - -// println (s"xs = $xs") -// println (s"yy = $yy") - println (s"x46_fname = ${stringOf (x46_fname)}") - - Optimizer.hp ("eta") = 0.01 // some activation functions need smaller eta - val nz = 2 // number of hidden nodes - banner (s"AutoMPG NeuralNet_3L") - val mod = new NeuralNet_3L (xs, yy, x46_fname, 2) // create model without intercept - val (yp, qof) = mod.trainNtest2 ()() // train and test the model - with auto-tuning - - banner ("AutoMPG Validation Test") - println (FitM.showFitMap (mod.validate ()(), QoF.values.map (_.toString))) - - banner ("Compare Model with Formula f_nn") - val a = MatrixD ((2, 2), -2.12262, -0.743867, // weights: input -> hidden layer - -0.200314, 1.62988) - val ab = VectorD (-2.15785, -1.65227) // biases: input -> hidden layer - val b = MatrixD ((2, 1), 15.7250, // weights: hidden -> output layer - 13.1971) - val bb = VectorD (13.4702) // bias: hidden -> output layer - -// def f_nn (x: VectorD): VectorD = mod.predict (x) // result from model - def f_nn (x: VectorD): VectorD = ((b dot sigmoid_ ((a dot x) + ab)) + bb) // result from formula (should be close) - - val yp2 = VectorD (for i <- xs.indices yield f_nn (xs(i))(0)) // compute the response - - val yp_ = yp(?, 0) // get response from model - println (s"(yp_ - yp2).norm = ${(yp_ - yp2).norm}") // norm of difference - new 
Plot (null, yp_, yp2, "yp_ (black/model) vs. yp2 (red/formula)") - - def ff (x: VectorD, i: Int): Double = - val xx = (x(0) - 1.61300) * (4/3.537) - 2 - val yy = (x(1) - 70) * (4.0/12) - 2 -// println (s"ff: [$xx, $yy], xs($i) = ${xs(i)}") - val u = -2.12262 * xx - 0.200314 * yy - 2.15785 - val v = -0.743867 * xx + 1.62988 * yy - 1.65227 - val uu = 1.0 / (1.0 + exp(-u)) - val vv = 1.0 / (1.0 + exp(-v)) - 15.7250 * uu + 13.1971 * vv + 13.4702 - end ff - - val yp3 = VectorD (for i <- xs.indices yield ff (x46(i), i)) // compute the response - - def ff2 (x: VectorD, i: Int): Double = -// val xx = (x(0) - 1.61300) * (4/3.537) - 2 -// val yy = (x(1) - 70) * (4.0/12) - 2 -// val u = -2.12262 * xx - 0.200314 * yy - 2.15785 -// val v = -0.743867 * xx + 1.62988 * yy - 1.65227 - - 15.7250 / (1.0 + exp (2.12262 * ((x(0) - 1.61300) * (4/3.537) - 2) + - 0.200314 * ((x(1) - 70) * (4.0/12) - 2) + 2.15785)) + - 13.1971 / (1.0 + exp (0.743867 * ((x(0) - 1.61300) * (4/3.537) - 2) - - 1.62988 * ((x(1) - 70) * (4.0/12) - 2) + 1.65227)) + - 13.4702 - end ff2 - - val yp4 = VectorD (for i <- xs.indices yield ff2 (x46(i), i)) // compute the response - - println (s"(y - yp_).norm = ${(y - yp_).norm}") // norm of difference - println (s"(y - yp2).norm = ${(y - yp2).norm}") // norm of difference - println (s"(y - yp3).norm = ${(y - yp3).norm}") // norm of difference - println (s"(y - yp4).norm = ${(y - yp4).norm}") // norm of difference - -end neuralNet_3LTest7 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `neuralNet_3LTest8` main function is used to test the `NeuralNet_3L` class. - * It tests a simple case that does not require a file to be read. 
- * @see translate.google.com/translate?hl=en&sl=zh-CN&u=https: - * //www.hrwhisper.me/machine-learning-decision-tree/&prev=search - * > runMain scalation.modeling.neuralnet.neuralNet_3LTest8 - */ -@main def neuralNet_3LTest8 (): Unit = - - val x = MatrixD ((10, 1), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) - val y = VectorD (5.56, 5.70, 5.91, 6.40, 6.80, 7.05, 8.90, 8.70, 9.00, 9.05) - val ox = VectorD.one (x.dim) +^: x - val fname = Array ("x") - - banner (s"Regression with intercept") - val reg = new Regression (ox, y) - reg.trainNtest ()() // train and test the model - - banner (s"Perceptron sigmoid") - val nn = Perceptron.rescale (ox, y) - nn.trainNtest ()() // train and test the model - - banner (s"Perceptron tanh") - val nn2 = Perceptron.rescale (ox, y, f = ActivationFun.f_tanh) - nn2.trainNtest ()() // train and test the model - - val ym = MatrixD.fromVector (y) - Optimizer.hp ("eta") = 0.85 // Preceptron and NeuralNet_2L use different optimizers, - // so different learning rates (eta) are needed. 
- banner (s"NeuralNet_2L sigmoid") - val nn3 = NeuralNet_2L.rescale (ox, ym) - nn3.trainNtest ()() // train and test the model - - banner (s"NeuralNet_2L tanh") - val nn4 = NeuralNet_2L.rescale (ox, ym, f = ActivationFun.f_tanh) - nn4.trainNtest ()() // train and test the model - - banner (s"NeuralNet_3L sigmoid-id") - val nn5 = NeuralNet_3L.rescale (ox, ym) - nn5.trainNtest ()() // train and test the model - - banner (s"NeuralNet_3L tanh-tanh") - val nn6 = NeuralNet_3L.rescale (ox, ym, f = ActivationFun.f_tanh, f1 = ActivationFun.f_tanh) - nn6.trainNtest ()() // train and test the model - -end neuralNet_3LTest8 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/old/Optimizer_SGD.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/old/Optimizer_SGD.scala.bak deleted file mode 100644 index c877f2bff..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/old/Optimizer_SGD.scala.bak +++ /dev/null @@ -1,231 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Hao Peng - * @version 2.0 - * @date Sun Feb 6 00:08:23 EST 2022 - * @see LICENSE (MIT style license file). - * - * @title Optimization: Stochastic Gradient Descent Optimizer - */ - -package scalation -package modeling -package neuralnet - -import scala.math.min - -import scalation.mathstat._ - -import Optimizer._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Optimizer-SGD` class provides methods to optimize the parameters (weights - * and biases) of Neural Networks with various numbers of layers. - * This optimizer implements a Stochastic Gradient Descent algorithm. 
- */ -class Optimizer_SGD extends Optimizer: - - private val debug = debugf ("Optimizer_SGD", true) // debug function - private val flaw = flawf ("Optimizer_SGD") // flaw function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given training data x and y for a 2-layer, multi-output Neural Network, fit - * the parameter/weight matrix b. Iterate over several epochs, where each epoch - * divides the training set into nB batches. Each batch is used to update the - * parameter's weights. - * @param x the m-by-n input matrix (training data consisting of m input vectors) - * @param y the m-by-ny output matrix (training data consisting of m output vectors) - * @param bb the array of parameters (weights & biases) between every two adjacent layers - * @param eta_ the initial learning/convergence rate - * @param ff the array of activation function family for every two adjacent layers - */ - def optimize2 (x: MatrixD, y: MatrixD, - bb: NetParams, eta_ : Double, ff: Array [AFF]): (Double, Int) = - val permGen = permGenerator (x.dim) // permutation vector generator - val b = bb(0) // net-parameters: weight matrix and bias vector - val f = ff(0) // activation function - val bSize = min (hp("bSize").toInt, x.dim) // batch size - val maxEpochs = hp("maxEpochs").toInt // maximum number of epochs - val upLimit = hp("upLimit").toInt // limit on increasing lose - val nB = x.dim / bSize // the number of batches - var eta = eta_ // set initial learning rate - println (s"optimize2: bSize = $bSize, nB = $nB") - - for epoch <- 1 to maxEpochs do // iterate over each epoch - val batches = permGen.igen.chop (nB) // permute indices & chop into nB batches - - for ib <- batches do b -= updateWeight (x(ib), y(ib)) // iteratively update parameters b - - val sse = (y - f.fM (b * x)).normFSq // recompute sum of squared errors - collectLoss (sse) // collect the loss per epoch -// debug ("optimize2", s"parameters for $epoch th epoch: sse = $sse") - val 
(b_best, sse_best) = stopWhen (Array (b), sse) - if b_best != null then - b.set (b_best (0)) - return (sse_best, epoch - upLimit) - end if - - if epoch % ADJUST_PERIOD == 0 then eta *= ADJUST_FACTOR // adjust the learning rate - end for - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Update the parameter/weight matrix b based on the current batch. - * Take a step in the direction opposite to the gradient. - * @param x the input matrix for the current batch - * @param y the output matrix for the current batch - */ - def updateWeight (x: MatrixD, y: MatrixD): MatrixD = - val yp = f.fM (b * x) // Yp = f(XB) - val ee = yp - y // negative of error matrix - val d = f.dM (yp) *~ ee // delta matrix for y - - val eta_o_sz = eta / x.dim // eta over current batch size - x.transpose * d * eta_o_sz // return change in parameters - end updateWeight - - debug ("optimize2", s"parameters b = $b") - ((y - f.fM (b * x)).normFSq, maxEpochs) // return sse and # epochs - end optimize2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given training data x and y for a 3-layer Neural Network, fit the parameters - * (weights and biases) a & b. Iterate over several epochs, where each epoch divides - * the training set into nB batches. Each batch is used to update the weights. 
- * @param x the m-by-n input matrix (training data consisting of m input vectors) - * @param y the m-by-ny output matrix (training data consisting of m output vectors) - * @param bb the array of parameters (weights & biases) between every two adjacent layers - * @param eta_ the initial learning/convergence rate - * @param ff the array of activation function family for every two adjacent layers - */ - def optimize3 (x: MatrixD, y: MatrixD, - bb: NetParams, eta_ : Double, ff: Array [AFF]): (Double, Int) = - val permGen = permGenerator (x.dim) // permutation vector generator - val (a, b) = (bb(0), bb(1)) // two sets of net-parameters - val (f, f1) = (ff(0), ff(1)) // two activation functions - val bSize = min (hp("bSize").toInt, x.dim) // batch size - val maxEpochs = hp("maxEpochs").toInt // maximum number of epochs - val upLimit = hp("upLimit").toInt // limit on increasing lose - val nB = x.dim / bSize // the number of batches - var eta = eta_ // counter for number of times moving up - println (s"optimize3: bSize = $bSize, nB = $nB") - - for epoch <- 1 to maxEpochs do // iterate over each epoch - val batches = permGen.igen.chop (nB) // permute indices & chop into nB batches - - for ib <- batches do - val ab = updateWeight (x(ib), y(ib)) // iteratively update parameters a & b - a -= ab._1; b -= ab._2 - end for - - val sse = (y - b * f1.fM (f.fM (a * x))).normFSq - collectLoss (sse) // collect the loss per epoch -// debug ("optimize3", s"parameters for $epoch th epoch: sse = $sse") - val (b_best, sse_best) = stopWhen (Array (a, b), sse) - if b_best != null then - a.set (b_best(0)) - b.set (b_best(1)) - return (sse_best, epoch - upLimit) - end if - - if epoch % ADJUST_PERIOD == 0 then eta *= ADJUST_FACTOR // adjust the learning rate - end for - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Compute the parameter a & b updates based on the current batch. - * A step in the direction opposite to the gradient. 
- * @param x the input matrix for the current batch - * @param y the output matrix for the current batch - */ - def updateWeight (x: MatrixD, y: MatrixD): (NetParam, NetParam) = - var z = f.fM (a * x) // Z = f(XA) - var yp = f1.fM (b * z) // Yp = f(ZB) - var ee = yp - y // negative of the error matrix - val d1 = f1.dM (yp) *~ ee // delta matrix for y - val d0 = f.dM (z) *~ (d1 * b.w.transpose) // delta matrix for z - - val eta_o_sz = eta / x.dim // eta over current batch size - (NetParam (x.transpose * d0 * eta_o_sz, d0.mean * eta), // change to a paramters (weights and biases) - NetParam (z.transpose * d1 * eta_o_sz, d1.mean * eta)) // change to b paramters (weights and biases) - end updateWeight - - debug ("optimize3", s"parameters a = $a \n b = $b") - ((y - b * f1.fM (f.fM (a * x))).normFSq, maxEpochs) // return sse and # epochs - end optimize3 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given training data x and y for a multi-hidden layer Neural Network, fit the - * parameter array b, where each b(l) contains a weight matrix and bias vector. - * Iterate over several epochs, where each epoch divides the training set into nB batches. - * Each batch is used to update the weights. 
- * @param x the m-by-n input matrix (training data consisting of m input vectors) - * @param y the m-by-ny output matrix (training data consisting of m output vectors) - * @param b the array of parameters (weights & biases) between every two adjacent layers - * @param eta_ the initial learning/convergence rate - * @param f the array of activation function family for every two adjacent layers - */ - def optimize (x: MatrixD, y: MatrixD, - b: NetParams, eta_ : Double, f: Array [AFF]): (Double, Int) = - val permGen = permGenerator (x.dim) // permutation vector generator - val bSize = min (hp("bSize").toInt, x.dim) // batch size - val maxEpochs = hp("maxEpochs").toInt // maximum number of epochs - val upLimit = hp("upLimit").toInt // limit on increasing lose - val nB = x.dim / bSize // the number of batches - var eta = eta_ // counter for number of times moving up - var sse = 0.0 // stores accumulated sse over batches for epoch - println (s"optimize: bSize = $bSize, nB = $nB") - - val nl = f.size // number of layers - val layers = 0 until nl // range for layers - val z = Array.ofDim [MatrixD] (nl+1) // array to store activations, layer by layer - val d = Array.ofDim [MatrixD] (nl) // array to store all deltas - - for epoch <- 1 to maxEpochs do // iterate over each epoch - sse = 0.0 - val batches = permGen.igen.chop (nB) // permute indices & chop into nB batches - - for ib <- batches do sse += updateWeight (x(ib), y(ib)) // update parameter array b - - collectLoss (sse) // collect the loss per epoch -// debug ("optimize", s"parameters for $epoch th epoch: b = $b, sse = $sse") - val (b_best, sse_best) = stopWhen (b, sse) - if b_best != null then - for l <- b.indices do b(l) = b_best(l) - return (sse_best, epoch - upLimit) - end if - - if epoch % ADJUST_PERIOD == 0 then eta *= ADJUST_FACTOR // adjust the learning rate - end for - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Compute the parameter array b updates based on the 
current batch. - * A step in the direction opposite to the gradient. - * @param x the input matrix for the current batch - * @param y the output matrix for the current batch - */ - def updateWeight (x: MatrixD, y: MatrixD): Double = - z(0) = x // initial activation, which is the input matrix - for l <- layers do z(l+1) = f(l).fM (b(l) * z(l)) // feedforward and store all activations - - val yp = z.last // predicted value of y - val ee = yp - y // -E where E is the error matrix - d(nl-1) = f.last.dM (yp) *~ ee // delta for the last layer before output - for l <- nl-2 to 0 by -1 do - d(l) = f(l).dM (z(l+1)) *~ (d(l+1) * b(l+1).w.transpose) // deltas for all previous hidden layers - end for - - val eta_o_sz = eta / x.dim // learning rate divided by size of mini-batch - for l <- layers do -// b(l).w *= 1.0 - eta * (lambda / x.dim) // regularization factor, weight decay - b(l) -= (z(l).transpose * d(l) * eta_o_sz, // update weights - d(l).mean * eta) // update biases - end for - - ee.normFSq // return the sse of this batch - end updateWeight - - debug ("optimize", s"parameters b = $b") - (sse, maxEpochs) // return sse and number of epochs - end optimize - -end Optimizer_SGD - diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/old/Optimizer_SGDM.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/old/Optimizer_SGDM.scala.bak deleted file mode 100644 index bb63423e4..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/old/Optimizer_SGDM.scala.bak +++ /dev/null @@ -1,243 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Hao Peng - * @version 2.0 - * @date Sun Feb 6 00:08:23 EST 2022 - * @see LICENSE (MIT style license file). 
- * - * @title Optimization: Stochastic Gradient Descent with Momentum Optimizer - */ - -package scalation -package modeling -package neuralnet - -import scala.math.min - -import scalation.mathstat._ - -import Optimizer._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Optimizer_SGDM` class provides functions to optimize the parameters (weights - * and biases) of Neural Networks with various numbers of layers. - * This optimizer implements a Stochastic Gradient Descent with Momentum algorithm. - */ -class Optimizer_SGDM extends Optimizer: - - private val debug = debugf ("Optimizer_SGDM", true) // debug function - private val flaw = flawf ("Optimizer_SGDM") // flaw function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given training data x and y for a 2-layer, multi-output Neural Network, fit - * the parameter/weight matrix b. Iterate over several epochs, where each epoch - * divides the training set into nB batches. Each batch is used to update the - * the parameter's weights. 
- * @param x the m-by-n input matrix (training data consisting of m input vectors) - * @param y the m-by-ny output matrix (training data consisting of m output vectors) - * @param bb the array of parameters (weights & biases) between every two adjacent layers - * @param eta_ the initial learning/convergence rate - * @param ff the array of activation function family for every two adjacent layers - */ - def optimize2 (x: MatrixD, y: MatrixD, - bb: NetParams, eta_ : Double, ff: Array [AFF]): (Double, Int) = - val permGen = permGenerator (x.dim) // permutation vector generator - val b = bb(0) // net-parameters: weight matrix and bias vector - val f = ff(0) // activation function - val bSize = min (hp("bSize").toInt, x.dim) // batch size - val maxEpochs = hp("maxEpochs").toInt // maximum number of epochs - val upLimit = hp("upLimit").toInt // limit on increasing lose - val beta = hp("beta").toDouble // momentum hyper-parameter - val nB = x.dim / bSize // the number of batches - var eta = eta_ // set initial learning rate - var mo = new MatrixD (b.w.dim, b.w.dim2) // momentum matrix - println (s"optimize2: bSize = $bSize, nB = $nB") - - for epoch <- 1 to maxEpochs do // iterate over each epoch - val batches = permGen.igen.chop (nB) // permute indices & split into nB batches - - for ib <- batches do b -= updateWeight (x(ib), y(ib)) // iteratively update weight matrix b - - val sse = (y - f.fM (b * x)).normFSq // recompute sum of squared errors - collectLoss (sse) // collect loss per epoch -// debug ("optimize2", s"parameters for $epoch th epoch: sse = $sse") - val (b_best, sse_best) = stopWhen (Array (b), sse) - if b_best != null then - b.set (b_best (0)) - return (sse_best, epoch - upLimit) - end if - - if epoch % ADJUST_PERIOD == 0 then eta *= ADJUST_FACTOR // adjust the learning rate - end for - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Update the parameter/weight matrix b based on the current batch. 
- * Take a step in the direction opposite to the gradient. - * @param x the input matrix for the current batch - * @param y the output matrix for the current batch - */ - def updateWeight (x: MatrixD, y: MatrixD): MatrixD = - val yp = f.fM (b * x) // Yp = f(XB) - val ee = yp - y // negative of the error matrix - val d = f.dM (yp) *~ ee // delta matrix for y - - val eta_o_sz = eta / x.dim // eta over the current batch size - val bup = x.transpose * d * eta_o_sz // gradient-based change in input-output weights - mo = mo * beta + bup // update momentum - mo // return momentum - end updateWeight - -// debug ("optimize2", s"parameters b = $b") - ((y - f.fM (b * x)).normFSq, maxEpochs) // return number of epochs - end optimize2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given training data x and y for a 3-layer Neural Network, fit the parameters - * (weights and biases) a & b. Iterate over several epochs, where each epoch divides - * the training set into nB batches. Each batch is used to update the weights. 
- * @param x the m-by-n input matrix (training data consisting of m input vectors) - * @param y the m-by-ny output matrix (training data consisting of m output vectors) - * @param bb the array of parameters (weights & biases) between every two adjacent layers - * @param eta_ the initial learning/convergence rate - * @param ff the array of activation function family for every two adjacent layers - */ - def optimize3 (x: MatrixD, y: MatrixD, - bb: NetParams, eta_ : Double, ff: Array [AFF]): (Double, Int) = - val idx = VectorI.range (0, x.dim) // instance index range - val permGen = permGenerator (x.dim) // permutation vector generator - val (a, b) = (bb(0), bb(1)) // two sets of net-parameters - val (f, f1) = (ff(0), ff(1)) // two activation functions - val bSize = min (hp("bSize").toInt, x.dim) // batch size - val maxEpochs = hp("maxEpochs").toInt // maximum number of epochs - val upLimit = hp("upLimit").toInt // limit on increasing lose - val beta = hp("beta").toDouble // momentum hyper-parameter - val nB = x.dim / bSize // the number of batches - var eta = eta_ // counter for number of times moving up - var moa = new MatrixD (a.w.dim, a.w.dim2) // momentum matrix a - var mob = new MatrixD (b.w.dim, b.w.dim2) // momentum matrix b - - println (s"optimize3: bSize = $bSize, nB = $nB") - - for epoch <- 1 to maxEpochs do // iterate over each epoch - val batches = permGen.igen.chop (nB) // permute indices & split into nB batches - - for ib <- batches do - val ab = updateWeight (x(ib), y(ib)) // iteratively update weight matrices a & b - a -= ab._1; b -= ab._2 - end for - - val sse = (y - b * f1.fM (f.fM (a * x))).normFSq - collectLoss (sse) // collect the loss per epoch -// debug ("optimize3", s"parameters for $epoch th epoch: sse = $sse") - val (b_best, sse_best) = stopWhen (Array (a, b), sse) - if b_best != null then - a.set (b_best(0)) - b.set (b_best(1)) - return (sse_best, epoch - upLimit) - end if - - if epoch % ADJUST_PERIOD == 0 then eta *= ADJUST_FACTOR // 
adjust the learning rate - end for - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Compute the parameter/weight matrices a and b updates based on the current batch. - * A step in the direction opposite to the gradient. - * @param x the input matrix for the current batch - * @param y the output matrix for the current batch - */ - def updateWeight (x: MatrixD, y: MatrixD): (NetParam, NetParam) = - var z = f.fM (a * x) // Z = f(XA) - var yp = f1.fM (b * z) // Yp = f(ZB) - var ee = yp - y // negative of the error matrix - val d1 = f1.dM (yp) *~ ee // delta matrix for y - val d0 = f.dM (z) *~ (d1 * b.w.transpose) // delta matrix for z - - val eta_o_sz = eta / x.dim // eta over current batch size - moa = moa * beta + x.transpose * d0 * eta_o_sz // update momentum a - mob = mob * beta + z.transpose * d1 * eta_o_sz // update momentum b - (NetParam (moa, d0.mean * eta), // change to a parameters (weights and biases) - NetParam (mob, d1.mean * eta)) // change to b parameters (weights and biases) - end updateWeight - -// debug ("optimize3", s"parameters a = $a \n b = $b") - ((y - b * f1.fM (f.fM (a * x))).normFSq, maxEpochs) // return sse and number of epochs - end optimize3 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given training data x and y, fit the parameter/weight matrices bw and - * bias vectors bi. Iterate over several epochs, where each epoch divides the - * training set into nB batches. Each batch is used to update the weights. 
- * @param x the m-by-n input matrix (training data consisting of m input vectors) - * @param y the m-by-ny output matrix (training data consisting of m output vectors) - * @param b the array of parameters (weights & biases) between every two adjacent layers - * @param eta_ the initial learning/convergence rate - * @param f the array of activation function family for every two adjacent layers - */ - def optimize (x: MatrixD, y: MatrixD, - b: NetParams, eta_ : Double, f: Array [AFF]): (Double, Int) = - val permGen = permGenerator (x.dim) // permutation vector generator - val bSize = min (hp("bSize").toInt, x.dim) // batch size - val maxEpochs = hp("maxEpochs").toInt // maximum number of epochs - val upLimit = hp("upLimit").toInt // limit on increasing lose - val beta = hp("beta").toDouble // momentum hyper-parameter - val nB = x.dim / bSize // the number of batches - var eta = eta_ // counter for number of times moving up - var sse = 0.0 // stores accumulated sse over batches for epoch - println (s"optimize: bSize = $bSize, nB = $nB") - - val nl = f.size // number of layers - val layers = 0 until nl // range for layers - val z = Array.ofDim [MatrixD] (nl+1) // array to store activations, layer by layer - val d = Array.ofDim [MatrixD] (nl) // array to store all deltas - var mo = Array.ofDim [MatrixD] (nl) // momentum array - for l <- layers do mo(l) = new MatrixD (b(l).w.dim, b(l).w.dim2) - - for epoch <- 1 to maxEpochs do // iterate over each epoch - sse = 0.0 - val batches = permGen.igen.chop (nB) // permute indices &split into nB batches - - for ib <- batches do sse += updateWeight (x(ib), y(ib)) // update parameter array b - - collectLoss (sse) // collect the loss per epoch -// debug ("optimize", s" parameters for $epoch th epoch: b = $b, sse = $sse") - val (b_best, sse_best) = stopWhen (b, sse) - if b_best != null then - for l <- b.indices do b(l).set (b_best(l)) - return (sse_best, epoch - upLimit) - end if - - if epoch % ADJUST_PERIOD == 0 then eta *= 
ADJUST_FACTOR // adjust the learning rate - end for - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Compute the parameter array b updates based on the current batch. - * A step in the direction opposite to the gradient. - * @param x the input matrix for the current batch - * @param y the output matrix for the current batch - */ - def updateWeight (x: MatrixD, y: MatrixD): Double = - z(0) = x // initial activation, which is the input matrix - for l <- layers do z(l+1) = f(l).fM (b(l) * z(l)) // feedforward and store all activations - - val yp = z.last // predicted value of y - val ee = yp - y // negative of the error matrix - d(nl-1) = f.last.dM (yp) *~ ee // delta for the last layer before output - for l <- nl-2 to 0 by -1 do - d(l) = f(l).dM (z(l+1)) *~ (d(l+1) * b(l+1).w.transpose) // deltas for all previous hidden layers - - val eta_o_sz = eta / x.dim // learning rate divided by size of mini-batch - for l <- layers do -// b(l).w *= 1.0 - eta * (lambda / x.dim) // regularization factor, weight decay - mo(l) = mo(l) * beta + z(l).transpose * d(l) * eta_o_sz // update l-th momentum - b(l) -= (mo(l), d(l).mean * eta) // update l-th parameter (weights and biases) - end for - - ee.normFSq // return the sse of this batch - end updateWeight - -// debug ("optimize", s"parameters b = $b") - (sse, maxEpochs) // return sse and number of epochs - end optimize - -end Optimizer_SGDM - diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/predictorMVTest.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/predictorMVTest.class deleted file mode 100644 index d0409f472..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/predictorMVTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/predictorMVTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/predictorMVTest.tasty deleted file mode 100644 index a8b378413..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/predictorMVTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest.class deleted file mode 100644 index a2db8a33f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest.tasty deleted file mode 100644 index 116e0d51d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest2.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest2.class deleted file mode 100644 index 52b2e673e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest2.tasty deleted file mode 100644 index d0ecee3e3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest3.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest3.class deleted file mode 100644 index 92b214113..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest3.tasty 
deleted file mode 100644 index a84302dfc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest4.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest4.class deleted file mode 100644 index 684053925..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest4.tasty deleted file mode 100644 index 92d7efbd0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest5.class b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest5.class deleted file mode 100644 index fab212f8e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest5.tasty deleted file mode 100644 index add249c9e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/regressionMVTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/test_case_3L.txt b/target/scala-3.6.4/classes/scalation/modeling/neuralnet/test_case_3L.txt deleted file mode 100644 index 6ea064ed4..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/neuralnet/test_case_3L.txt +++ /dev/null @@ -1,66 +0,0 @@ - -@main def neuralNet_3LTest11 (): Unit = - - // 9 data points: Constant x1 x2 y0 y1 - val xy = MatrixD ((9, 5), 1.0, 0.0, 0.0, 0.5, 
0.4, // dataset - 1.0, 0.0, 0.5, 0.3, 0.3, - 1.0, 0.0, 1.0, 0.2, 0.2, - - 1.0, 0.5, 0.0, 0.8, 0.7, - 1.0, 0.5, 0.5, 0.5, 0.5, - 1.0, 0.5, 1.0, 0.3, 0.4, - - 1.0, 1.0, 0.0, 1.0, 0.9, - 1.0, 1.0, 0.5, 0.8, 0.7, - 1.0, 1.0, 1.0, 0.5, 0.5) - val x = xy(?, 0 until 3) // matrix for predictor variables - val y = xy(?, 3 until 5) // matrix for response variables - val sst0 = (y(?, 0) - y(?, 0).mean).normSq // sum of squares total for y_:0 - val sst1 = (y(?, 1) - y(?, 1).mean).normSq // sum of squares total for y_:1 - - val η = 0.4 // learning rate - val a = MatrixD ((3, 2), 0.1, 0.1, // weights/parameters X -> Z - 0.2, 0.1, - 0.1, 0.1) - val b = MatrixD ((2, 2), 0.1, 0.1, // weights/parameters Z -> Y - 0.1, 0.1) - - val f0 = f_sigmoid // hidden layer activation function - val f1 = f_id // output layer activation function - - for epoch <- 1 to 10 do - banner (s"improvement step $epoch") - // forward - val u = x * a // pre-activation vector - val z = f0.fM (u) // hidden matrix - val v = z * b // output pre-activation matrix - val yp = f1.fM (v) // predicted response from calculation for sigmoid - // backward - val ε = y - yp // error matrix - val δ1 = ε *~ f1.dM (yp) // delta1 @ output layer - val δ0 = δ1 * b.Ƭ *~ f0.dM (z) // delta0 @ hidden layer - b += z.Ƭ * δ1 * η // parameter update Z -> Y - a += x.Ƭ * δ0 * η // parameter update X -> Z - - val sse0 = ε(?, 0).normSq // sum of squared errors for column 0 - val sse1 = ε(?, 1).normSq // sum of squared errors for column 1 - - banner ("forward") - println (s"u = $u") - println (s"z = $z") - println (s"v = $v") - println (s"yp = $yp") - banner ("backward") - println (s"ε = $ε") - println (s"δ1 = $δ1") - println (s"δ0 = $δ0") - println (s"b = $b") - println (s"a = $a") - banner ("metrics") - println (s"sse0 = $sse0") - println (s"sse1 = $sse1") - println (s"R^2_0 = ${1 - sse0/sst0}") - println (s"R^2_1 = ${1 - sse1/sst1}") - -end neuralNet_3LTest11 - diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/nonlinearRegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/nonlinearRegressionTest.class deleted file mode 100644 index 786af427e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/nonlinearRegressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/nonlinearRegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/nonlinearRegressionTest.tasty deleted file mode 100644 index dca84d0d5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/nonlinearRegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/nullModelTest.class b/target/scala-3.6.4/classes/scalation/modeling/nullModelTest.class deleted file mode 100644 index b35d0d2e2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/nullModelTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/nullModelTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/nullModelTest.tasty deleted file mode 100644 index e2bf08eb2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/nullModelTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/nullModelTest2.class b/target/scala-3.6.4/classes/scalation/modeling/nullModelTest2.class deleted file mode 100644 index dade7ecbc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/nullModelTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/nullModelTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/nullModelTest2.tasty deleted file mode 100644 index 44d2883ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/nullModelTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/old/Perceptron.scala.bak 
b/target/scala-3.6.4/classes/scalation/modeling/old/Perceptron.scala.bak deleted file mode 100644 index 2fbf66b94..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/old/Perceptron.scala.bak +++ /dev/null @@ -1,194 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 1.6 - * @date Mon Sep 9 13:30:41 EDT 2013 - * @see LICENSE (MIT style license file). - * - * @title Model: Perceptron (single output 2-layer Neural-Network) - * - * @see hebb.mit.edu/courses/9.641/2002/lectures/lecture03.pdf - */ - -package scalation -package modeling - -import scalation.mathstat._ - -import ActivationFun._ -import Initializer._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Perceptron` class supports single-output, 2-layer (input and output) - * Neural-Networks. Although perceptrons are typically used for classification, - * this class is used for prediction. Given several input vectors and output - * values (training data), fit the weights/parameters 'b' connecting the layers, - * so that for a new input vector 'z', the net can predict the output value, i.e., - * z = f (b dot z) - * The parameter vector 'b' (w) gives the weights between input and output layers. - * Note, 'b0' is treated as the bias, so 'x0' must be 1.0. 
- * @param x the data/input m-by-n matrix (data consisting of m input vectors) - * @param y the response/output m-vector (data consisting of m output values) - * @param fname_ the feature/variable names - * @param hparam the hyper-parameters for the model/network - * @param f the activation function family for layers 1->2 (input to output) - * @param itran the inverse transformation function returns responses to original scale - */ -class Perceptron (x: MatrixD, y: VectorD, fname_ : Array [String] = null, - hparam: HyperParameter = Perceptron.hp, - f: AFF = f_sigmoid, val itran: FunctionV2V = null) - extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): - - private val debug = debugf ("Perceptron", false) // debug function - private val flaw = flawf ("Perceptron") // flaw function - private val (m, n) = x.dims // input data matrix dimensions - private var eta = hparam ("eta").toDouble // the learning/convergence rate (requires adjustment) - private var bSize = hparam ("bSize").toInt // the batch size - private val maxEpochs = hparam ("maxEpochs").toInt // the maximum number of training epcochs/iterations - private val _1 = VectorD.one (m) // vector of all ones - - if y.dim != m then flaw ("constructor", "dimensions of x and y are incompatible") - - println (s"Create a Perceptron with $n input nodes and 1 output node") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the initial parameter/weight vector 'b' manually before training. - * This is mainly for testing purposes. - * @param w0 the initial weights for b - */ - def setWeights (w0: VectorD): Unit = b = w0 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reset the learning rate 'eta'. 
- * @param eta the learning rate - */ - def reset (eta_ : Double): Unit = eta = eta_ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given training data 'x_' and 'y_', fit the parameter/weight vector 'b'. - * Minimize the error in the prediction by adjusting the weight vector 'b'. - * The error 'e' is simply the difference between the target value 'y_' and the - * predicted value 'yp'. Minimize the dot product of error with itself using - * gradient-descent (move in the opposite direction of the gradient). - * Iterate over several epochs (no batching). - * Use val d = yp * (_1 - yp) * e // delta y (for sigmoid only) - * @param x_ the training/full data/input matrix - * @param y_ the training/full response/output vector - */ - def train (x_ : MatrixD = x, y_ : VectorD = y): Unit = - println (s"train0: eta = $eta") - if b == null then b = weightVec (n) // initialize parameters/weights - var sse0 = Double.MaxValue - - for epoch <- 1 to maxEpochs do // epoch-th learning phase - val yp = f.f_ (x_ * b) // predicted output vector yp = f(Xb) - e = y_ - yp // error vector for y - val d = -f.d (yp) * e // delta vector for y - b -= x_.transpose * d * eta // update the parameters/weights - - val sse = (y_ - f.f_ (x_ * b)).normSq // recompute sum of squared errors - debug ("train0", s"parameters for $epoch th epoch: b = $b, sse = $sse") - if sse >= sse0 then return // return when sse increases - sse0 = sse // save prior sse - end for - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test a predictive model y_ = f(x_) + e and return its QoF vector. - * Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. 
- * @param x_ the testing/full data/input matrix (defaults to full x) - * @param y_ the testing/full response/output vector (defaults to full y) - */ - def test (x_ : MatrixD = x, y_ : VectorD = y): (VectorD, VectorD) = - val yp = predict (x_) // make predictions - e = y_ - yp // RECORD the residuals/errors (@see `Predictor`) - (yp, diagnose (y_, yp)) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given training data 'x_' and 'y_', fit the parameter/weight vector 'b'. - * Minimize the error in the prediction by adjusting the weight vector 'b'. - * Iterate over several epochs, where each epoch divides the training set into - * 'nbat' batches. Each batch is used to update the weights. - * @param x_ the training/full data/input matrix - * @param y_ the training/full response/output vector - def train (x_ : MatrixD = x, y_ : VectorD = y): Unit = - if y_.dim < 2 * bSize then flaw ("train", "not enough data for batching - use 'train0'") - println (s"train: eta = $eta") - if b == null then b = weightVec (n) // initialize parameters/weights - val result = optimize (x_, y_, b, eta, bSize, maxEpochs, f) - println (s"result = (sse, ending_epoch_) = $result") - end train - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given training data 'x_' and 'y_', fit the parameter/weight vector 'b'. - * Minimize the error in the prediction by adjusting the weight vector 'b'. - * Iterate over several epochs, where each epoch divides the training set into - * 'nbat' batches. Each batch is used to update the weights. - * This version preforms an interval search for the best 'eta' value. 
- * @param x_ the training/full data/input matrix - * @param y_ the training/full response/output vector - override def train2 (x_ : MatrixD = x, y_ : VectorD = y): Unit = - if y_.dim < 2 * bSize then flaw ("train2", "not enough data for batching - use 'train0'") - val etaI = (0.25 * eta, 4.0 * eta) // quarter to four times eta - println (s"train2: etaI = $etaI") - if b == null then b = weightVec (n) // initialize parameters/weights - val result = optimizeI (x_, y_, b, etaI, bSize, maxEpochs, f) - println (s"result = (sse, ending_epoch_) = $result") - end train2 - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Switch between 'train' methods: simple (0), regular (1) and hyper-parameter - * optimizing (2). - * @param which the kind of 'train' method to use - * @param x_ the training/full data/input matrix - * @param y_ the training/full response/output vector - def trainSwitch (which: Int, x_ : MatrixD = x, y_ : VectorD = y): Perceptron = - which match - case 0 => train0 (x_, y_) - case 1 => train (x_, y_) - case 2 => train2 (x_, y_) - case _ => flaw ("trainSwitch", s"which = $which not in (0, 1, 2)"); null - end match - end trainSwitch - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given a new input vector 'z', predict the output/response value 'f(z)'. - * @param z the new input vector - */ - override def predict (z: VectorD): Double = f.f (b dot z) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given a new input matrix 'z', predict the output/response value 'f(z)'. - * @param z the new input matrix - */ - override def predict (z: MatrixD = x): VectorD = f.f_ (z * b) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build a sub-model that is restricted to the given columns of the data matrix. 
- * @param x_cols the columns that the new model is restricted to - */ - override def buildModel (x_cols: MatrixD): Perceptron = - new Perceptron (x_cols, y, null, hparam, f, itran) - end buildModel - -end Perceptron - - -object Perceptron: - - /** hyper-parameters for tuning the optimization algorithms - user tuning - */ - val hp = new HyperParameter - hp += ("eta", 0.1, 0.1) // learning/convergence rate - hp += ("bSize", 20, 20) // mini-batch size, common range 10 to 30 - hp += ("maxEpochs", 500, 500) // maximum number of epochs/iterations - -end Perceptron - diff --git a/target/scala-3.6.4/classes/scalation/modeling/old/RegressionTree.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/old/RegressionTree.scala.bak deleted file mode 100644 index f43eb991b..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/old/RegressionTree.scala.bak +++ /dev/null @@ -1,461 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Dong Yu Yu, John Miller - * @version 2.0 - * @date Wed Nov 7 17:08:17 EST 2018 - * @see LICENSE (MIT style license file). - * - * @title Model: Regression Tree - */ - -package scalation -package modeling - -import scala.collection.mutable.{ArrayBuffer, Queue, Set} -import scala.runtime.ScalaRunTime.stringOf - -import scalation.mathstat._ -import scalation.random.PermutedVecI - -// FIX - rSqBar from validate is wrong - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RegressionTree` companion object is used to count the number of leaves - * and provide factory functions. - */ -object RegressionTree: - - val hp = new HyperParameter // default hyper-parameter values - hp += ("maxDepth", 5, 5) - hp += ("threshold", 0.1, 0.1) - - private var nLeaves_ = 0 // the number of leaves in the tree - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the number of leaves in the tree. 
- */ - def nLeaves: Int = nLeaves_ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Icrement the number of leaves in the tree. - */ - def incLeaves (): Unit = nLeaves_ += 1 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reset the number of leaves in the tree. - */ - def resetLeaves (): Unit = nLeaves_ = 0 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionTree` object from a combined data-response matrix. - * @param xy the combined data-response matrix - * @param fname the names for all features/variables - * @param hparam the hyper-parameters - * @param col the designated response column (defaults to the last column) - */ - def apply (xy: MatrixD, fname: Array [String] = null, - hparam: HyperParameter = hp)(col: Int = xy.dim2 - 1): RegressionTree = - new RegressionTree (xy.not(?, col), xy(?, col), fname, hparam) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionTree` object from a data matrix and response vector. - * @param x the data matrix - * @param y the response vector - * @param fname the names for all features/variables - * @param hparam the hyper-parameters - */ - def rescale (x: MatrixD, y: VectorD, fname: Array [String] = null, - hparam: HyperParameter = hp): RegressionTree = - val xn = normalize ((x.mean, x.stdev)) (x) - new RegressionTree (xn, y, fname, hparam) - end rescale - -end RegressionTree - -import RegressionTree.{nLeaves, incLeaves, resetLeaves} - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Node` class contains information for a tree node. 
- * @param f the feature of the node used for splitting, if it is leaf, contains the feature of its parent - * @param branch the branch value (0 => left, 1 => right) - * @param yp leaf node's prediction for y - * @param thresh the threshold for continuous feature - * @param depth the current depth of the node - * @param pthresh the threshold for parent node - * @param pfea the feature of parent node - * @param leaf `Boolean` value indicate whether is a leaf node - */ -case class Node (f: Int, branch: Int, yp: Double, thresh: Double, - depth: Int, pthresh: Double, pfea: Int, leaf: Boolean = false): - - val child = new ArrayBuffer [Node] () // children of node - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert node to a string. - */ - override def toString: String = - if child.length == 0 then - s"Leaf (pfeature = x$pfea, branch = $branch, feature = x$f, yp = $yp)" - else if depth == 0 then - s"Root (feature = x$f, threshold = $thresh)" - else - s"Node (pfeature = x$pfea, branch = $branch, feature = x$f, threshold = $thresh)" - end toString - -end Node - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RegressionTree` class implements a Regression Tree that selects splitting features - * using minimal variance in children nodes. To avoid exponential choices in the selection, - * supporting ordinal features currently. 
- * @param x the m-by-n input/data matrix - * @param y the response m-vector - * @param fname_ the names of the model's features/variables - * @param hparam the hyper-parameters for the model - * @param curDepth current depth - * @param branchValue the branch value for the tree node - * @param feature the feature for the tree's parent node - */ -class RegressionTree (x: MatrixD, y: VectorD, fname_ : Array [String] = null, - hparam: HyperParameter = RegressionTree.hp, - curDepth: Int = -1, branchValue: Int = -1, feature: Int = -1) - extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): // call resetDF once tree is built - - private val debug = debugf ("RegressionTree", true) // debug function - private val (m, n) = (x.dim, x.dim2) // matrix dimensions - private val maxDepth = hparam ("maxDepth").toInt // the depth limit for tree - private val thres = hparam ("threshold").toDouble // the threshold for the tree's parent node - private val threshold = new Array [(Double, Double)] (n) // store best splitting threshold for each feature - private val stream = 0 // the random number stream - private val permGen = PermutedVecI (VectorI.range (0, m), stream) - private var root: Node = null // root node - - modelName = "RegressionTree" - - debug ("init", s"Constructing a Regression Tree: curDepth = $curDepth") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split gives row indices of left and right children when splitting using thresh. 
- * @param j the column/feature to use - * @param thresh the threshold for splitting (below => left, above => right) - */ - private def split (j: Int, thresh: Double): (IndexedSeq [Int], IndexedSeq [Int]) = - val (sLeft, sRight) = (Set [Int] (), Set [Int] ()) - for i <- x.indices do if x(i, j) <= thresh then sLeft += i else sRight += i - (sLeft.toIndexedSeq, sRight.toIndexedSeq) - end split - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given feature f, use fast threshold selection to find an optimal threshold/ - * split point in O(NlogN) time. - * @see people.cs.umass.edu/~domke/courses/sml/12trees.pdf - * @param f the given feature for which the threshold is desired - * @param x_f column f in data matrix - * @param subSample optional, use to select from the range - */ - def fastThreshold (f: Int, x_f: VectorD, subSample: VectorI = null): Unit = - var thres = 0.0 // to hold optimal threshold - var tSSE = Double.MaxValue // total sum of squared errors - var ref = Array.ofDim [(Double, Int)] (y.dim) // pair column value with column index - for i <- x.indices do ref(i) = (x_f(i), i) // assign pairs - ref = ref.sortBy (_._1) // sort by column value - - val values = x_f.distinct.sorted // get distinct values from column x_f & sort - - val v = new VectorD (values.dim - 1) // mid points between all values - if v.dim == 0 then { threshold(f) = (thres, -1.0); return } // no values => return early - for i <- v.indices do v(i) = (values(i) + values(i+1)) / 2.0 - - val (totalSum, totalSqr) = (y.sum, y.normSq) // total sum and sum of squares - var sum, square, mean = 0.0 // left sum, square and mean - var (row, valu) = (0, v(0)) // candidate split value/threshold - - for i <- ref.indices do - if ref(i)._1 > valu then - val n_i = ref.size - i // number of elements on left - val rSum = totalSum - sum // right sum - val rSqr = totalSqr - square // right sum of squares - val rMean = rSum / n_i // right mean - val lrSSE = square - 2 * sum * 
mean + i * mean * mean + - rSqr - 2 * rSum * rMean + n_i * rMean * rMean - - if lrSSE < tSSE then { tSSE = lrSSE; thres = valu } // update if lrSSE is smaller - row += 1 - end if - - val yi = y(ref(i)._2) - sum += yi // left sum - square += yi * yi // left sum of squares - mean = (yi + i * mean) / (i + 1) // left mean - if row < v.dim then valu = v(row) - end for - - threshold(f) = (thres, tSSE) // return best split point - end fastThreshold - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return new x matrix and y vector for next step of constructing regression tree. - * @param f the feature index - * @param side indicator for which side of child is chosen (i.e., 0 for left child) - */ - def nextXY (f: Int, side: Int): (MatrixD, VectorD) = - val (left, right) = split (f, threshold(f)._1) - if side == 0 then (x(left), y(left)) else (x(right), y(right)) - end nextXY - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the regression tree by selecting threshold for all the features - * in y_ (can be used as all the samples or sub-samples). - * @param x_ the training/full data/input matrix - * @param y_ the training/full response/output vector - * only the values in y_ will be used in selecting threshold - */ - def train (x_ : MatrixD, y_ : VectorD): Unit = - // FIX - other methods need x_ , currently only works for x_ = x - train (VectorI.range (0, y_.dim)) - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the regression tree by selecting threshold for all the features - * in interval (sub-samples). 
- * @param interval only the values in interval will be used in selecting threshold - */ - def train (interval: VectorI): RegressionTree = - for f <- 0 until n do fastThreshold (f, x(?, f), interval) // set threshold for features - var opt = (0, threshold(0)._2) // compute variance for feature 0 - - debug ("train", s"for feature ${opt._1} the variance is ${opt._2}") - - for f <- 1 until n do - val fVar = threshold(f)._2 - debug ("train", s"for feature $f the variance is $fVar") - if fVar <= opt._2 then opt = (f, fVar) // save feature giving minimal variance - end for - - debug ("train", s"optimal feature is ${opt._1} with variance of ${opt._2}") - buildTree (opt) - this - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given the next most distinguishing feature/attribute, extend the regression tree. - * @param opt the optimal feature and the variance - */ - def buildTree (opt: (Int, Double)): Unit = - root = - if curDepth == 0 then Node (opt._1, -1, y.mean, threshold(opt._1)._1, curDepth, -1.0, -1) - else Node (opt._1, branchValue, y.mean, threshold(opt._1)._1, curDepth, thres, feature) - debug ("buildTree", s"--> Add root = ${root}") - - for i <- 0 until 2 do // 0 => left, 1 => right - val next = nextXY (opt._1, i) - if next._2.size != 0 then - root.child += - (if curDepth == maxDepth - 1 || next._2.size <= x.dim2 then - val yp = next._2.mean - incLeaves () - Node (opt._1, root.child.length, yp, threshold(opt._1)._1, curDepth + 1, - threshold(opt._1)._1, opt._1, true) - else - val hp = RegressionTree.hp.updateReturn ("threshold", threshold(opt._1)._1) - val subtree = new RegressionTree (next._1, next._2, fname, hp, curDepth + 1, i, opt._1) - subtree.train (next._1, next._2) - subtree.root) - - debug ("buildTree", s"--> Add child = ${root.child}") -// debug ("buildTree", s"\t x \t = ${next._1} \n\t y \t = ${next._2}") - end if - end for - end buildTree - - 
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test a predictive model y_ = f(x_) + e and return its QoF vector. - * Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. - * @param x_ the testing/full data/input matrix (defaults to full x) - * @param y_ the testing/full response/output vector (defaults to full y) - */ - def test (x_ : MatrixD = x, y_ : VectorD = y): (VectorD, VectorD) = - val yp = predict (x_) // make predictions - val df1 = nLeaves // degrees of freedom model = number of leaves - val df2 = y_.dim - df1 // degrees of freedom error - resetDF ((df1, df2)) - (yp, diagnose (y_, yp)) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Print the regression tree in Pre-Order using printT method. - */ - def printTree (): Unit = - println ("Regression Tree: nLeaves = " + nLeaves) - println ("fname = " + stringOf (fname)) - printT (root, 0) - println () - end printTree - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Recursively print the regression tree nodes. - * @param nod the current node - * @param level the level of node nod in the tree - */ - def printT (nod: Node, level: Int): Unit = - println ("\t" * level + "[ " + nod + " ]") - for cnode <-nod.child do printT (cnode, level + 1) - end printT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Print out the regression tree using Breadth First Search (BFS). - */ - def printTree2 (): Unit = - println ("RegressionTree:") - println ("fname = " + stringOf (fname)) - val queue = new Queue [Node] () - - for cnode <- root.child do queue += cnode - println (root) - var level = 0 - - while ! 
queue.isEmpty do - val size = queue.size - level += 1 - for i <- 0 until size do - val nod = queue.dequeue () - println ("\t" * level + "[ " + nod + " ]") - for cnode <- nod.child do queue += cnode - end for - println () - end while - end printTree2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given a data vector z, predict the value by following the tree to the leaf. - * @param z the data vector to predict - */ - override def predict (z: VectorD): Double = - var nd = root // current node - while nd.child.length >= 2 do - nd = if z(nd.f) <= nd.thresh then nd.child(0) else nd.child(1) - end while - nd.yp - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given a data matrix z, predict the value by following the tree to the leaf. - * @param z the data matrix to predict - */ - override def predict (z: MatrixD = x): VectorD = - VectorD (for i <- z.indices yield predict (z(i))) - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build a sub-model that is restricted to the given columns of the data matrix. - * @param x_cols the columns that the new model is restricted to - */ - override def buildModel (x_cols: MatrixD): RegressionTree = - new RegressionTree (x_cols, y, null, hparam) - end buildModel - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reset or re-initialize the frequency tables and the probability tables. - */ - def reset (): Unit = resetLeaves () - -end RegressionTree - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regressionTreeTest` main function is used to test the `RegressionTree` class. - * It tests a simple case that does not require a file to be read. 
- * @see translate.google.com/translate?hl=en&sl=zh-CN&u=https: - * //www.hrwhisper.me/machine-learning-decision-tree/&prev=search - * > runMain scalation.modeling.regressionTreeTest - */ -@main def regressionTreeTest (): Unit = - - val x = MatrixD ((10, 1), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) - val y = VectorD (5.56, 5.70, 5.91, 6.40, 6.80, 7.05, 8.90, 8.70, 9.00, 9.05) - val ox = VectorD.one (x.dim) +^: x - val fname = Array ("x") - - banner (s"Regression no intercept") - val reg = new Regression (x, y) - reg.trainNtest ()() // train and test the model - - banner (s"Regression with intercept") - val reg2 = new Regression (ox, y) - reg2.trainNtest ()() // train and test the model - - banner (s"Quadratic Regression") - val reg3 = SymbolicRegression.quadratic (x, y, fname) - reg3.trainNtest ()() // train and test the model - - banner (s"Perceptron sigmoid") - val nn = Perceptron.rescale (reg3.getX, y) - nn.trainNtest ()() // train and test the model - - banner (s"Perceptron tanh") - val nn2 = Perceptron.rescale (reg3.getX, y, f = ActivationFun.f_tanh) - nn2.trainNtest ()() // train and test the model - - for d <- 0 to 4 do - banner (s"Regression Tree with maxDepth = $d") - val hp2 = RegressionTree.hp.updateReturn ("maxDepth", d) - val mod = new RegressionTree (x, y, null, hp2) - mod.trainNtest ()() // train and test the model - mod.printTree () - mod.reset () - end for - -end regressionTreeTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regressionTreeTest2` main function tests the `RegressionTree` class using the AutoMPG - * dataset. Assumes no missing values. It tests forward, backward and stepwise selection. 
- * > runMain scalation.modeling.regressionTreeTest2 - */ -@main def regressionTreeTest2 (): Unit = - - import Example_AutoMPG._ - -// println (s"x = $o") -// println (s"y = $y") - - banner ("auto_mpg Regression Tree") - val mod = new RegressionTree (x, y, x_fname) // create model with intercept (else pass x) - mod.trainNtest ()() // train and test the model - mod.printTree () // print the regression tree -// println (mod.summary ()) // parameter/coefficient statistics - -/* - banner ("Cross-Validation") - Fit.showQofStatTable (mod.crossValidate ()) - - println (s"ox_fname = ${stringOf (ox_fname)}") - - for tech <- Predictor.SelectionTech.values do - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv - val k = cols.size - println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Regression Tree with $tech", lines = true) - println (s"$tech: rSq = $rSq") - end for -*/ - banner ("Quasi Feature Selection via max-depth constraint") - for d <- 1 to 6 do - banner (s"Regression Tree with maxDepth = $d") - val hp2 = RegressionTree.hp.updateReturn ("maxDepth", d) - val mod = new RegressionTree (x, y, x_fname, hp2) - mod.trainNtest ()() // train and test the model - mod.printTree () - mod.reset () - end for - -end regressionTreeTest2 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/old/RegressionTree2.scala.bak b/target/scala-3.6.4/classes/scalation/modeling/old/RegressionTree2.scala.bak deleted file mode 100644 index 339bc72b8..000000000 --- a/target/scala-3.6.4/classes/scalation/modeling/old/RegressionTree2.scala.bak +++ /dev/null @@ -1,453 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Dong Yu Yu, John Miller - * @version 2.0 - * @date Wed Nov 7 17:08:17 EST 2018 - * @see LICENSE (MIT style license file). 
- * - * @title Model: Regression Tree - */ - -package scalation -package modeling - -import scala.collection.mutable.{ArrayBuffer, Queue, Set} -import scala.math.abs -import scala.runtime.ScalaRunTime.stringOf - -import scalation.mathstat._ - -// FIX - rSqBar from validate is wrong - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RegressionTree2` companion object is used to count the number of leaves - * and provide factory functions. - */ -object RegressionTree2: - - private val debug = debugf ("RegressionTree2", true) // debug function - private var nLeaves_ = 0 // the number of leaves in the tree - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the number of leaves in the tree. - */ - def nLeaves: Int = nLeaves_ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Icrement the number of leaves in the tree. - */ - def incLeaves (): Unit = nLeaves_ += 1 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reset the number of leaves in the tree. - */ - def resetLeaves (): Unit = nLeaves_ = 0 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionTree2` object from a combined data-response matrix. - * @param xy the combined data-response matrix - * @param fname the names for all features/variables - * @param hparam the hyper-parameters - * @param col the designated response column (defaults to the last column) - */ - def apply (xy: MatrixD, fname: Array [String] = null, - hparam: HyperParameter = RegressionTree.hp) - (col: Int = xy.dim2 - 1): RegressionTree2 = - new RegressionTree2 (xy.not(?, col), xy(?, col), fname, hparam) - end apply - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a `RegressionTree2` object from a data matrix and response vector. 
- * @param x the data matrix - * @param y the response vector - * @param fname the names for all features/variables - * @param hparam the hyper-parameters - */ - def rescale (x: MatrixD, y: VectorD, fname: Array [String] = null, - hparam: HyperParameter = RegressionTree.hp): RegressionTree2 = - val xn = normalize ((x.mean, x.stdev)) (x) - new RegressionTree2 (xn, y, fname, hparam) - end rescale - -end RegressionTree2 - -import RegressionTree2.{nLeaves, incLeaves, resetLeaves} - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RegressionTree2` class implements a Regression Tree that selects splitting features - * using minimal variance in children nodes. To avoid exponential choices in the selection, - * supporting ordinal features currently. - * Note: may not split in certain cases where `RegressionTree` does. - * @param x the m-by-n input/data matrix - * @param y the response m-vector - * @param fname_ the names of the model's features/variables - * @param hparam the hyper-parameters for the model - * @param curDepth current depth - * @param branchValue the branch value for the tree node - * @param feature the feature for the tree's parent node - */ -class RegressionTree2 (x: MatrixD, y: VectorD, fname_ : Array [String] = null, - hparam: HyperParameter = RegressionTree.hp, - curDepth: Int = 0, branchValue: Int = -1, feature: Int = -1) - extends Predictor (x, y, fname_, hparam) - with Fit (dfm = x.dim2 - 1, df = x.dim - x.dim2): // call resetDF once tree is built - - private val debug = debugf ("RegressionTree2", true) // debug function - private val flaw = flawf ("RegressionTree2") // flaw function - private val (m, n) = (x.dim, x.dim2) // matrix dimensions - private val depth = hparam ("maxDepth").toInt // the depth limit for tree - private val thres = hparam ("threshold").toDouble // the threshold for the tree's parent node - private val threshold = new Array [(Double, Double)] (n) // store best splitting (threshold, 
score) for each feature - - private var root: Node = null // root node - - modelName = s"RegressionTree2 ($depth)" - - debug ("init", s"Constructing a Regression Tree 2: curDepth = $curDepth") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Split gives row indices of left and right children when splitting using thresh. - * @param j the column/feature to use - * @param thresh the threshold for splitting (below => left, above => right) - */ - private def split (j: Int, thresh: Double): (IndexedSeq [Int], IndexedSeq [Int]) = - val (sLeft, sRight) = (Set [Int] (), Set [Int] ()) - for i <- x.indices do if x(i, j) <= thresh then sLeft += i else sRight += i - (sLeft.toIndexedSeq, sRight.toIndexedSeq) - end split - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given feature f, use fast threshold selection to find an optimal threshold/ - * split point in O(NlogN) time. - * @see people.cs.umass.edu/~domke/courses/sml/12trees.pdf - * @param xj the j-th column in data matrix - */ - def fastThreshold (xj: VectorD): (Double, Double) = - var thres = -0.0 // to hold optimal threshold (nothing marker) - var tSSE = Double.MaxValue // total sum of squared errors - var ref = Array.ofDim [(Double, Int)] (y.dim) // pair column value with column index - for i <- x.indices do ref(i) = (xj(i), i) // assign pairs - ref = ref.sortBy (_._1) // sort by column value - - val dvals = xj.distinct.sorted // get distinct values from column xj & sort - if dvals.dim <= 1 then return (thres, -1.0) // can't divide => return early - val v = dvals.mids // mid points between all values - - val (totalSum, totalSqr) = (y.sum, y.normSq) // total sum and sum of squares - var sum, square, mean = 0.0 // left sum, square and mean - var (row, valu) = (0, v(0)) // candidate split value/threshold - - for i <- ref.indices do - if ref(i)._1 > valu then - val n_i = ref.size - i // number of elements on left - val rSum = totalSum - sum // 
right sum - val rSqr = totalSqr - square // right sum of squares - val rMean = rSum / n_i // right mean - val lrSSE = square - 2 * sum * mean + i * mean * mean + - rSqr - 2 * rSum * rMean + n_i * rMean * rMean - - if lrSSE < tSSE then { tSSE = lrSSE; thres = valu } // update if lrSSE is smaller - row += 1 - end if - - val yi = y(ref(i)._2) - sum += yi // left sum - square += yi * yi // left sum of squares - mean = (yi + i * mean) / (i + 1) // left mean - if row < v.dim then valu = v(row) - end for - - println (s"(thres, tSSE) = ($thres, $tSSE)") - (thres, tSSE) // return best split point for feature j - end fastThreshold - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return new x matrix and y vector for next step of constructing regression tree. - * @param j the feature/variable index - * @param side indicator for which side of child is chosen (i.e., 0 for left child) - */ - private def nextXY (j: Int, side: Int): (MatrixD, VectorD) = - val (left, right) = split (j, threshold(j)._1) - if side == 0 then (x(left), y(left)) else (x(right), y(right)) - end nextXY - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Check that results of the two fast threshold algorithms agree, returning - * true if they do and false otherwise. 
- * @param j the column index in data matrix (xj) - * @param thr the threshold selected using RegressionTree.fastThreshold - * @param th2 the threshold selected using this.fastThreshold - * @param score the score from RegressionTree.fastThreshold - * @param scor2 the score from this.fastThreshold - */ - private def check (j: Int, thr: Double, th2: Double, score: Double, scor2: Double): Boolean = - var okay = true - println ("-" * 70) - println (s"check for x$j, thr = $thr, th2 = $th2, score = $score, scor2 = $scor2") - if thr != th2 then - println ("\n W A I T W H A T \n") - okay = flaw ("check", s"threshold for x$j thr = $thr != th2 = $th2") - end if - if abs (score - scor2) > 1E-6 then - okay = flaw ("check", s"scores for x$j score = $score != scor2 = $scor2") - end if -// assert (thr == th2) - okay - end check - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Train the regression tree by selecting thresholds for the features/variables - * in matrix x_. 
- * @param x_ the training/full data/input matrix - * @param y_ the training/full response/output vector - */ - def train (x_ : MatrixD, y_ : VectorD): Unit = - resetLeaves () - val ssy = y_.normSq // sum of squared y - for j <- x.indices2 do - val (thr, score) = RegressionTree.fastThreshold (x(?, j), y, ssy) // set threshold for features - val (th2, scor2) = fastThreshold (x(?, j)) // set threshold for features - check (j, thr, th2, score, scor2) // just check - threshold(j) = (th2, scor2) // set threshold for features - end for - - var opt = (0, threshold(0)._2) // compute variance for feature 0 - debug ("train", s"for feature ${opt._1} the variance is ${opt._2}") - - for j <- 1 until x.dim2 do - val jScore = threshold(j)._2 - debug ("train", s"for feature $j the score is $jScore") - if jScore <= opt._2 then opt = (j, jScore) // save feature giving minimal variance - end for - - debug ("train", s"optimal feature is ${opt._1} with variance of ${opt._2}") - buildTree (opt) - end train - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given the next most distinguishing feature/attribute, extend the regression tree. 
- * @param opt the optimal feature and the variance - */ - private def buildTree (opt: (Int, Double)): Unit = - root = - if curDepth == 0 then Node (opt._1, -1, VectorD (y.mean), threshold(opt._1)._1, curDepth, -1.0, -1) - else Node (opt._1, branchValue, VectorD (y.mean), threshold(opt._1)._1, curDepth, thres, feature) - debug ("buildTree", s"--> Add root = ${root}") - - for i <- 0 until 2 do // 0 => left, 1 => right - val next = nextXY (opt._1, i) - if next._2.size != 0 then - root.child += - (if curDepth == depth - 1 || next._2.size <= x.dim2 then - val yp = next._2.mean - incLeaves () - Node (opt._1, root.child.length, VectorD (yp), threshold(opt._1)._1, curDepth + 1, - threshold(opt._1)._1, opt._1, true) - else - val hp = RegressionTree.hp.updateReturn ("threshold", threshold(opt._1)._1) - val subtree = new RegressionTree2 (next._1, next._2, fname, hp, curDepth + 1, i, opt._1) - subtree.train (next._1, next._2) - subtree.root) - - debug ("buildTree", s"--> Add child = ${root.child}") -// debug ("buildTree", s"\t x \t = ${next._1} \n\t y \t = ${next._2}") - end if - end for - end buildTree - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Test a predictive model y_ = f(x_) + e and return its QoF vector. - * Testing may be be in-sample (on the training set) or out-of-sample - * (on the testing set) as determined by the parameters passed in. - * Note: must call train before test. 
- * @param x_ the testing/full data/input matrix (defaults to full x) - * @param y_ the testing/full response/output vector (defaults to full y) - */ - def test (x_ : MatrixD = x, y_ : VectorD = y): (VectorD, VectorD) = - val yp = predict (x_) // make predictions - val df1 = nLeaves // degrees of freedom model = number of leaves - val df2 = y_.dim - df1 // degrees of freedom error - resetDF ((df1, df2)) - (yp, diagnose (y_, yp)) // return predictions and QoF vector - end test - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Print the regression tree in Pre-Order using printT method. - */ - def printTree (): Unit = - println ("Regression Tree: nLeaves = " + nLeaves) - println ("fname = " + stringOf (fname)) - printT (root, 0) - println () - end printTree - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Recursively print the regression tree nodes. - * @param nod the current node - * @param level the level of node nod in the tree - */ - def printT (nod: Node, level: Int): Unit = - println ("\t" * level + "[ " + nod + " ]") - for cnode <-nod.child do printT (cnode, level + 1) - end printT - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Print out the regression tree using Breadth First Search (BFS). - */ - def printTree2 (): Unit = - println ("RegressionTree2:") - println ("fname = " + stringOf (fname)) - val queue = new Queue [Node] () - - for cnode <- root.child do queue += cnode - println (root) - var level = 0 - - while ! queue.isEmpty do - val size = queue.size - level += 1 - for i <- 0 until size do - val nod = queue.dequeue () - println ("\t" * level + "[ " + nod + " ]") - for cnode <- nod.child do queue += cnode - end for - println () - end while - end printTree2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given a data vector z, predict the value by following the tree to the leaf. 
- * @param z the data vector to predict - */ - override def predict (z: VectorD): Double = - var nd = root // current node - while nd.child.length >= 2 do - nd = if z(nd.j) <= nd.thresh then nd.child(0) else nd.child(1) - end while - nd.b(0) // b0 is the mean - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Given a data matrix z, predict the value by following the tree to the leaf. - * @param z the data matrix to predict - */ - override def predict (z: MatrixD = x): VectorD = - VectorD (for i <- z.indices yield predict (z(i))) - end predict - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build a sub-model that is restricted to the given columns of the data matrix. - * @param x_cols the columns that the new model is restricted to - */ - override def buildModel (x_cols: MatrixD): RegressionTree2 = - new RegressionTree2 (x_cols, y, null, hparam) - end buildModel - -end RegressionTree2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regressionTree2Test` main function is used to test the `RegressionTree2` class. - * It tests a simple case that does not require a file to be read. 
- * @see translate.google.com/translate?hl=en&sl=zh-CN&u=https: - * //www.hrwhisper.me/machine-learning-decision-tree/&prev=search - * > runMain scalation.modeling.regressionTree2Test - */ -@main def regressionTree2Test (): Unit = - - val x = MatrixD ((10, 1), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) - val y = VectorD (5.56, 5.70, 5.91, 6.40, 6.80, 7.05, 8.90, 8.70, 9.00, 9.05) - val ox = VectorD.one (x.dim) +^: x - val fname = Array ("x") - - banner (s"Regression no intercept") - val reg = new Regression (x, y) - reg.trainNtest ()() // train and test the model - - banner (s"Regression with intercept") - val reg2 = new Regression (ox, y) - reg2.trainNtest ()() // train and test the model - - banner (s"Quadratic Regression") - val reg3 = SymbolicRegression.quadratic (x, y, fname) - reg3.trainNtest ()() // train and test the model - - banner (s"Perceptron sigmoid") - val nn = Perceptron.rescale (reg3.getX, y) - nn.trainNtest ()() // train and test the model - - banner (s"Perceptron tanh") - val nn2 = Perceptron.rescale (reg3.getX, y, f = ActivationFun.f_tanh) - nn2.trainNtest ()() // train and test the model - - for d <- 1 to 2 do - banner (s"Regression Tree 2 with depth = $d") - RegressionTree.hp("maxDepth") = d - val mod = new RegressionTree2 (x, y, fname) - mod.trainNtest ()() // train and test the model - mod.printTree () - end for - -end regressionTree2Test - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regressionTree2Test2` main function tests the `RegressionTree2` class using the - * AutoMPG dataset. Assumes no missing values. It tests multiple depths. 
- * > runMain scalation.modeling.regressionTree2Test2 - */ -@main def regressionTree2Test2 (): Unit = - - import Example_AutoMPG._ - -// println (s"x = $o") -// println (s"y = $y") - - for d <- 1 to 5 do - banner (s"AutoMPG Regression Tree 2 with d = $d") - RegressionTree.hp("maxDepth") = d - val mod = new RegressionTree (x, y, x_fname) // create model with intercept (else pass x) - mod.trainNtest ()() // train and test the model - mod.printTree () // print the regression tree -// println (mod.summary ()) // parameter/coefficient statistics - end for - -end regressionTree2Test2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `regressionTree2Test3` main function tests the `RegressionTree2` class using the - * AutoMPG dataset. Assumes no missing values. It tests forward, backward and stepwise - * selection. - * > runMain scalation.modeling.regressionTree2Test3 - */ -@main def regressionTree2Test3 (): Unit = - - import Example_AutoMPG._ - - val d = 5 - -// println (s"x = $x") -// println (s"y = $y") - - banner (s"AutoMPG Regression Tree 2 with d = $d") - RegressionTree.hp("maxDepth") = d - val mod = new RegressionTree2 (x, y, x_fname) // create model with intercept (else pass x) - mod.trainNtest ()() // train and test the model - mod.printTree () // print the regression tree - -// banner ("Cross-Validation") -// Fit.showQofStatTable (mod.crossValidate ()) - - for tech <- SelectionTech.values do - banner (s"Feature Selection Technique: $tech") - val (cols, rSq) = mod.selectFeatures (tech) // R^2, R^2 bar, R^2 cv - val k = cols.size - println (s"k = $k, n = ${x.dim2}") - new PlotM (null, rSq.transpose, Array ("R^2", "R^2 bar", "R^2 cv"), - s"R^2 vs n for Regression Tree with $tech", lines = true) - println (s"$tech: rSq = $rSq") - end for - -end regressionTree2Test3 - diff --git a/target/scala-3.6.4/classes/scalation/modeling/outlierTest.class b/target/scala-3.6.4/classes/scalation/modeling/outlierTest.class deleted file 
mode 100644 index 45db2a116..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/outlierTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/outlierTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/outlierTest.tasty deleted file mode 100644 index 735e29111..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/outlierTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/outlierTest2.class b/target/scala-3.6.4/classes/scalation/modeling/outlierTest2.class deleted file mode 100644 index 3d6c16fed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/outlierTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/outlierTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/outlierTest2.tasty deleted file mode 100644 index 65de8a14d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/outlierTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/outlierTest3.class b/target/scala-3.6.4/classes/scalation/modeling/outlierTest3.class deleted file mode 100644 index 3384da25c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/outlierTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/outlierTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/outlierTest3.tasty deleted file mode 100644 index bb821bb6a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/outlierTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest.class b/target/scala-3.6.4/classes/scalation/modeling/perceptronTest.class deleted file mode 100644 index 7369b3516..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/perceptronTest.tasty deleted file mode 100644 index be05e6622..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest2.class b/target/scala-3.6.4/classes/scalation/modeling/perceptronTest2.class deleted file mode 100644 index dd85ffe9f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/perceptronTest2.tasty deleted file mode 100644 index 0bb5e0b49..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest3.class b/target/scala-3.6.4/classes/scalation/modeling/perceptronTest3.class deleted file mode 100644 index 6330333b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/perceptronTest3.tasty deleted file mode 100644 index 42dd08948..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest4.class b/target/scala-3.6.4/classes/scalation/modeling/perceptronTest4.class deleted file mode 100644 index 6652bedec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/perceptronTest4.tasty deleted file mode 
100644 index c1dc192d0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest5.class b/target/scala-3.6.4/classes/scalation/modeling/perceptronTest5.class deleted file mode 100644 index 62c754475..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/perceptronTest5.tasty deleted file mode 100644 index 0ab5cdbfb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest6.class b/target/scala-3.6.4/classes/scalation/modeling/perceptronTest6.class deleted file mode 100644 index 7d0b7d444..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/perceptronTest6.tasty deleted file mode 100644 index bcc544bdd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/perceptronTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/poissonRegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/poissonRegressionTest.class deleted file mode 100644 index 402384a14..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/poissonRegressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/poissonRegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/poissonRegressionTest.tasty deleted file mode 100644 index bffe6d7d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/poissonRegressionTest.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/poissonRegressionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/poissonRegressionTest2.class deleted file mode 100644 index 9167a2ef9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/poissonRegressionTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/poissonRegressionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/poissonRegressionTest2.tasty deleted file mode 100644 index 317137c09..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/poissonRegressionTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/polyORegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/polyORegressionTest.class deleted file mode 100644 index b542f4e5e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/polyORegressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/polyORegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/polyORegressionTest.tasty deleted file mode 100644 index ccef2d55c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/polyORegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/polyORegressionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/polyORegressionTest2.class deleted file mode 100644 index 0619a1de1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/polyORegressionTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/polyORegressionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/polyORegressionTest2.tasty deleted file mode 100644 index e74fd4d40..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/polyORegressionTest2.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/polyRegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/polyRegressionTest.class deleted file mode 100644 index e4cc6af0d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/polyRegressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/polyRegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/polyRegressionTest.tasty deleted file mode 100644 index 49888b68d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/polyRegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/polyRegressionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/polyRegressionTest2.class deleted file mode 100644 index 6a046af51..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/polyRegressionTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/polyRegressionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/polyRegressionTest2.tasty deleted file mode 100644 index 1f0bcdc3b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/polyRegressionTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/predictorTest.class b/target/scala-3.6.4/classes/scalation/modeling/predictorTest.class deleted file mode 100644 index 569fa30e0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/predictorTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/predictorTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/predictorTest.tasty deleted file mode 100644 index 21575c24b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/predictorTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest.class 
b/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest.class deleted file mode 100644 index 5d9de8da7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest.tasty deleted file mode 100644 index 72e991db8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest2.class b/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest2.class deleted file mode 100644 index 28efdaad8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest2.tasty deleted file mode 100644 index 754d8ce1e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest3.class b/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest3.class deleted file mode 100644 index 056e7ccb7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest3.tasty deleted file mode 100644 index 454e8511e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest4.class b/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest4.class deleted file mode 100644 index 
d43aaeecc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest4.tasty deleted file mode 100644 index 0c6ac2d10..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest5.class b/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest5.class deleted file mode 100644 index 06251bb12..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest5.tasty deleted file mode 100644 index 05f7029b1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest6.class b/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest6.class deleted file mode 100644 index 3d1464e7f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest6.tasty deleted file mode 100644 index bf2e36d57..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest7.class b/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest7.class deleted file mode 100644 index a98e11fd9..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest7.tasty deleted file mode 100644 index a36896c6a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionCatTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTest.class deleted file mode 100644 index 6d5f8e9d6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTest.tasty deleted file mode 100644 index 79a324fb5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest10.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTest10.class deleted file mode 100644 index e207641cf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest10.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest10.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTest10.tasty deleted file mode 100644 index f9c7fdb4c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest10.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTest2.class deleted file mode 100644 index 6a2de6810..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest2.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/regressionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTest2.tasty deleted file mode 100644 index d2263aec2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest3.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTest3.class deleted file mode 100644 index a9cefef8f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTest3.tasty deleted file mode 100644 index 65717dac1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest4.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTest4.class deleted file mode 100644 index 457ddab63..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTest4.tasty deleted file mode 100644 index 3921adab5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest5.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTest5.class deleted file mode 100644 index a05dfd48e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTest5.tasty deleted file mode 
100644 index a23e051a5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest6.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTest6.class deleted file mode 100644 index fac1685a7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTest6.tasty deleted file mode 100644 index 9d93837f3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest7.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTest7.class deleted file mode 100644 index b33a355f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTest7.tasty deleted file mode 100644 index 3a249ce5b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest8.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTest8.class deleted file mode 100644 index 797bcaca2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest8.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest8.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTest8.tasty deleted file mode 100644 index 8502c73bc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest8.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/regressionTest9.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTest9.class deleted file mode 100644 index e8b4d85fa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest9.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTest9.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTest9.tasty deleted file mode 100644 index 12bf8880f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTest9.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest.class deleted file mode 100644 index b93e1386a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest.tasty deleted file mode 100644 index 924b67b80..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest2.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest2.class deleted file mode 100644 index 75c333560..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest2.tasty deleted file mode 100644 index cc8a55e17..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest3.class 
b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest3.class deleted file mode 100644 index 6f6444b7a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest3.tasty deleted file mode 100644 index 77e591be8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest4.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest4.class deleted file mode 100644 index e2e0abd9c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest4.tasty deleted file mode 100644 index 32d07021d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest5.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest5.class deleted file mode 100644 index d3fcb7c9a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest5.tasty deleted file mode 100644 index 493dac2eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest6.class 
b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest6.class deleted file mode 100644 index c1bfb8e63..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest6.tasty deleted file mode 100644 index 52de568cd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeGBTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest.class deleted file mode 100644 index 80fdf29fd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest.tasty deleted file mode 100644 index ff091a908..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest2.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest2.class deleted file mode 100644 index cddee41c7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest2.tasty deleted file mode 100644 index 020257eab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest3.class 
b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest3.class deleted file mode 100644 index fb42ef182..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest3.tasty deleted file mode 100644 index 388742c21..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeMTTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest.class deleted file mode 100644 index 644eb5816..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest.tasty deleted file mode 100644 index c676092c7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest2.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest2.class deleted file mode 100644 index b5db40e14..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest2.tasty deleted file mode 100644 index f5bb3dace..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest3.class 
b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest3.class deleted file mode 100644 index 341080c5b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest3.tasty deleted file mode 100644 index fe9182387..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest4.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest4.class deleted file mode 100644 index d31720b97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest4.tasty deleted file mode 100644 index dfaf5f5cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest5.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest5.class deleted file mode 100644 index b76f4d881..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest5.tasty deleted file mode 100644 index a68b49c3e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRFTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest.class 
b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest.class deleted file mode 100644 index a597e46ca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest.tasty deleted file mode 100644 index 4a64172fd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest2.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest2.class deleted file mode 100644 index 6be39b13b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest2.tasty deleted file mode 100644 index a21593e06..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest3.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest3.class deleted file mode 100644 index 758f50125..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest3.tasty deleted file mode 100644 index 44df04768..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest4.class 
b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest4.class deleted file mode 100644 index 64da51a87..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest4.tasty deleted file mode 100644 index f0cb88887..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeRF_MTTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest.class deleted file mode 100644 index 495b8aa3c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest.tasty deleted file mode 100644 index 0a8d3b7d7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest2.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest2.class deleted file mode 100644 index 6d82838a6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest2.tasty deleted file mode 100644 index f49f06e20..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest3.class 
b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest3.class deleted file mode 100644 index 130800ad8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest3.tasty deleted file mode 100644 index bcbe38d58..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest4.class b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest4.class deleted file mode 100644 index 3db1500e8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest4.tasty deleted file mode 100644 index 9e1d31ad1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionTreeTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionWLSTest.class b/target/scala-3.6.4/classes/scalation/modeling/regressionWLSTest.class deleted file mode 100644 index 1f23c3875..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionWLSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionWLSTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionWLSTest.tasty deleted file mode 100644 index b2424ea97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionWLSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionWLSTest2.class b/target/scala-3.6.4/classes/scalation/modeling/regressionWLSTest2.class deleted file mode 100644 
index 4a73112eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionWLSTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/regressionWLSTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/regressionWLSTest2.tasty deleted file mode 100644 index 44a0b85fb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/regressionWLSTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest.class deleted file mode 100644 index 1d512e91c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest.tasty deleted file mode 100644 index 4d29c4f81..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest2.class deleted file mode 100644 index 267d1337b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest2.tasty deleted file mode 100644 index 7cb8f4ab6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest3.class b/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest3.class deleted file mode 100644 index a48946190..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest3.tasty deleted file mode 100644 index aaee851b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest4.class b/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest4.class deleted file mode 100644 index d4f6715f8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest4.tasty deleted file mode 100644 index 6db254b45..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest5.class b/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest5.class deleted file mode 100644 index c660c966e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest5.tasty deleted file mode 100644 index 8427d7f30..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest6.class b/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest6.class deleted file mode 100644 index a90d4a619..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest6.tasty deleted file mode 100644 index e553a6cc7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest7.class b/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest7.class deleted file mode 100644 index afa888787..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest7.tasty deleted file mode 100644 index 50f43d6aa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/ridgeRegressionTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/roundRegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/roundRegressionTest.class deleted file mode 100644 index bf3ad11c7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/roundRegressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/roundRegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/roundRegressionTest.tasty deleted file mode 100644 index 69f59ecef..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/roundRegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest.class deleted file mode 100644 index b977da218..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest.tasty deleted file mode 100644 index 693ccb07f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest2.class deleted file mode 100644 index 66bf44ceb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest2.tasty deleted file mode 100644 index 10551cada..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest3.class b/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest3.class deleted file mode 100644 index 4f504333d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest3.tasty deleted file mode 100644 index a5a8270b6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleExpRegressionTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest.class deleted file mode 100644 index 46a4ec8ae..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest.tasty deleted file mode 100644 index e615ab758..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest2.class deleted file mode 100644 index fb2580a8f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest2.tasty deleted file mode 100644 index 8ad6acc7e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest3.class b/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest3.class deleted file mode 100644 index 4005412de..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest3.tasty deleted file mode 100644 index 3db168185..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest4.class b/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest4.class deleted file mode 100644 index 16c35bea7..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest4.tasty deleted file mode 100644 index 39e94a5e0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest5.class b/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest5.class deleted file mode 100644 index 5db74c15a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest5.tasty deleted file mode 100644 index 342e9d3fa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest6.class b/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest6.class deleted file mode 100644 index af3e62a24..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest6.tasty deleted file mode 100644 index fd015d1a0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest7.class b/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest7.class deleted file mode 100644 index 9e727f32e..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest7.tasty deleted file mode 100644 index cbc003cd8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest8.class b/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest8.class deleted file mode 100644 index f241bb1b6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest8.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest8.tasty b/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest8.tasty deleted file mode 100644 index 9221b9fb8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simpleRegressionTest8.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest.class deleted file mode 100644 index 69950b400..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest.tasty deleted file mode 100644 index c260fe735..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest2.class deleted file mode 100644 index 0c7e9f2bf..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest2.tasty deleted file mode 100644 index 10ba3f412..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest3.class b/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest3.class deleted file mode 100644 index 8af2f59ef..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest3.tasty deleted file mode 100644 index 328e59312..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest4.class b/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest4.class deleted file mode 100644 index 396c45431..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest4.tasty deleted file mode 100644 index c76117c30..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/simplerRegressionTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/sumQueueTest.class b/target/scala-3.6.4/classes/scalation/modeling/sumQueueTest.class deleted file mode 100644 index bc46cc09f..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/sumQueueTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/sumQueueTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/sumQueueTest.tasty deleted file mode 100644 index 7635f4f60..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/sumQueueTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/sumQueueTest2.class b/target/scala-3.6.4/classes/scalation/modeling/sumQueueTest2.class deleted file mode 100644 index 9aba047e0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/sumQueueTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/sumQueueTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/sumQueueTest2.tasty deleted file mode 100644 index c79b903c8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/sumQueueTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest.class deleted file mode 100644 index b65b1c7b4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest.tasty deleted file mode 100644 index 3050400a6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest2.class deleted file mode 100644 index c1afb3e54..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest2.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest2.tasty deleted file mode 100644 index 05fcb446f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest3.class b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest3.class deleted file mode 100644 index 4faf45f5f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest3.tasty deleted file mode 100644 index 9d27749da..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest4.class b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest4.class deleted file mode 100644 index 1a9e6890a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest4.tasty deleted file mode 100644 index 8d0fd4271..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest5.class b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest5.class deleted file mode 100644 index 93698e31e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest5.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest5.tasty deleted file mode 100644 index 2429ae35a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest6.class b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest6.class deleted file mode 100644 index 9d23cf13c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest6.tasty deleted file mode 100644 index 5b9df6d9a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest7.class b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest7.class deleted file mode 100644 index 62157c825..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest7.tasty deleted file mode 100644 index 18a036f48..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest8.class b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest8.class deleted file mode 100644 index 4fcc9a22d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest8.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest8.tasty b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest8.tasty deleted file mode 100644 index c9e6159df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest8.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest9.class b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest9.class deleted file mode 100644 index d72053bd1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest9.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest9.tasty b/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest9.tasty deleted file mode 100644 index cf97aba7d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symLassoRegressionTest9.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest.class deleted file mode 100644 index a96921287..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest.tasty deleted file mode 100644 index 428baad03..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest2.class deleted file mode 100644 index e1b5a8a88..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest2.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest2.tasty deleted file mode 100644 index 00b7771e9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest3.class b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest3.class deleted file mode 100644 index 3793ef87f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest3.tasty deleted file mode 100644 index 63346f760..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest4.class b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest4.class deleted file mode 100644 index 60c1705d9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest4.tasty deleted file mode 100644 index 72aee4537..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest5.class b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest5.class deleted file mode 100644 index 8c3b1358c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest5.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest5.tasty deleted file mode 100644 index 4c808dcd0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest6.class b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest6.class deleted file mode 100644 index cee8a4fe5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest6.tasty deleted file mode 100644 index c2b28d91f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest7.class b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest7.class deleted file mode 100644 index 365f15280..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest7.tasty deleted file mode 100644 index 2531653a6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest8.class b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest8.class deleted file mode 100644 index 90c5bc70d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest8.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest8.tasty b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest8.tasty deleted file mode 100644 index 4faec939d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest8.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest9.class b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest9.class deleted file mode 100644 index 939997e9c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest9.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest9.tasty b/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest9.tasty deleted file mode 100644 index ec8f2a3e9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symRidgeRegressionTest9.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest.class deleted file mode 100644 index 507766077..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest.tasty deleted file mode 100644 index 4d00f5308..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest10.class b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest10.class deleted file mode 100644 index 45b62f89a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest10.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest10.tasty b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest10.tasty deleted file mode 100644 index e99516224..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest10.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest11.class b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest11.class deleted file mode 100644 index 235988936..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest11.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest11.tasty b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest11.tasty deleted file mode 100644 index e83e1006b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest11.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest12.class b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest12.class deleted file mode 100644 index 57f3a69c1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest12.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest12.tasty b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest12.tasty deleted file mode 100644 index 6650577d1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest12.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest13.class b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest13.class deleted file mode 100644 index aa36a1959..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest13.class and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest13.tasty b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest13.tasty deleted file mode 100644 index 4b684b013..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest13.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest2.class deleted file mode 100644 index 946888e82..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest2.tasty deleted file mode 100644 index 021824b09..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest3.class b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest3.class deleted file mode 100644 index f10913fec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest3.tasty deleted file mode 100644 index c6dbd74ed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest4.class b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest4.class deleted file mode 100644 index 54aa6195d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest4.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest4.tasty b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest4.tasty deleted file mode 100644 index 948f88632..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest5.class b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest5.class deleted file mode 100644 index 2d2662670..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest5.tasty deleted file mode 100644 index a5c0e66f4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest6.class b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest6.class deleted file mode 100644 index 7fae0fc76..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest6.tasty deleted file mode 100644 index 5753955f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest7.class b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest7.class deleted file mode 100644 index 067ca090e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest7.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest7.tasty deleted file mode 100644 index 13fc3b51b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest8.class b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest8.class deleted file mode 100644 index 05184da5d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest8.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest8.tasty b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest8.tasty deleted file mode 100644 index 904062698..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest8.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest9.class b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest9.class deleted file mode 100644 index 1b585793e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest9.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest9.tasty b/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest9.tasty deleted file mode 100644 index 6cd23a94f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/symbolicRegressionTest9.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest.class deleted file mode 100644 index 2347b15d3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest.tasty deleted file mode 100644 index 185fccea8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest2.class deleted file mode 100644 index 394201d68..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest2.tasty deleted file mode 100644 index 953a42da7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest3.class b/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest3.class deleted file mode 100644 index ba10acda6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest3.tasty b/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest3.tasty deleted file mode 100644 index 465d970d1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest4.class b/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest4.class deleted file mode 100644 index 7b1e54dc6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest4.tasty 
b/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest4.tasty deleted file mode 100644 index 9083fc22b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest5.class b/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest5.class deleted file mode 100644 index 9c249212b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest5.tasty b/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest5.tasty deleted file mode 100644 index 37a5fbf34..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest5.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest6.class b/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest6.class deleted file mode 100644 index 9a411cc73..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest6.tasty b/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest6.tasty deleted file mode 100644 index bf17edc64..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest6.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest7.class b/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest7.class deleted file mode 100644 index c664f98da..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest7.tasty b/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest7.tasty deleted 
file mode 100644 index e3fa47c35..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest7.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest8.class b/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest8.class deleted file mode 100644 index 2939d9dec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest8.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest8.tasty b/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest8.tasty deleted file mode 100644 index 8110c10fa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/tranRegressionTest8.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/trigRegressionTest.class b/target/scala-3.6.4/classes/scalation/modeling/trigRegressionTest.class deleted file mode 100644 index 111753d88..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/trigRegressionTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/trigRegressionTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/trigRegressionTest.tasty deleted file mode 100644 index bfb03570c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/trigRegressionTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/trigRegressionTest2.class b/target/scala-3.6.4/classes/scalation/modeling/trigRegressionTest2.class deleted file mode 100644 index 139b75d2e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/trigRegressionTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/trigRegressionTest2.tasty b/target/scala-3.6.4/classes/scalation/modeling/trigRegressionTest2.tasty deleted file mode 100644 index d06eea110..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/modeling/trigRegressionTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/variableTest.class b/target/scala-3.6.4/classes/scalation/modeling/variableTest.class deleted file mode 100644 index 80f2f398a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/variableTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/modeling/variableTest.tasty b/target/scala-3.6.4/classes/scalation/modeling/variableTest.tasty deleted file mode 100644 index 74be6542a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/modeling/variableTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/multiArrayDequesTest.class b/target/scala-3.6.4/classes/scalation/multiArrayDequesTest.class deleted file mode 100644 index 0a78056f0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/multiArrayDequesTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/multiArrayDequesTest.tasty b/target/scala-3.6.4/classes/scalation/multiArrayDequesTest.tasty deleted file mode 100644 index f50d469ec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/multiArrayDequesTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/BoundsConstraint$.class b/target/scala-3.6.4/classes/scalation/optimization/BoundsConstraint$.class deleted file mode 100644 index 14875754d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/BoundsConstraint$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/BoundsConstraint.class b/target/scala-3.6.4/classes/scalation/optimization/BoundsConstraint.class deleted file mode 100644 index 3aa440a26..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/BoundsConstraint.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/BoundsConstraint.tasty b/target/scala-3.6.4/classes/scalation/optimization/BoundsConstraint.tasty deleted file mode 100644 index 436f00d71..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/BoundsConstraint.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient$.class b/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient$.class deleted file mode 100644 index 21a621ec6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient$package$.class b/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient$package$.class deleted file mode 100644 index ac43e71e7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient$package.class b/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient$package.class deleted file mode 100644 index f954509bd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient$package.tasty deleted file mode 100644 index cf5d627b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient.class b/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient.class deleted file mode 100644 index 7515516fa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient.class and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient.tasty b/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient.tasty deleted file mode 100644 index cd2061276..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient_NoLS$package$.class b/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient_NoLS$package$.class deleted file mode 100644 index cd982aa6d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient_NoLS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient_NoLS$package.class b/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient_NoLS$package.class deleted file mode 100644 index 56c8e67bd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient_NoLS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient_NoLS$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient_NoLS$package.tasty deleted file mode 100644 index e2ed02526..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient_NoLS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient_NoLS.class b/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient_NoLS.class deleted file mode 100644 index e6b99a94c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient_NoLS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient_NoLS.tasty b/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient_NoLS.tasty deleted file mode 100644 index 
f84555419..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/ConjugateGradient_NoLS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent$.class b/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent$.class deleted file mode 100644 index 7cf4dd5a2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent$package$.class b/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent$package$.class deleted file mode 100644 index 0da0846ff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent$package.class b/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent$package.class deleted file mode 100644 index 3637614b8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent$package.tasty deleted file mode 100644 index 9bf2338e3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent.class b/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent.class deleted file mode 100644 index 98b9ded75..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent.tasty 
b/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent.tasty deleted file mode 100644 index 4c343e4fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/CoordinateDescent.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS$.class b/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS$.class deleted file mode 100644 index 215a05705..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS$package$.class b/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS$package$.class deleted file mode 100644 index 5f1017ef1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS$package.class b/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS$package.class deleted file mode 100644 index ddd4a60fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS$package.tasty deleted file mode 100644 index 4a2e45e34..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS.class b/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS.class deleted file mode 100644 index 587675bc2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS.tasty 
b/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS.tasty deleted file mode 100644 index 1f2154c84..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GoldenSectionLS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent$.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent$.class deleted file mode 100644 index ebf1c51e6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent$package$.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent$package$.class deleted file mode 100644 index 8ae47444d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent$package.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent$package.class deleted file mode 100644 index 5e54de98d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent$package.tasty deleted file mode 100644 index 637246b22..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent.class deleted file mode 100644 index 2c11e8fce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent.tasty 
b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent.tasty deleted file mode 100644 index 8a903792d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam$.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam$.class deleted file mode 100644 index c7cea0756..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam$package$.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam$package$.class deleted file mode 100644 index 6ded407ba..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam$package.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam$package.class deleted file mode 100644 index a7d3208e7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam$package.tasty deleted file mode 100644 index 1cb7d97a7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam.class deleted file mode 100644 index e02d525cc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam.tasty b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam.tasty deleted file mode 100644 index 27b10636d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Adam.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo$.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo$.class deleted file mode 100644 index 1729b478a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo$package$.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo$package$.class deleted file mode 100644 index 4d156afcb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo$package.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo$package.class deleted file mode 100644 index 22170e8f0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo$package.tasty deleted file mode 100644 index de87efb86..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo.class deleted file mode 100644 index 84d6c1a94..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo.tasty b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo.tasty deleted file mode 100644 index a855de61f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2$.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2$.class deleted file mode 100644 index 2b8177b73..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2$package$.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2$package$.class deleted file mode 100644 index 67518634d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2$package.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2$package.class deleted file mode 100644 index 308a62142..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2$package.tasty deleted file mode 100644 index 0ba94ad64..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2.class 
deleted file mode 100644 index 923ee6487..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2.tasty b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2.tasty deleted file mode 100644 index 3a5b6becd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_Mo2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS$.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS$.class deleted file mode 100644 index 3f9382118..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS$package$.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS$package$.class deleted file mode 100644 index a25435c9e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS$package.class b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS$package.class deleted file mode 100644 index 4ba5c8441..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS$package.tasty deleted file mode 100644 index 3803fe5b6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS.class 
b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS.class deleted file mode 100644 index 093fb3b1d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS.tasty b/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS.tasty deleted file mode 100644 index 8e18a9aa3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GradientDescent_NoLS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GridSearch$.class b/target/scala-3.6.4/classes/scalation/optimization/GridSearch$.class deleted file mode 100644 index 8c55f5cfc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GridSearch$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GridSearch$package$.class b/target/scala-3.6.4/classes/scalation/optimization/GridSearch$package$.class deleted file mode 100644 index a6fc8beed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GridSearch$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GridSearch$package.class b/target/scala-3.6.4/classes/scalation/optimization/GridSearch$package.class deleted file mode 100644 index cd03ae5b7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GridSearch$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GridSearch$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/GridSearch$package.tasty deleted file mode 100644 index 213bd8f9b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GridSearch$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GridSearch.class 
b/target/scala-3.6.4/classes/scalation/optimization/GridSearch.class deleted file mode 100644 index 74dc872a7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GridSearch.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GridSearch.tasty b/target/scala-3.6.4/classes/scalation/optimization/GridSearch.tasty deleted file mode 100644 index c0b1865b1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GridSearch.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GridSearchLS$package$.class b/target/scala-3.6.4/classes/scalation/optimization/GridSearchLS$package$.class deleted file mode 100644 index ea65d5ca3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GridSearchLS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GridSearchLS$package.class b/target/scala-3.6.4/classes/scalation/optimization/GridSearchLS$package.class deleted file mode 100644 index a25b40cd0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GridSearchLS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GridSearchLS$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/GridSearchLS$package.tasty deleted file mode 100644 index 0b4ba98ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GridSearchLS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GridSearchLS.class b/target/scala-3.6.4/classes/scalation/optimization/GridSearchLS.class deleted file mode 100644 index 811fd9716..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GridSearchLS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/GridSearchLS.tasty b/target/scala-3.6.4/classes/scalation/optimization/GridSearchLS.tasty 
deleted file mode 100644 index 9fb98a0eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/GridSearchLS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/Hungarian$package$.class b/target/scala-3.6.4/classes/scalation/optimization/Hungarian$package$.class deleted file mode 100644 index 7ed008aaf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/Hungarian$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/Hungarian$package.class b/target/scala-3.6.4/classes/scalation/optimization/Hungarian$package.class deleted file mode 100644 index 8ef28d5cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/Hungarian$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/Hungarian$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/Hungarian$package.tasty deleted file mode 100644 index a71a5042f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/Hungarian$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch$.class b/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch$.class deleted file mode 100644 index 9205c0809..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch$package$.class b/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch$package$.class deleted file mode 100644 index f3aa45cca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch$package.class b/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch$package.class 
deleted file mode 100644 index f03c7a426..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch$package.tasty deleted file mode 100644 index 2f501287a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch.class b/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch.class deleted file mode 100644 index 2c4a31ffa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch.tasty b/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch.tasty deleted file mode 100644 index bf0b8fa4f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/IntegerTabuSearch.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/LassoAddm$package$.class b/target/scala-3.6.4/classes/scalation/optimization/LassoAddm$package$.class deleted file mode 100644 index 3f349ab18..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/LassoAddm$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/LassoAddm$package.class b/target/scala-3.6.4/classes/scalation/optimization/LassoAddm$package.class deleted file mode 100644 index c1765fd3e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/LassoAddm$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/LassoAddm$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/LassoAddm$package.tasty deleted file 
mode 100644 index 52152425e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/LassoAddm$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/LassoAdmm$.class b/target/scala-3.6.4/classes/scalation/optimization/LassoAdmm$.class deleted file mode 100644 index bcdc25c2b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/LassoAdmm$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/LassoAdmm.class b/target/scala-3.6.4/classes/scalation/optimization/LassoAdmm.class deleted file mode 100644 index a83ab5532..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/LassoAdmm.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/LassoAdmm.tasty b/target/scala-3.6.4/classes/scalation/optimization/LassoAdmm.tasty deleted file mode 100644 index 373ab921a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/LassoAdmm.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/LineSearch.class b/target/scala-3.6.4/classes/scalation/optimization/LineSearch.class deleted file mode 100644 index 9cbb108cd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/LineSearch.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/LineSearch.tasty b/target/scala-3.6.4/classes/scalation/optimization/LineSearch.tasty deleted file mode 100644 index 4c584f16b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/LineSearch.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/Minimize$.class b/target/scala-3.6.4/classes/scalation/optimization/Minimize$.class deleted file mode 100644 index a18a31eed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/Minimize$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/Minimize$package$.class b/target/scala-3.6.4/classes/scalation/optimization/Minimize$package$.class deleted file mode 100644 index b653da1e8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/Minimize$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/Minimize$package.class b/target/scala-3.6.4/classes/scalation/optimization/Minimize$package.class deleted file mode 100644 index 946576c57..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/Minimize$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/Minimize$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/Minimize$package.tasty deleted file mode 100644 index 1cf9fcaa5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/Minimize$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/Minimize.class b/target/scala-3.6.4/classes/scalation/optimization/Minimize.class deleted file mode 100644 index 43ef0c49f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/Minimize.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/Minimize.tasty b/target/scala-3.6.4/classes/scalation/optimization/Minimize.tasty deleted file mode 100644 index 2712eaa99..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/Minimize.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/Minimizer$.class b/target/scala-3.6.4/classes/scalation/optimization/Minimizer$.class deleted file mode 100644 index 538daa5a5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/Minimizer$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/Minimizer.class 
b/target/scala-3.6.4/classes/scalation/optimization/Minimizer.class deleted file mode 100644 index be64ac121..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/Minimizer.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/Minimizer.tasty b/target/scala-3.6.4/classes/scalation/optimization/Minimizer.tasty deleted file mode 100644 index 0ea7f2b61..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/Minimizer.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/MonitorEpochs.class b/target/scala-3.6.4/classes/scalation/optimization/MonitorEpochs.class deleted file mode 100644 index 068f6ceda..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/MonitorEpochs.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/MonitorEpochs.tasty b/target/scala-3.6.4/classes/scalation/optimization/MonitorEpochs.tasty deleted file mode 100644 index a6d5d6a4a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/MonitorEpochs.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/MoreThuenteLS.scalaa b/target/scala-3.6.4/classes/scalation/optimization/MoreThuenteLS.scalaa deleted file mode 100644 index 477758784..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/MoreThuenteLS.scalaa +++ /dev/null @@ -1,466 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Jul 12 16:13:47 EDT 2021 - * @see LICENSE (MIT style license file). 
- * - * @note More-Thuente Line Search Algorithm - * - * @see www.ii.uib.no/~lennart/drgrad/More1994.pdf - */ - -package scalation -package optimization - -import scala.math.{abs, max, min, sqrt} - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Return true if variables x and y have different signs. - * in C: #define fsigndiff(x, y) (*(x) * (*(y) / fabs(*(y))) < 0.) - * @param x the first variable (double) - * @param y the second variable (double) - */ -inline def fsigndiff (x: Double, y: Double): Boolean = x * (y / abs (y)) < 0.0 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `MoreThuenteLS` class ... - * @param f the objective/loss function to minimize (vector-to-scalar) - * @param g the gradient of the objective/loss function (vector-to-vector) - * @param c1 constant for sufficient decrease (Wolfe condition 1: .0001 to .001) - * @param c2 constant for curvature/slope constraint (Wolfe condition 2: .9 to .8) - */ -class MoreThuenteLS (f: FunctionV2S, g: FunctionV2V, - c1: Double = 0.0001, c2: Double = 0.0, - minStep = 0.01, maxStep: Double = 100.0, - xtol: Double = 0.0001, ftol: Double = 0.0001, gtol: Double = 0.0001) - extends WolfeConditions (f, g, c1, c2): - - private val debug = debugf ("MoreThuenteLS", true) // debug function - private val flaw = flawf ("MoreThuenteLS") // flaw function - - private var brackt = false // not bracketed yet - private var stmin = 0.0 // minimum step length - private var stmax = PositiveInfinity // maximum step length - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the minimum and maximum steps to correspond to the present interval of uncertainty. 
- * @param stx - * @param sty - */ - inline def setInterval (brack: Boolean, stx: Double, sty: Double): Unit = - if brackt then - stmin = min (stx, sty) - stmax = max (stx, sty) - else - stmin = stx - stmax = step + 4.0 * (step - stx) - end if - end setInterval - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform an inexact Line Search (LS) on the function f to find an approximate - * local minima from the point x moving distance a (alpha) in the search direction - * p, which satisfies both Wolfe Conditions, returning the displacement a and the - * new point y = x + p * a. - * @param x the current point - * @param fx the functional value at x, f(x) - * @param p the current search direction - * @param step the initial step length - */ - def lsearch (x: VectorD, fx: Double, p: VectorD, step: Double = 1.0): (Double, VectorD) = - - val gx = g(x) // gradient at x - val gxp = gx dot p // initial gradient in search direction - var stage1 = true // in stage 1 - - if step <= 0.0 then flaw ("lsearch", "step size must be strictly positive") - if gxp > 0 then flaw ("lsearch", "p must be descent direction") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Improve the current point x by moving in the search direction s. 
- * @param n the dimension of search space - * @param x_ the current location/point vector - * @param f_ the value of the objective function - * @param g_ the gradient vector - * @param s the search direction - * @param step_ the current step size - * @param xp_ the previous location/point - * @param gp the previous gradient vector - * @param wp not used by this algorithm (use default) - */ - def lsearch (n: Int, - x_ : VectorD, - f_ : Double, - g_ : VectorD, - s: VectorD, - step_ : Double, - xp_ : VectorD, - gp: VectorD, - wp: VectorD = null): (Double, Double, Int) = - - - - debug ("lsearch", s"linesearch from x_ = $x_") - var x = x_ - var f = f_ - var g = g_ - var a = step - var xp = xp_ - - var count = 0 - var brackt = false - var stage1 = false - var uinfo = 0 - var dg = 0.0 - var stx, fx, dgx = 0.0 - var sty, fy, dgy = 0.0 - var fxm, dgxm, fym, dgym, fm, dgm = 0.0 - var finit, ftest1, dginit, dgtest = 0.0 - var width, prev_width = 0.0 - var stmin, stmax = 0.0 - - - - - // Initialize local variables. - finit = f - dgtest = param.ftol * dginit - width = param.max_step - param.min_step - prev_width = 2.0 * width - - */ - fx = finit - fy = finit - dgx = dginit - dgy = dginit - - // Variables a, f, dg contain the values of the step, function, and derivative at the current step. - var (a, fx, dgx) = (step, f(x), g(x)) - - // Variables stx, fx, dgx contain the values of the step, function, and directional derivative at the best step. - var (stx, fx, dgx) = (0.0, finit, dginit) - - // Variables sty, fy, dgy contain the value of the step, function, and derivative at the other endpoint of the interval of uncertainty. - var (sty, fy, dgy) = (0.0, finit, dginit) - - var (go, it) = (true, 0) - cfor (go && it < MAX_IT, it += 1) { - - var uinfo = 0 - - // Clip the step a to be in the range of [minStep, maxStep] - if a < minStep then s = minStep - if a > maxStep then a = maxStep - - // If an unusual termination is to occur then let step be the lowest point obtained so far. 
- if (brackt && ((a <= stmin || stmax <= a) || it >= MAX_IT || uinfo != 0)) || - (brackt && (stmax - stmin <= xtol * stmax)) then - a = stx - end if - - val y = x + p * a // new point: x + search dir * step - val fy = f(y) // new functional value - val gy = g(y) // new gradient - val gyp = gy dot p // dotted with search direction p - - val wolf1 = wolfe1 (fx, fy, a, gxp) // is Wolfe Condition 1 satisfied - - // Test for errors and convergence - - if brackt && ((a <= stmin || stmax <= a) || uinfo != 0) then - flaw ("lsearch", "rounding errors prevent further progress") - go = false - - if a == maxStep && f <= ftest1 && dg <= dgtest then - flaw ("lsearch", "the step size is at its maximum value") - go = false - - if a == minStep && (ftest1 < f || dgtest <= dg) then - flaw ("lsearch", "the step size is at its minimum value") - go = false - - if brackt && (stmax - stmin) <= xtol * stmax then - flaw ("lsearch", "width of the interval of uncertainty is too small") - go = false - - if f <= ftest1 && abs (dg) <= gtol * (-dginit) then - println ("lsearch: done as sufficient decrease cond. and the directional deriv cond. hold. - go = false - - // In the first stage we seek a step for which the modified - // function has a nonpositive value and nonnegative derivative. - - if stage1 && f <= ftest1 && min (ftol, gtol) * dginit <= dg then - stage1 = false - -/*********** - // A modified function is used to predict the step only if we have not obtained a step - // for which the modified function has a nonpositive function value and nonnegative derivative, - // and if a lower function value has been obtained but the decrease is not sufficient. - - if stage1 && ftest1 < f && f <= fx then // define the modified function and derivative values. 
- val fm = f - a * dgtest - val fxm = fx - stx * dgtest - val fym = fy - sty * dgtest - val gm = d - dgtest - val gxpm = gxp - dgtest - val gypm = gyp - dgtest - - // Call update_trial_interval() to update the interval of uncertainty and to compute new step - - uinfo = update_trial_interval (stx, fxm, gxpm, sty, fym, gypm, a, fm, gm, stmin, stmax, brackt) - - // Reset the function and gradient values for f - - fx = fxm + stx * dgtest - fy = fym + sty * dgtest - gxp = dgxm + dgtest - gyp = dgym + dgtest - else - // Call update_trial_interval() to update the interval of uncertainty and to compute new step. - - uinfo = update_trial_interval (stx, fx, gxp, sty, fy, gyp, a, f, g, stmin, stmax, brackt) - end if -***********/ - - // Call update_trial_interval() to update the interval of uncertainty and to compute new step. - - uinfo = update_trial_interval (stx, fx, gxp, sty, fy, gyp, a, f, g, stmin, stmax, brackt) - - // Force a sufficient decrease in the interval of uncertainty - - if brackt then - if 0.66 * prev_width <= abs (sty - stx) then a = stx + 0.5 * (sty - stx) - prev_width = width - width = abs (sty - stx) - end if - } // cfor - - (a, y) - end lsearch - -end MoreThuenteLS - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Update a safeguarded trial value and interval for line search. - *------------------------------------------------------------------------------ - * The parameter x represents the step with the least function value. - * The parameter t represents the current step. This function assumes - * that the derivative at the point of x in the direction of the step. - * If the bracket is set to true, the minimizer has been bracketed in - * an interval of uncertainty with endpoints between x and y. - *------------------------------------------------------------------------------ - * @see Jorge J. More and David J. Thuente. Line search algorithm with - * guaranteed sufficient decrease. 
ACM Transactions on Mathematical - * Software (TOMS), Vol 20, No 3, pp. 286-307, 1994. - *------------------------------------------------------------------------------ - * @param x The pointer to the value of one endpoint. - * @param fx The pointer to the value of f(x). - * @param dx The pointer to the value of f'(x). - * @param y The pointer to the value of another endpoint. - * @param fy The pointer to the value of f(y). - * @param dy The pointer to the value of f'(y). - * @param t The pointer to the value of the trial value, t. - * @param ft The pointer to the value of f(t). - * @param dt The pointer to the value of f'(t). - * @param tmin The minimum value for the trial value, t. - * @param tmax The maximum value for the trial value, t. - * @param brackt The pointer to the predicate if the trial value is bracketed. - * @return int Status value. Zero indicates a normal termination. - */ -def update_trial_interval (x_ : Double, - fx_ : Double, - dx_ : Double, - y_ : Double, - fy_ : Double, - dy_ : Double, - t_ : Double, - ft: Double, - dt: Double, - tmin: Double, - tmax: Double, - brackt_ : Boolean): Int = - var x = x_ - var fx = fx_ - var dx = dx_ - var y = y_ - var fy = fy_ - var dy = dy_ - var t = t_ - var brackt = brackt_ - - var bound = false - var dsign = fsigndiff (dt, dx) - var mc = 0.0 // minimizer of an interpolated cubic. - var mq = 0.0 // minimizer of an interpolated quadratic. - var newt = 0.0 // new trial value. - - // Check the input parameters for errors. - if brackt then - if t <= min (x, y) || max (x, y) <= t then // The trival value t is out of the interval. - return LBFGSERR_OUTOFINTERVAL.code - - if 0.0 <= dx * (t - x) then // The function must decrease from x. - return LBFGSERR_INCREASEGRADIENT.code - - if tmax < tmin then // Incorrect tmin and tmax specified. - return LBFGSERR_INCORRECT_TMINMAX.code - end if - - // Trial value selection. - if fx < ft then - /* Case 1: a higher function value. - The minimum is brackt. 
If the cubic minimizer is closer - to x than the quadratic one, the cubic one is taken, else - the average of the minimizers is taken. - */ - brackt = true - bound = true - mc = cubic_minimizer (mc, x, fx, dx, t, ft, dt) - mq = quad_minimizer (mq, x, fx, dx, t, ft) - newt = if abs (mc - x) < abs (mq - x) then mc else mc + 0.5 * (mq - mc) - - else if dsign then - /* Case 2: a lower function value and derivatives of - opposite sign. The minimum is brackt. If the cubic - minimizer is closer to x than the quadratic (secant) one, - the cubic one is taken, else the quadratic one is taken. - */ - brackt = true - bound = false - mc = cubic_minimizer (mc, x, fx, dx, t, ft, dt) - mq = quad_minimizer2 (mq, x, dx, t, dt) - newt = if abs (mc - t) > abs (mq - t) then mc else mq - - else if abs (dt) < abs (dx) then - /* Case 3: a lower function value, derivatives of the - same sign, and the magnitude of the derivative decreases. - The cubic minimizer is only used if the cubic tends to - infinity in the direction of the minimizer or if the minimum - of the cubic is beyond t. Otherwise the cubic minimizer is - defined to be either tmin or tmax. The quadratic (secant) - minimizer is also computed and if the minimum is brackt - then the the minimizer closest to x is taken, else the one - farthest away is taken. - */ - bound = true - mc = cubic_minimizer2 (mc, x, fx, dx, t, ft, dt, tmin, tmax) - mq = quad_minimizer2 (mq, x, dx, t, dt) - if brackt then - newt = if abs (t - mc) < abs (t - mq) then mc else mq - else - newt = if abs (t - mc) > abs (t - mq) then mc else mq - end if - - else - /* Case 4: a lower function value, derivatives of the - same sign, and the magnitude of the derivative does - not decrease. If the minimum is not brackt, the step - is either tmin or tmax, else the cubic minimizer is taken. - */ - bound = false - newt = if brackt then cubic_minimizer (newt, t, ft, dt, y, fy, dy) - else if x < t then tmax - else tmin - end if - - /* Update the interval of uncertainty. 
This update does not - depend on the new step or the case analysis above. - - Case a: if f(x) < f(t), - x <- x, y <- t. - - Case b: if f(t) <= f(x) && f'(t)*f'(x) > 0, - x <- t, y <- y. - - Case c: if f(t) <= f(x) && f'(t)*f'(x) < 0, - x <- t, y <- x. - */ - if fx < ft then // Case a - y = t; fy = ft; dy = dt - else // Case c - if dsign then - y = x; fy = fx; dy = dx - end if - // Cases b and c - x = t; fx = ft; dx = dt - end if - - // Clip the new trial value in [tmin, tmax]. - if tmax < newt then newt = tmax - if newt < tmin then newt = tmin - - // Redefine the new trial value if it is close to the upper bound of the interval. - if brackt && bound then - mq = x + 0.66 * (y - x) - if x < y then - if mq < newt then newt = mq - else - if newt < mq then newt = mq - end if - end if - - // Return the new trial value. - t = newt - 0 // return success -end update_trial_interval - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Return the 1-norm of vector x. - * @param x the vector whose 1-norm is sought - * @param start the start index - * @param n the end index - */ -def owlqn_x1norm (x: VectorD, start: Int, n: Int): Double = - var norm = 0.0 - for i <- start until n do norm += abs (x(i)) - norm -end owlqn_x1norm - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Project ... - * @param d the distance vector - * @param sign the sign vector - * @param start the start index - * @param end_ the end index - */ -def owlqn_project (d: VectorD, sign: VectorD, start: Int, end_ : Int): Unit = - for i <- start until end_ do - if d(i) * sign(i) <= 0.0 then d(i) = 0.0 - end for -end owlqn_project - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bFGS_LSTest` main function tests the `BFGSLS` object. 
- * > runMain scalation.optimization.bFGS_LSTest - */ -@main def bFGS_LSTest (): Unit = - - println ("\nMinimize: (x_0 - 3)^2 + (x_1 - 4)^2 + 1") - - def ff (x: VectorD): Double = (x(0) - 3) * (x(0) - 3) + (x(1) - 4) * (x(1) - 4) + 1.0 - def gf (x: VectorD): VectorD = VectorD (6 - 2*x(0), 8 - 2*x(1)) - - BFGS_LS.set_ff (ff) - BFGS_LS.set_gf (gf) - - val n = 2 // the dimension of the search space - val x = VectorD (0, 0) // the current location/point vector - val f = 26.0 // the objective function value f(x) - val g = VectorD (-6, -8) // the gradient vector at x - val s = VectorD (6, 8) // the search direction (e.g., opposite g) - val step = 0.2 // the initial step size - val xp = VectorD (0, 0) // the previous location/point vector - val gp = VectorD (0, 0) // the previous gradient vector - - val code = BFGS_LS.line_search_backtracking (n, x, f, g, s, step, xp, gp) - - println (s"optimal solution x = $x with an objective value ff(x) = ${ff(x)}, with status code $code") - -end bFGS_LSTest - diff --git a/target/scala-3.6.4/classes/scalation/optimization/NLPTest$package$.class b/target/scala-3.6.4/classes/scalation/optimization/NLPTest$package$.class deleted file mode 100644 index 56078fb80..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NLPTest$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/NLPTest$package.class b/target/scala-3.6.4/classes/scalation/optimization/NLPTest$package.class deleted file mode 100644 index 7c8890e3f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NLPTest$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/NLPTest$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/NLPTest$package.tasty deleted file mode 100644 index 109756a95..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NLPTest$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex$package$.class b/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex$package$.class deleted file mode 100644 index b7e614dd5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex$package.class b/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex$package.class deleted file mode 100644 index 105d26fdf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex$package.tasty deleted file mode 100644 index 3c00d524c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex.class b/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex.class deleted file mode 100644 index c9f09907f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex.tasty b/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex.tasty deleted file mode 100644 index fc14e8143..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2$.class b/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2$.class deleted file mode 100644 index f6807d93d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2$.class 
and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2$package$.class b/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2$package$.class deleted file mode 100644 index 7669c6c7b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2$package.class b/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2$package.class deleted file mode 100644 index 7a3f1465d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2$package.tasty deleted file mode 100644 index 537ff59b9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2.class b/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2.class deleted file mode 100644 index d97ebc003..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2.tasty b/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2.tasty deleted file mode 100644 index 46868fbf1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NelderMeadSimplex2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/NewtonRaphson$package$.class b/target/scala-3.6.4/classes/scalation/optimization/NewtonRaphson$package$.class deleted file mode 100644 index 6e151c49d..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/optimization/NewtonRaphson$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/NewtonRaphson$package.class b/target/scala-3.6.4/classes/scalation/optimization/NewtonRaphson$package.class deleted file mode 100644 index a3461affd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NewtonRaphson$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/NewtonRaphson$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/NewtonRaphson$package.tasty deleted file mode 100644 index e71163450..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NewtonRaphson$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/NewtonRaphson.class b/target/scala-3.6.4/classes/scalation/optimization/NewtonRaphson.class deleted file mode 100644 index 0a2fd4b95..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NewtonRaphson.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/NewtonRaphson.tasty b/target/scala-3.6.4/classes/scalation/optimization/NewtonRaphson.tasty deleted file mode 100644 index 63dfbe33e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/NewtonRaphson.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS$.class b/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS$.class deleted file mode 100644 index 47507e383..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS$package$.class b/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS$package$.class deleted file mode 100644 index 53ad09819..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS$package.class b/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS$package.class deleted file mode 100644 index 4af889e08..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS$package.tasty deleted file mode 100644 index 9879f1c19..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS.class b/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS.class deleted file mode 100644 index 4dfed6b39..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS.tasty b/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS.tasty deleted file mode 100644 index 7085b4050..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/Newton_NoLS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/PathMonitor.class b/target/scala-3.6.4/classes/scalation/optimization/PathMonitor.class deleted file mode 100644 index a4b66f6b1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/PathMonitor.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/PathMonitor.tasty b/target/scala-3.6.4/classes/scalation/optimization/PathMonitor.tasty deleted file mode 100644 index 780bb858c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/PathMonitor.tasty and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/optimization/SPSA$.class b/target/scala-3.6.4/classes/scalation/optimization/SPSA$.class deleted file mode 100644 index a05cc6f71..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/SPSA$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/SPSA$package$.class b/target/scala-3.6.4/classes/scalation/optimization/SPSA$package$.class deleted file mode 100644 index 3e9f9977e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/SPSA$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/SPSA$package.class b/target/scala-3.6.4/classes/scalation/optimization/SPSA$package.class deleted file mode 100644 index a013a9926..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/SPSA$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/SPSA$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/SPSA$package.tasty deleted file mode 100644 index b2fd946b6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/SPSA$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/SPSA.class b/target/scala-3.6.4/classes/scalation/optimization/SPSA.class deleted file mode 100644 index 84d03cd71..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/SPSA.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/SPSA.tasty b/target/scala-3.6.4/classes/scalation/optimization/SPSA.tasty deleted file mode 100644 index eebe2ee6e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/SPSA.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/StoppingRule$.class b/target/scala-3.6.4/classes/scalation/optimization/StoppingRule$.class deleted file mode 100644 index b114ade05..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/optimization/StoppingRule$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/StoppingRule.class b/target/scala-3.6.4/classes/scalation/optimization/StoppingRule.class deleted file mode 100644 index 015e33c37..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/StoppingRule.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/StoppingRule.tasty b/target/scala-3.6.4/classes/scalation/optimization/StoppingRule.tasty deleted file mode 100644 index 9167ec43b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/StoppingRule.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/TabuSearch$.class b/target/scala-3.6.4/classes/scalation/optimization/TabuSearch$.class deleted file mode 100644 index 98f1f774c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/TabuSearch$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/TabuSearch$package$.class b/target/scala-3.6.4/classes/scalation/optimization/TabuSearch$package$.class deleted file mode 100644 index b858cdcc5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/TabuSearch$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/TabuSearch$package.class b/target/scala-3.6.4/classes/scalation/optimization/TabuSearch$package.class deleted file mode 100644 index f6dc4b96c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/TabuSearch$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/TabuSearch$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/TabuSearch$package.tasty deleted file mode 100644 index aeaa7c1fe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/TabuSearch$package.tasty 
and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/TabuSearch.class b/target/scala-3.6.4/classes/scalation/optimization/TabuSearch.class deleted file mode 100644 index ab9aab2d4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/TabuSearch.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/TabuSearch.tasty b/target/scala-3.6.4/classes/scalation/optimization/TabuSearch.tasty deleted file mode 100644 index d918d0eba..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/TabuSearch.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions$.class b/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions$.class deleted file mode 100644 index 596fee02e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions$package$.class b/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions$package$.class deleted file mode 100644 index e3dc4cf48..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions$package.class b/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions$package.class deleted file mode 100644 index b74724128..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions$package.tasty deleted file mode 100644 index 07322d99b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions.class b/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions.class deleted file mode 100644 index e16b5f1e9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions.tasty b/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions.tasty deleted file mode 100644 index 87a8f2967..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeConditions.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS$.class b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS$.class deleted file mode 100644 index c5fb05568..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS$package$.class b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS$package$.class deleted file mode 100644 index 841756f5e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS$package.class b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS$package.class deleted file mode 100644 index 6bf62faa0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS$package.tasty deleted file mode 100644 index 8ef86dc15..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS.class 
b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS.class deleted file mode 100644 index 70b3e37d7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS.tasty b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS.tasty deleted file mode 100644 index 32c8af44b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2$.class b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2$.class deleted file mode 100644 index 3b378d1d3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2$package$.class b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2$package$.class deleted file mode 100644 index 57b5cda4d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2$package.class b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2$package.class deleted file mode 100644 index aeb6ff513..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2$package.tasty deleted file mode 100644 index 9f52c247b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2.class b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2.class deleted file mode 100644 index fe829d866..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2.tasty b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2.tasty deleted file mode 100644 index aae021ad2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3$.class b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3$.class deleted file mode 100644 index cf8b8350a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3$package$.class b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3$package$.class deleted file mode 100644 index 975fd46fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3$package.class b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3$package.class deleted file mode 100644 index 1b109c158..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3$package.tasty deleted file mode 100644 index 20e8938f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3.class b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3.class deleted file mode 100644 index 7674595da..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3.tasty b/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3.tasty deleted file mode 100644 index c04beee41..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/WolfeLS3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest.class b/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest.class deleted file mode 100644 index 13940d4db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest.tasty deleted file mode 100644 index 081be53a9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest2.class b/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest2.class deleted file mode 100644 index a83012da2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest2.tasty b/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest2.tasty deleted file mode 100644 index f40bfc27b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest3.class b/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest3.class deleted file mode 100644 index 49ffba9ba..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest3.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest3.tasty b/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest3.tasty deleted file mode 100644 index 3b3393efd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradientTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest.class b/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest.class deleted file mode 100644 index 7d5ff6883..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest.tasty deleted file mode 100644 index e782b239f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest2.class b/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest2.class deleted file mode 100644 index dbdff7bb0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest2.tasty b/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest2.tasty deleted file mode 100644 index 5e09cd144..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest3.class b/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest3.class deleted file mode 100644 index f83e07c4e..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest3.tasty b/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest3.tasty deleted file mode 100644 index d7d7ba9c3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/conjugateGradient_NoLSTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/coordinateDescentTest.class b/target/scala-3.6.4/classes/scalation/optimization/coordinateDescentTest.class deleted file mode 100644 index 24eac6d8f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/coordinateDescentTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/coordinateDescentTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/coordinateDescentTest.tasty deleted file mode 100644 index 14dcdda79..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/coordinateDescentTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/BealeFunction$.class b/target/scala-3.6.4/classes/scalation/optimization/functions/BealeFunction$.class deleted file mode 100644 index 03486a74c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/BealeFunction$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/BealeFunction.class b/target/scala-3.6.4/classes/scalation/optimization/functions/BealeFunction.class deleted file mode 100644 index 45d03720d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/BealeFunction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/BealeFunction.tasty b/target/scala-3.6.4/classes/scalation/optimization/functions/BealeFunction.tasty 
deleted file mode 100644 index b936d025f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/BealeFunction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/BenchmarkFunction.class b/target/scala-3.6.4/classes/scalation/optimization/functions/BenchmarkFunction.class deleted file mode 100644 index af23396d5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/BenchmarkFunction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/BenchmarkFunction.tasty b/target/scala-3.6.4/classes/scalation/optimization/functions/BenchmarkFunction.tasty deleted file mode 100644 index de3886e51..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/BenchmarkFunction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky1Function$.class b/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky1Function$.class deleted file mode 100644 index 9a4040124..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky1Function$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky1Function.class b/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky1Function.class deleted file mode 100644 index af12b43ad..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky1Function.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky1Function.tasty b/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky1Function.tasty deleted file mode 100644 index b82581aae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky1Function.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky2Function$.class b/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky2Function$.class deleted file mode 100644 index 9b97b161c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky2Function$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky2Function.class b/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky2Function.class deleted file mode 100644 index 2f18d21df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky2Function.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky2Function.tasty b/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky2Function.tasty deleted file mode 100644 index fde370c22..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky2Function.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky3Function$.class b/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky3Function$.class deleted file mode 100644 index 9ca319bf4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky3Function$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky3Function.class b/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky3Function.class deleted file mode 100644 index 27df64bbc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky3Function.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky3Function.tasty 
b/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky3Function.tasty deleted file mode 100644 index 3f3159ff3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/Bohachevsky3Function.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/BoothFunction$.class b/target/scala-3.6.4/classes/scalation/optimization/functions/BoothFunction$.class deleted file mode 100644 index 7233a9ebb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/BoothFunction$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/BoothFunction.class b/target/scala-3.6.4/classes/scalation/optimization/functions/BoothFunction.class deleted file mode 100644 index 5303a43ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/BoothFunction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/BoothFunction.tasty b/target/scala-3.6.4/classes/scalation/optimization/functions/BoothFunction.tasty deleted file mode 100644 index 8f4872abd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/BoothFunction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/Camel3Function$.class b/target/scala-3.6.4/classes/scalation/optimization/functions/Camel3Function$.class deleted file mode 100644 index f4e8b9185..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/Camel3Function$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/Camel3Function.class b/target/scala-3.6.4/classes/scalation/optimization/functions/Camel3Function.class deleted file mode 100644 index a96bd7673..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/Camel3Function.class and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/optimization/functions/Camel3Function.tasty b/target/scala-3.6.4/classes/scalation/optimization/functions/Camel3Function.tasty deleted file mode 100644 index 04fddb5e4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/Camel3Function.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/CubeFunction$.class b/target/scala-3.6.4/classes/scalation/optimization/functions/CubeFunction$.class deleted file mode 100644 index f4abfc6e0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/CubeFunction$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/CubeFunction.class b/target/scala-3.6.4/classes/scalation/optimization/functions/CubeFunction.class deleted file mode 100644 index 2361cdcce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/CubeFunction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/CubeFunction.tasty b/target/scala-3.6.4/classes/scalation/optimization/functions/CubeFunction.tasty deleted file mode 100644 index c8cfeeb3e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/CubeFunction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/FreudensteinRothFunction$.class b/target/scala-3.6.4/classes/scalation/optimization/functions/FreudensteinRothFunction$.class deleted file mode 100644 index 651a4d9ff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/FreudensteinRothFunction$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/FreudensteinRothFunction.class b/target/scala-3.6.4/classes/scalation/optimization/functions/FreudensteinRothFunction.class deleted file mode 100644 index 8b52eb2ce..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/optimization/functions/FreudensteinRothFunction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/FreudensteinRothFunction.tasty b/target/scala-3.6.4/classes/scalation/optimization/functions/FreudensteinRothFunction.tasty deleted file mode 100644 index c468ad0d4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/FreudensteinRothFunction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/McCormickFunction$.class b/target/scala-3.6.4/classes/scalation/optimization/functions/McCormickFunction$.class deleted file mode 100644 index 21c3a23db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/McCormickFunction$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/McCormickFunction.class b/target/scala-3.6.4/classes/scalation/optimization/functions/McCormickFunction.class deleted file mode 100644 index 8898f64d2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/McCormickFunction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/McCormickFunction.tasty b/target/scala-3.6.4/classes/scalation/optimization/functions/McCormickFunction.tasty deleted file mode 100644 index 5ede97ecc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/McCormickFunction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/ParaboloidFunction$.class b/target/scala-3.6.4/classes/scalation/optimization/functions/ParaboloidFunction$.class deleted file mode 100644 index 7d2ef2fd0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/ParaboloidFunction$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/functions/ParaboloidFunction.class b/target/scala-3.6.4/classes/scalation/optimization/functions/ParaboloidFunction.class deleted file mode 100644 index 8c650b5c8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/ParaboloidFunction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/ParaboloidFunction.tasty b/target/scala-3.6.4/classes/scalation/optimization/functions/ParaboloidFunction.tasty deleted file mode 100644 index e8f440ca8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/ParaboloidFunction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/QuarticFunction$.class b/target/scala-3.6.4/classes/scalation/optimization/functions/QuarticFunction$.class deleted file mode 100644 index dfa2ddf72..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/QuarticFunction$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/QuarticFunction.class b/target/scala-3.6.4/classes/scalation/optimization/functions/QuarticFunction.class deleted file mode 100644 index 94ae16f8b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/QuarticFunction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/QuarticFunction.tasty b/target/scala-3.6.4/classes/scalation/optimization/functions/QuarticFunction.tasty deleted file mode 100644 index d4e36530a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/QuarticFunction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/ReciprocalFunction$.class b/target/scala-3.6.4/classes/scalation/optimization/functions/ReciprocalFunction$.class deleted file mode 100644 index 00676848f..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/ReciprocalFunction$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/ReciprocalFunction.class b/target/scala-3.6.4/classes/scalation/optimization/functions/ReciprocalFunction.class deleted file mode 100644 index 1b7501687..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/ReciprocalFunction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/ReciprocalFunction.tasty b/target/scala-3.6.4/classes/scalation/optimization/functions/ReciprocalFunction.tasty deleted file mode 100644 index 35665d753..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/ReciprocalFunction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/RosenbrockFunction$.class b/target/scala-3.6.4/classes/scalation/optimization/functions/RosenbrockFunction$.class deleted file mode 100644 index 0bfb0a789..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/RosenbrockFunction$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/RosenbrockFunction.class b/target/scala-3.6.4/classes/scalation/optimization/functions/RosenbrockFunction.class deleted file mode 100644 index c4bb93812..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/RosenbrockFunction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/RosenbrockFunction.tasty b/target/scala-3.6.4/classes/scalation/optimization/functions/RosenbrockFunction.tasty deleted file mode 100644 index c0ff64259..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/functions/RosenbrockFunction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/index.html 
b/target/scala-3.6.4/classes/scalation/optimization/functions/index.html deleted file mode 100644 index ac3381817..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/index.html +++ /dev/null @@ -1,9 +0,0 @@ - - -

    Source files in functions Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/BealeFunction.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/BealeFunction.scala.bak deleted file mode 100644 index fa8849168..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/BealeFunction.scala.bak +++ /dev/null @@ -1,39 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Mon Jan 22 15:21:01 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Beale Objective Function, its Minima and Gradient Function - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BealeFunction` object represents the Beale function for tests and benchmarks - * performed on function optimizers. 
- */ -object BealeFunction - extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (3, 0.5) - - def objFunction (x: VectorD): Double = - (1.5 - x(0) + x(0) * x(1)) ~^ 2 + (2.25 - x(0) + x(0) * (x(1) ~^ 2)) ~^ 2 + (2.625 - x(0) + x(0) * (x(1) ~^ 3)) ~^ 2 - - override def gradFunction (x: VectorD): VectorD = - VectorD (2 * (1.5 - x(0) + x(0) * x(1)) * (-1 + x(1)) + - 2 * (2.25 - x(0) + x(0) * (x(1) ~^ 2)) * (-1 + (x(1) ~^ 2)) + - 2 * (2.625 - x(0) + x(0) * (x(1) ~^ 3)) * (-1 + (x(1) ~^ 3)), - - 2 * (1.5 - x(0) + x(0) * x(1)) * x(0) + - 2 * (2.25 - x(0) + x(0) * (x(1) ~^ 2)) * (2 * x(0) * x(1)) + - 2 * (2.625 - x(0) + x(0) * (x(1) ~^ 3)) * (3 * x(0) * (x(1) ~^ 2))) - -end BealeFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/BealeFunction.scala.bak3 b/target/scala-3.6.4/classes/scalation/optimization/functions/old/BealeFunction.scala.bak3 deleted file mode 100644 index a5414a71c..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/BealeFunction.scala.bak3 +++ /dev/null @@ -1,37 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @note Mon Jan 22 15:21:01 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Beale Function - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BealeFunction` object to represent the Beale function for tests and benchmarks - * performed on function optimization and gradient descent classes. 
- */ -object BealeFunction extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (3, 0.5) - - def objFunction (x: VectorD): Double = - (1.5 - x(0) + x(0) * x(1)) ~^ 2 + (2.25 - x(0) + x(0) * (x(1) ~^ 2)) ~^ 2 + (2.625 - x(0) + x(0) * (x(1) ~^ 3)) ~^ 2 - - override def gradFunction (x: VectorD): VectorD = - VectorD (2 * (1.5 - x(0) + x(0) * x(1)) * (-1 + x(1)) + - 2 * (2.25 - x(0) + x(0) * (x(1) ~^ 2)) * (-1 + (x(1) ~^ 2)) + - 2 * (2.625 - x(0) + x(0) * (x(1) ~^ 3)) * (-1 + (x(1) ~^ 3)), - 2 * (1.5 - x(0) + x(0) * x(1)) * x(0) + - 2 * (2.25 - x(0) + x(0) * (x(1) ~^ 2)) * (2 * x(0) * x(1)) + - 2 * (2.625 - x(0) + x(0) * (x(1) ~^ 3)) * (3 * x(0) * (x(1) ~^ 2))) - -end BealeFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/BenchmarkFunction.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/BenchmarkFunction.scala.bak deleted file mode 100644 index b21d1a049..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/BenchmarkFunction.scala.bak +++ /dev/null @@ -1,72 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Mon Jan 22 15:14:41 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note specification of logic needed by an object that represents a test/benchmark function - */ - -package scalation -package optimization -package functions - -import scalation.calculus.Differential -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BenchmarkFunction` trait specifies the requirements for the logic - * of an object representing a benchmark function. The methods provided in this - * trait are used for tests and benchmarks performed on function optimization - * and gradient descent classes. 
- * - * Classes mixing in this trait must declare the `functionMinimum` field and - * implement the `objFunction` method. Additionally, overriding the - * default implementation of the `gradFunction` method is highly - * recommended. The `objFunction` method represents the mathematical - * function the object will model. The `functionMinimum` field represents the - * variable values that minimize the output of the `objFunction` method. - * Finally, the `gradFunction` method represents the gradient function for - * the `objFunction` function. An approximation for this method is - * automatically provided by making use of the [[Differential]] class, but - * overriding it with a hard-coded definition of the gradient function is - * highly recommended as it will greatly improve the accuracy of the results. - */ -trait BenchmarkFunction: - - val functionMinimum: VectorD // point with the lowest functional value - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The objective function used for benchmarking or testing purposes. Can be - * any mathematical function that receives an arbitrary number of real - * numbers as input and produces a single real number as an output. - * - * @param x [[VectorD]] with the values of the variables to be used - * as input for the objective function. - * @return Double The output of the objective function given by using the - * values in `x` as input. - */ - def objFunction (x: VectorD): Double - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The mathematical function that represents the gradient of the objective - * function represented by `objFunction`. An approximation using the - * [[Differential]] class is provided as the default implementation of this - * method, but it is highly encouraged to override this method with a - * hard-coded implementation of the correct gradient function in order to - * greatly improve the accuracy of the results. 
- * - * The function described in this method implementation should correspond - * to the gradient of the function described in `objFunction` or else - * the results obtained in any tests or benchmarks will be void of meaning. - * - * @param x [[VectorD]] with the values of the variables to be used - * as input for the gradient function. - * @return VectorD The gradient of the objective function in the position - * given by the values in `x`. - */ - def gradFunction (x: VectorD): VectorD = Differential.grad (objFunction, x) - -end BenchmarkFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky1Function.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky1Function.scala.bak deleted file mode 100644 index 5141607b9..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky1Function.scala.bak +++ /dev/null @@ -1,37 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Mon Jan 22 15:58:20 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Bohachevsky1 Objective Function, its Minima and Gradient Function - */ - -package scalation -package optimization -package functions - -import scala.math.{cos, Pi, sin} - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Bohachevsky1` object represents the Bohachevsky1 function for tests and benchmarks - * performed on function optimizers. 
- */ -object Bohachevsky1Function - extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (0, 0) - - def objFunction (x: VectorD): Double = - x(0) ~^ 2 + 2 * x(1) ~^ 2 - 0.3 * cos (3 * Pi * x(0)) - 0.4 * cos (4 * Pi * x(1)) + 0.7 - - override def gradFunction (x: VectorD): VectorD = - VectorD (2 * x(0) - 0.9 * Pi * sin (3 * Pi * x(0)), - - 4 * x(1) - 1.6 * Pi * sin (4 * Pi * x(1))) - -end Bohachevsky1Function - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky1Function.scala.bak3 b/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky1Function.scala.bak3 deleted file mode 100644 index 7fd26fdba..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky1Function.scala.bak3 +++ /dev/null @@ -1,35 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @note Mon Jan 22 15:58:20 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Bohachevsky1 Function - */ - -package scalation -package optimization -package functions - -import scala.math.{cos, Pi, sin} - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Bohachevsky1Function` object to represent the Bohachevsky1 function for tests - * and benchmarks performed on function optimization and gradient descent classes. 
- */ -object Bohachevsky1Function extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (0, 0) - - def objFunction(x: VectorD): Double = - x(0) ~^ 2 + 2 * x(1) ~^ 2 - 0.3 * cos(3 * Pi * x(0)) - 0.4 * cos(4 * Pi * x(1)) + 0.7 - - override def gradFunction (x: VectorD): VectorD = - VectorD (2 * x(0) - 0.3 * 3 * Pi * sin(3 * Pi * x(0)), - 4 * x(1) - 0.4 * 4 * Pi * sin(4 * Pi * x(1))) - -end Bohachevsky1Function - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky2Function.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky2Function.scala.bak deleted file mode 100644 index dcf215790..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky2Function.scala.bak +++ /dev/null @@ -1,36 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Mon Jan 22 16:03:29 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Bohachevsky2 Objective Function, its Minima and Gradient Function - */ - -package scalation -package optimization -package functions - -import scala.math.{cos, Pi, sin} - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Bohachevsky2Function` object represents the Bohachevsky2 function for tests - * and benchmarks performed on function optimizers. 
- */ -object Bohachevsky2Function extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (0, 0) - - def objFunction (x: VectorD): Double = - x(0) ~^ 2 + 2 * x(1) ~^ 2 - 0.3 * cos (3 * Pi * x(0)) * cos (4 * Pi * x(1)) + 0.3 - - override def gradFunction (x: VectorD): VectorD = - VectorD (2 * x(0) + 0.9 * Pi * sin (3 * Pi * x(0)) * cos (4 * Pi * x(1)), - - 4 * x(1) - 1.2 * Pi * cos (3 * Pi * x(0)) * sin (4 * Pi * x(1))) - -end Bohachevsky2Function - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky2Function.scala.bak3 b/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky2Function.scala.bak3 deleted file mode 100644 index 6dc391374..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky2Function.scala.bak3 +++ /dev/null @@ -1,35 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @note Mon Jan 22 16:03:29 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Bohachevsky2 Function - */ - -package scalation -package optimization -package functions - -import scala.math.{cos, Pi, sin} - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Bohachevsky2Function` object to represent the Bohachevsky2 function for tests - * and benchmarks performed on function optimization and gradient descent classes. 
- */ -object Bohachevsky2Function extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (0, 0) - - def objFunction (x: VectorD): Double = - x(0) ~^ 2 + 2 * x(1) ~^ 2 - 0.3 * cos(3 * Pi * x(0)) * cos(4 * Pi * x(1)) + 0.3 - - override def gradFunction(x: VectorD): VectorD = - VectorD (2 * x(0) + 0.3 * 3 * Pi * sin(3 * Pi * x(0)) * cos(4 * Pi * x(1)), - 4 * x(1) - 0.3 * 4 * Pi * cos(3 * Pi * x(0)) * sin(4 * Pi * x(1))) - -end Bohachevsky2Function - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky3Function.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky3Function.scala.bak deleted file mode 100644 index 2e21c7d09..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky3Function.scala.bak +++ /dev/null @@ -1,37 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Tue Jan 23 10:41:25 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Bohachevsky3 Objective Function, its Minima and Gradient Function - */ - -package scalation -package optimization -package functions - -import scala.math.{cos, Pi, sin} - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Bohachevsky3` object epresents the Bohachevsky3 function for tests and benchmarks - * performed on function optimizers. 
- */ -object Bohachevsky3Function - extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (0, 0) - - def objFunction (x: VectorD): Double = - x(0) ~^ 2 + 2 * x(1) ~^ 2 - 0.3 * cos (3 * Pi * x(0) + 4 * Pi * x(1)) + 0.3 - - override def gradFunction (x: VectorD): VectorD = - VectorD (2 * x(0) + 0.9 * Pi * sin (3 * Pi * x(0) + 4 * Pi * x(1)), - - 4 * x(1) + 1.2 * Pi * sin (3 * Pi * x(0) + 4 * Pi * x(1))) - -end Bohachevsky3Function - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky3Function.scala.bak3 b/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky3Function.scala.bak3 deleted file mode 100644 index c9a3be1ae..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/Bohachevsky3Function.scala.bak3 +++ /dev/null @@ -1,35 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @note Tue Jan 23 10:41:25 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Bohachevsky3 Function - */ - -package scalation -package optimization -package functions - -import scala.math.{cos, Pi, sin} - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Bohachevsky3Function` object to represent the Bohachevsky3 function for tests - * and benchmarks performed on function optimization and gradient descent classes. 
- */ -object Bohachevsky3Function extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (0, 0) - - def objFunction (x: VectorD): Double = - x(0) ~^ 2 + 2 * x(1) ~^ 2 - 0.3 * cos(3 * Pi * x(0) + 4 * Pi * x(1)) + 0.3 - - override def gradFunction(x: VectorD): VectorD = VectorD( - 2 * x(0) + 0.3 * 3 * Pi * sin(3 * Pi * x(0) + 4 * Pi * x(1)), - 4 * x(1) + 0.3 * 4 * Pi * sin(3 * Pi * x(0) + 4 * Pi * x(1))) - -end Bohachevsky3Function - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/BoothFunction.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/BoothFunction.scala.bak deleted file mode 100644 index 85ebcd72f..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/BoothFunction.scala.bak +++ /dev/null @@ -1,35 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Mon Jan 22 15:40:12 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Booth Objective Function, its Minima and Gradient Function - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BoothFunction` object represents the Booth function for tests and benchmarks - * performed on unction optimizers. 
- */ -object BoothFunction - extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (1, 3) - - def objFunction (x: VectorD): Double = - (x(0) + 2 * x(1) - 7) ~^ 2 + (2 * x(0) + x(1) - 5) ~^ 2 - - override def gradFunction (x: VectorD): VectorD = - VectorD (10 * x(0) + 8 * x(1) - 34, - - 8 * x(0) + 10 * x(1) - 38) - -end BoothFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/BoothFunction.scala.bak3 b/target/scala-3.6.4/classes/scalation/optimization/functions/old/BoothFunction.scala.bak3 deleted file mode 100644 index cee2de1cb..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/BoothFunction.scala.bak3 +++ /dev/null @@ -1,34 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @note Mon Jan 22 15:40:12 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Booth Function - */ - -// Package definition. -package scalation -package optimization -package functions - -// Project imports. -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BoothFunction` object to represent the Booth function for tests and benchmarks - * performed on function optimization and gradient descent classes. 
- */ -object BoothFunction extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (1, 3) - - def objectiveFunction (x: VectorD): Double = - (x(0) + 2 * x(1) - 7) ~^ 2 + (2 * x(0) + x(1) - 5) ~^ 2 - - override def gradientFunction (x: VectorD): VectorD = - VectorD (10 * x(0) + 8 * x(1) - 34, 8 * x(0) + 10 * x(1) - 38) - -end BoothFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/Camel3Function.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/Camel3Function.scala.bak deleted file mode 100644 index 845253d56..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/Camel3Function.scala.bak +++ /dev/null @@ -1,35 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Tue Jan 23 10:45:34 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Camel3 Objective Function, its Minima and Gradient Function - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Camel3Function` object represents the Camel3 function for tests and benchmarks - * performed on function optimizers. 
- */ -object Camel3Function - extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (0, 0) - - def objFunction (x: VectorD): Double = - 2 * x(0) ~^ 2 - 1.05 * x(0) ~^ 4 + (1 / 6.0) * x(0) ~^ 6 + x(0) * x(1) + x(1) ~^ 2 - - override def gradFunction (x: VectorD): VectorD = - VectorD (4 * x(0) - 4.2 * x(0) ~^ 3 + x(0) ~^ 5 + x(1), - - x(0) + 2 * x(1)) - -end Camel3Function - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/Camel3Function.scala.bak3 b/target/scala-3.6.4/classes/scalation/optimization/functions/old/Camel3Function.scala.bak3 deleted file mode 100644 index 2b831746a..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/Camel3Function.scala.bak3 +++ /dev/null @@ -1,32 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @note Tue Jan 23 10:45:34 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Camel3 Function - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The ``BoothFunction` object to represent the Camel3 function for tests and benchmarks - * performed on function optimization and gradient descent classes. 
- */ -object Camel3Function extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (0, 0) - - def objectiveFunction (x: VectorD): Double = - 2 * x(0) ~^ 2 - 1.05 * x(0) ~^ 4 + (1 / 6.0) * x(0) ~^ 6 + x(0) * x(1) + x(1) ~^ 2 - - override def gradientFunction (x: VectorD): VectorD = - VectorD (4 * x(0) - 4.2 * x(0) ~^ 3 + x(0) ~^ 5 + x(1), x(0) + 2 * x(1)) - -end Camel3Function - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/CubeFunction.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/CubeFunction.scala.bak deleted file mode 100644 index 591bfc362..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/CubeFunction.scala.bak +++ /dev/null @@ -1,35 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Tue Jan 23 10:46:44 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Cube Objective Function, its Minima and Gradient Function - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `CubeFunction` object represents the Cube function for tests and benchmarks - * performed on function optimizers. 
- */ -object CubeFunction - extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (-1, 1) - - def objFunction (x: VectorD): Double = - 100 * (x(1) - x(0) ~^ 3) ~^ 2 + (1 - x(0)) ~^ 2 - - override def gradFunction (x: VectorD): VectorD = - VectorD (-200 * (x(1) - x(0) ~^ 3) * (3 * x(0) ~^ 2) - 2 * (1 - x(0)), - - 200 * (x(1) - x(0) ~^ 3)) - -end CubeFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/CubeFunction.scala.bak3 b/target/scala-3.6.4/classes/scalation/optimization/functions/old/CubeFunction.scala.bak3 deleted file mode 100644 index 3bc1a3594..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/CubeFunction.scala.bak3 +++ /dev/null @@ -1,32 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @note Tue Jan 23 10:46:44 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Cube Function - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `CubeFunction` object to represent the Cube function for tests and benchmarks - * performed on function optimization and gradient descent classes. 
- */ -object CubeFunction extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (-1, 1) - - def objectiveFunction(x: VectorD): Double = - 100 * (x(1) - x(0) ~^ 3) ~^ 2 + (1 - x(0)) ~^ 2 - - override def gradientFunction(x: VectorD): VectorD = - VectorD (-200 * (x(1) - x(0) ~^ 3) * (3 * x(0) ~^ 2) - 2 * (1 - x(0)), 200 * (x(1) - x(0) ~^ 3)) - -end CubeFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/EvaluationLogic.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/EvaluationLogic.scala.bak deleted file mode 100644 index dceea94e9..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/EvaluationLogic.scala.bak +++ /dev/null @@ -1,42 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Fri Sep 22 16:15:18 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @note Specification of evaluation logic used by Quasi-Newton Optimizers. - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `EvaluationLogic` trait specifies the requirements for the logic to - * used for evaluation of the objective function in Quasi-Newton optimzers. - * Classes mixing in this trait must implement the evaluate method, which is - * used to evaluate the gradients and objective function for a given state of - * the variables. - */ -trait EvaluationLogic: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Evaluates the gradients and objective function according to the state of - * the variables during the minimization process. - * - * @param instance User data provided by each call of the optimizers main method. 
- * Can have [[Any]] type defined by the user as long as the same - * type is utilized in other instances that rely on this - * `EvaluationLogic`. - * @param x [[VectorD]] with the current values of the variables. - * @param n The number of variables. - * @param step Current step chosen by the line search routine. - * @return FunctionEvaluationResults Results obtained from evaluating the variables. - */ - def evaluate (instance: Any, x: VectorD, n: Int, step: Double): FunctionEvaluationResults - -end EvaluationLogic - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/FreudensteinRothFunction.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/FreudensteinRothFunction.scala.bak deleted file mode 100644 index 8be6b4666..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/FreudensteinRothFunction.scala.bak +++ /dev/null @@ -1,36 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Tue Jan 23 10:48:17 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Freudenstein-Roth Objective Function, its Minima and Gradient Function - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `FreudensteinRothFunction` object represents the Freudenstein-Roth function - * for tests and benchmarks performed on function optimizers. 
- */ -object FreudensteinRothFunction - extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (5, 4) - - def objFunction (x: VectorD): Double = - (x(0) - 13 + x(1) * ((5 - x(1)) * x(1) - 2)) ~^ 2 + (x(0) - 29 + x(1) * ((x(1) + 1) * x(1) - 14)) ~^ 2 - - override def gradFunction (x: VectorD): VectorD = - VectorD (2 * (x(0) - 13 + x(1) * ((5 - x(1)) * x(1) - 2)) + 2 * (x(0) - 29 + x(1) * ((x(1) + 1) * x(1) - 14)), - - 2 * x(1) * ((5 - x(1)) * x(1) - 2) + 2 * (x(1) * ((x(1) + 1) * x(1) - 14) + - (x(0) - 13 + x(1) * ((5 - x(1)) * x(1) - 2)) * ((5 - x(1)) * x(1) - 2))) - -end FreudensteinRothFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/FreudensteinRothFunction.scala.bak3 b/target/scala-3.6.4/classes/scalation/optimization/functions/old/FreudensteinRothFunction.scala.bak3 deleted file mode 100644 index 48a50a654..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/FreudensteinRothFunction.scala.bak3 +++ /dev/null @@ -1,34 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @note Tue Jan 23 10:48:17 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Freudenstein-Roth Function - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/* The `FreudensteinRothFunction` object to represent the Freudenstein-Roth function - * for tests and benchmarks performed on function optimization and gradient descent classes. 
- */ -object FreudensteinRothFunction extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (5, 4) - - def objectiveFunction (x: VectorD): Double = - (x(0) - 13 + x(1) * ((5 - x(1)) * x(1) - 2)) ~^ 2 + (x(0) - 29 + x(1) * ((x(1) + 1) * x(1) - 14)) ~^ 2 - - override def gradientFunction (x: VectorD): VectorD = - VectorD (2 * (x(0) - 13 + x(1) * ((5 - x(1)) * x(1) - 2)) + 2 * (x(0) - 29 + x(1) * ((x(1) + 1) * x(1) - 14)), - 2 * x(1) * ((5 - x(1)) * x(1) - 2) + 2 * (x(1) * ((x(1) + 1) * x(1) - 14) + - (x(0) - 13 + x(1) * ((5 - x(1)) * x(1) - 2)) * ((5 - x(1)) * x(1) - 2))) - -end FreudensteinRothFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/FunctionOptimization.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/FunctionOptimization.scala.bak deleted file mode 100644 index 84722e956..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/FunctionOptimization.scala.bak +++ /dev/null @@ -1,58 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Wed Oct 11 14:03:06 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @note Function Optimization Grouping an Objective Function and its Gradient - */ - -package scalation -package optimization -package functions - -import scalation.calculus.Differential -import scalation.mathstat.{FunctionV2S, FunctionV2V, VectorD} - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `FunctionOptimization` case class to store the definition of a function optimization - * in a format that adheres to the optimization logic format used by Quasi-Newton optimizers. 
- * @param objFunction the objective function to be minimized - * @param gradFunction the gradient function of the objective function - */ -case class FunctionOptimization (objFunction: FunctionV2S, gradFunction: FunctionV2V) - extends OptimizationLogic: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - def evaluate (instance: Any, x: VectorD, n: Int, step: Double): FunctionEvaluationResults = - FunctionEvaluationResults (objFunction (x), gradFunction (x)) - end evaluate - -end FunctionOptimization - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `FunctionOptimization` companion object contains a factory method. - */ -object FunctionOptimization: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a Function Optimization object using numerical approximation for the gradient. - * @param objFunction the objective function to be minimized - */ - def apply (objFunction: FunctionV2S) = - new FunctionOptimization (objFunction, (x: VectorD) => Differential.grad (objFunction, x)) - end apply - -end FunctionOptimization - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `FunctionEvaluationResults` case class collects the results from running - * evaluation logic for a Quasi-Newton optimizer. 
- * @param objFunctionValue the optimal value found for objective function - * @param gradVector the corresponding value for the gradient - */ -case class FunctionEvaluationResults (objFunctionValue: Double, gradVector: VectorD) - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/McCormickFunction.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/McCormickFunction.scala.bak deleted file mode 100644 index f1f0ace74..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/McCormickFunction.scala.bak +++ /dev/null @@ -1,37 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Mon Jan 22 15:33:02 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note McCormick Objective Function, its Minima and Gradient Function - */ - -package scalation -package optimization -package functions - -import scala.math.{cos, sin} - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `McCormickFunction` object represents the McCormick function for tests and - * benchmarks performed on function optimizers. 
- */ -object McCormickFunction - extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (-0.54719, -1.54719) - - def objFunction (x: VectorD): Double = - sin (x(0) + x(1)) + (x(0) - x(1)) ~^ 2 - 1.5 * x(0) + 2.5 * x(1) + 1 - - override def gradFunction (x: VectorD): VectorD = - VectorD (-1.5 + 2 * x(0) - 2 * x(1) + cos (x(0) + x(1)), - - 2.5 - 2 * x(0) + 2 * x(1) + cos (x(0) + x(1))) - -end McCormickFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/McCormickFunction.scala.bak3 b/target/scala-3.6.4/classes/scalation/optimization/functions/old/McCormickFunction.scala.bak3 deleted file mode 100644 index cb16e53b1..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/McCormickFunction.scala.bak3 +++ /dev/null @@ -1,35 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @note Mon Jan 22 15:33:02 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note McCormick Function - */ - -package scalation -package optimization -package functions - -import scala.math.{cos, sin} - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `McCormickFunction` object to represent the McCormick function for tests and - * benchmarks performed on function optimization and gradient descent classes. 
- */ -object McCormickFunction extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (-0.54719, -1.54719) - - def objectiveFunction (x: VectorD): Double = - sin(x(0) + x(1)) + (x(0) - x(1)) ~^ 2 - 1.5 * x(0) + 2.5 * x(1) + 1 - - override def gradientFunction (x: VectorD): VectorD = - VectorD (-1.5 + 2 * x(0) - 2 * x(1) + cos(x(0) + x(1)), - 2.5 - 2 * x(0) + 2 * x(1) + cos(x(0) + x(1))) - -end McCormickFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/OptimizationLogic.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/OptimizationLogic.scala.bak deleted file mode 100644 index 41fd3b9af..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/OptimizationLogic.scala.bak +++ /dev/null @@ -1,70 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Tue Sep 19 09:23:45 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @note Specification of Optimization Logic used by Quasi-Newton Optimizers - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `OptimizationLogicNative` trait specifies the requirements for the logic - * to be used in each step of Quasi-Newton optimizers. - * Classes mixing in this trait must implement two methods: evaluate and - * progress. The evaluate method is used to evaluate the gradients and - * objective function for a given state of the variables. The progress method - * is used to report on how the minimization process is progressing. - */ -trait OptimizationLogic - extends EvaluationLogic: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Receives the progress of each iteration of the optimization process. 
Can - * be used to display or record said progress and to determine if the - * optimization should continue or be cancelled. A default implementation - * is provided to just print the contents of the current iteration of the - * optimization. - * - * @param instance User data provided by each call of the optimizer's invocation - * method. Can have [[Any]] type defined by the user as long as - * the same type is utilized in the `evaluate` method implementation - * for the class extending this trait. - * @param x [[VectorD]] with the current values of the variables. - * @param g [[VectorD]] with the current value of the gradient - * vector. - * @param fx Current value of the objective function. - * @param xnorm Euclidean norm of the variables. - * @param gnorm Euclidean norm of the gradient vector. - * @param step Step used by the line search routine in this iteration. - * @param n The number of variables. - * @param k Iteration count. - * @param ls The number of evaluations called for this iteration. - * @return ReturnCode Determines if optimization should continue. Zero continues - * optimization. Non-zero values cancel the optimization. 
- */ - def progress (instance: Any, x: VectorD, g: VectorD, fx: Double, - xnorm: Double, gnorm: Double, step: Double, - n: Int, k: Int, ls: Int): ReturnCode = - println (s""" - Iteration $k: - x \t\t= $x - g \t\t= $g - fx \t\t= $fx - xnorm \t= $xnorm - gnorm \t= $gnorm - step \t= $step - n \t\t= $n - ls \t\t= $ls - """) - ReturnCode.Success - end progress - -end OptimizationLogic - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/ParaboloidExampleFunction.scala.bak3 b/target/scala-3.6.4/classes/scalation/optimization/functions/old/ParaboloidExampleFunction.scala.bak3 deleted file mode 100644 index f9b649fde..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/ParaboloidExampleFunction.scala.bak3 +++ /dev/null @@ -1,32 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @note Mon Jan 29 15:16:43 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Paraboloid Function - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ParaboloidFunction` object to represent an example of a Paraboloid function for - * tests and benchmarks performed on function optimization and gradient descent classes. 
- */ -object ParaboloidFunction extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (3, 4) - - def objectiveFunction (x: VectorD): Double = - (x(0) - 3.0) ~^ 2 + (x(1) - 4.0) ~^ 2 + 1.0 - - override def gradientFunction (x: VectorD): VectorD = - VectorD (2 * x(0) - 6, 2 * x(1) - 8) - -end ParaboloidFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/ParaboloidFunction.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/ParaboloidFunction.scala.bak deleted file mode 100644 index 3b7712f1b..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/ParaboloidFunction.scala.bak +++ /dev/null @@ -1,35 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Mon Jan 29 15:16:43 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Paraboloid Objective Function, its Minima and Gradient Function - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ParaboloidFunction` object represents an example of a Paraboloid function - * for tests and benchmarks performed on function optimizers. 
- */ -object ParaboloidFunction - extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (3, 4) - - def objFunction (x: VectorD): Double = - (x(0) - 3.0) ~^ 2 + (x(1) - 4.0) ~^ 2 + 1.0 - - override def gradFunction (x: VectorD): VectorD = - VectorD (2 * x(0) - 6, - - 2 * x(1) - 8) - -end ParaboloidFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/QuarticExampleFunction.scala.bak3 b/target/scala-3.6.4/classes/scalation/optimization/functions/old/QuarticExampleFunction.scala.bak3 deleted file mode 100644 index 6171038d3..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/QuarticExampleFunction.scala.bak3 +++ /dev/null @@ -1,34 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @note Mon Jan 29 15:17:54 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Quartic Function - */ - -// Package definition. -package scalation -package optimization -package functions - -// Project imports. -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QuarticFunction` object to represent an example of a Quartic function for tests - * and benchmarks performed on function optimization and gradient descent classes. 
- */ -object QuarticFunction extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (1, 4) - - def objectiveFunction (x: VectorD): Double = - x(0) ~^ 4 + (x(0) - 3.0) ~^ 2 + (x(1) - 4.0) ~^ 2 + 1.0 - - override def gradientFunction (x: VectorD): VectorD = - VectorD (4.0 * x(0) ~^ 3 + 2 * x(0) - 6, 2 * x(1) - 8) - -end QuarticFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/QuarticFunction.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/QuarticFunction.scala.bak deleted file mode 100644 index 571d978ff..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/QuarticFunction.scala.bak +++ /dev/null @@ -1,35 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Mon Jan 29 15:17:54 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Quartic Objective Function, its Minima and Gradient Function - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `QuarticFunction` object represents an example of a Quartic function for - * tests and benchmarks performed on function optimizers. 
- */ -object QuarticFunction - extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (1, 4) - - def objFunction (x: VectorD): Double = - x(0) ~^ 4 + (x(0) - 3.0) ~^ 2 + (x(1) - 4.0) ~^ 2 + 1.0 - - override def gradFunction (x: VectorD): VectorD = - VectorD (4.0 * x(0) ~^ 3 + 2 * x(0) - 6, - - 2 * x(1) - 8) - -end QuarticFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/ReciprocalExampleFunction.scala.bak3 b/target/scala-3.6.4/classes/scalation/optimization/functions/old/ReciprocalExampleFunction.scala.bak3 deleted file mode 100644 index ad9d048dc..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/ReciprocalExampleFunction.scala.bak3 +++ /dev/null @@ -1,32 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @note Mon Jan 29 15:28:08 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Reciprocal Function - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ReciprocalFunction` object to represent an example of a Reciprocal function for - * tests and benchmarks performed on function optimization and gradient descent classes. 
- */ -object ReciprocalFunction extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (1.06035, 4) - - def objectiveFunction (x: VectorD): Double = - 1 / x(0) + x(0) ~^ 4 + (x(0) - 3.0) ~^ 2 + (x(1) - 4.0) ~^ 2 + 1.0 - - override def gradientFunction (x: VectorD): VectorD = - VectorD (-(x(0) ~^ (-2)) + 4.0 * x(0) ~^ 3 + 2 * x(0) - 6, 2 * x(1) - 8) - -end ReciprocalFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/ReciprocalFunction.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/ReciprocalFunction.scala.bak deleted file mode 100644 index 26f207f1e..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/ReciprocalFunction.scala.bak +++ /dev/null @@ -1,35 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Mon Jan 29 15:28:08 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Reciprocal Objective Function, its Minima and Gradient Function - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ReciprocalFunction` object represents an example of a Reciprocal function for - * tests and benchmarks performed on function optimizers. 
- */ -object ReciprocalFunction - extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (1.06035, 4) - - def objFunction (x: VectorD): Double = - 1 / x(0) + x(0) ~^ 4 + (x(0) - 3.0) ~^ 2 + (x(1) - 4.0) ~^ 2 + 1.0 - - override def gradFunction (x: VectorD): VectorD = - VectorD (-(x(0) ~^ (-2)) + 4.0 * x(0) ~^ 3 + 2 * x(0) - 6, - - 2 * x(1) - 8) - -end ReciprocalFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/ReturnCode.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/ReturnCode.scala.bak deleted file mode 100644 index fab529de7..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/ReturnCode.scala.bak +++ /dev/null @@ -1,164 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Mon Aug 21 13:48:43 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @note Return Codes for Quasi-Newton Optimizers - */ - -package scalation -package optimization -package functions - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ReturnCode` enumeration describes possible return codes of the - * L-BFGS optimization, including different ways the optimization may correctly - * conclude, possible errors with the parameters given and possible errors - * during the optimization process. - * - * @param code Integer value that represents the return code. - */ -enum ReturnCode (val code: Int = -1024): - - /** L-BFGS reaches convergence. */ - case Success extends ReturnCode(0) - case Convergence extends ReturnCode(0) - case Stop extends ReturnCode(1) - - /** The initial variables already minimize the objective function. */ - case AlreadyMinimized extends ReturnCode(2) - - /** Unknown error. */ - case UnknownError extends ReturnCode(-1024) - - /** Logic error. 
*/ - case LogicError extends ReturnCode(-1023) - - /** Insufficient memory. */ - case OutOfMemory extends ReturnCode(-1022) - - /** The minimization process has been canceled. */ - case Canceled extends ReturnCode(-1021) - - /** Invalid number of variables specified. */ - case InvalidN extends ReturnCode(-1020) - - /** Invalid number of variables (for SSE) specified. */ - case InvalidNSSE extends ReturnCode(-1019) - - /** The array x must be aligned to 16 (for SSE). */ - case InvalidXSSE extends ReturnCode(-1018) - - /** Invalid parameter LBFGSParameters.epsilon specified. */ - case InvalidEpsilon extends ReturnCode(-1017) - - /** Invalid parameter LBFGSParameters.past specified. */ - case InvalidTestPeriod extends ReturnCode(-1016) - - /** Invalid parameter LBFGSParameters.delta specified. */ - case InvalidDelta extends ReturnCode(-1015) - - /** Invalid parameter LBFGSParameters.lineSearch specified. */ - case InvalidLineSearch extends ReturnCode(-1014) - - /** Invalid parameter LBFGSParameters.minStep specified. */ - case InvalidMinStep extends ReturnCode(-1013) - - /** Invalid parameter LBFGSParameters.maxStep specified. */ - case InvalidMaxStep extends ReturnCode(-1012) - - /** Invalid parameter LBFGSParameters.ftol specified. */ - case InvalidFTOL extends ReturnCode(-1011) - - /** Invalid parameter BFGSParameters.wolfe specified. */ - case InvalidWolfe extends ReturnCode(-1010) - - /** Invalid parameter LBFGSParameters.gtol specified. */ - case InvalidGTOL extends ReturnCode(-1009) - - /** Invalid parameter LBFGSParameters.xtol specified. */ - case InvalidXTOL extends ReturnCode(-1008) - - /** Invalid parameter LBFGSParameters.maxLineSearch]] specified. */ - case InvalidMaxLineSearch extends ReturnCode(-1007) - - /** Invalid parameter LBFGSParameters.orthantwiseC specified. */ - case InvalidOrthantwise extends ReturnCode(-1006) - - /** Invalid parameter LBFGSParameters.orthantwiseStart specified. 
*/ - case InvalidOrthantwiseStart extends ReturnCode(-1005) - - /** Invalid parameter LBFGSParameters.orthantwiseEnd specified. */ - case InvalidOrthantwiseEnd extends ReturnCode(-1004) - - /** The line-search step went out of the interval of uncertainty. */ - case OutOfInterval extends ReturnCode(-1003) - - /** A logic error occurred or the interval of uncertainty became too small. */ - case IncorrectTMinMax extends ReturnCode(-1002) - - /** A rounding error occurred, or no line-search step satisfies sufficient - * decrease and curvature conditions. - */ - case RoundingError extends ReturnCode(-1001) - - /** The line-search step became smaller than LBFGSParameters.minStep. */ - case MinimumStep extends ReturnCode(-1000) - - /** The line-search step became larger than LBFGSParameters.maxStep. */ - case MaximumStep extends ReturnCode(-999) - - /** The line-search routine reaches the maximum number of evaluations. */ - case MaximumLineSearch extends ReturnCode(-998) - - /** The algorithm routine reaches the maximum number of iterations. */ - case MaximumIteration extends ReturnCode(-997) - - /** Relative width of the interval of uncertainty is at most LBFGSParameters.xtol. */ - case WidthTooSmall extends ReturnCode(-996) - - /** A logic error (negative line-search step) occurred. */ - case InvalidParameters extends ReturnCode(-995) - - /** The current search direction increases the objective function value. */ - case IncreaseGradient extends ReturnCode(-994) - - /** Invalid parameter `momentum` specified for DMLBFGS. */ - case InvalidMomentum extends ReturnCode(-993) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Returns a boolean indicating whether this return code is an error code. - * - * @return Boolean Indicates whether this return code is an error code. 
- */ - def isErrorCode: Boolean = - this match - case ReturnCode.Success | - ReturnCode.Convergence | - ReturnCode.Stop | - ReturnCode.AlreadyMinimized => false - case _ => true - end isErrorCode - -end ReturnCode - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `ReturnCode` object provides a method for convert an code code to a ReturnCode enum. - */ -object ReturnCode: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Returns the `ReturnCode` corresponding to a numerical code. - * - * @param code Numerical value used to determine a `ReturnCode` return code. - * @return ReturnCode `ReturnCode` whose numerical value is equal to the `code` parameter. - */ - def fromCode (code: Int): ReturnCode = - ReturnCode.values.find (_.code == code).get - end fromCode - -end ReturnCode - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/RosenbrockFunction.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/functions/old/RosenbrockFunction.scala.bak deleted file mode 100644 index e253579d9..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/RosenbrockFunction.scala.bak +++ /dev/null @@ -1,35 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @date Mon Jan 29 15:25:28 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Rosenbrock Objective Function, its Minima and Gradient Function - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RosenbrockFunction` object represents the Rosenbrock function for tests and - * benchmarks performed on function optimizers. 
- */ -object RosenbrockFunction - extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (1, 1) - - def objFunction (x: VectorD): Double = - (1.0 - x(0)) ~^ 2 + 100.0 * (x(1) - x(0) ~^ 2) ~^ 2 - - override def gradFunction (x: VectorD): VectorD = - VectorD ( -2.0 * (1 - x(0)) - 400.0 * x(0) * (x(1) - x(0) ~^ 2), - - 200.0 * (x(1) - x(0) ~^ 2)) - -end RosenbrockFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/functions/old/RosenbrockFunction.scala.bak3 b/target/scala-3.6.4/classes/scalation/optimization/functions/old/RosenbrockFunction.scala.bak3 deleted file mode 100644 index 468b25a6a..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/functions/old/RosenbrockFunction.scala.bak3 +++ /dev/null @@ -1,33 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author André Filipe Caldas Laranjeira - * @version 2.0 - * @note Mon Jan 29 15:25:28 EST 2024 - * @see LICENSE (MIT style license file). - * - * @note Rosenbrock Function - */ - -package scalation -package optimization -package functions - -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `RosenbrockFunction` object to represent the Rosenbrock function for tests and - * benchmarks performed on function optimization and gradient descent classes. 
- */ -object RosenbrockFunction extends BenchmarkFunction: - - val functionMinimum: VectorD = VectorD (1, 1) - - def objectiveFunction (x: VectorD): Double = - (1.0 - x(0)) ~^ 2 + 100.0 * (x(1) - x(0) ~^ 2) ~^ 2 - - override def gradientFunction(x: VectorD): VectorD = - VectorD (-2.0 * (1 - x(0)) - 400.0 * x(0) * (x(1) - x(0) ~^ 2), - 200.0 * (x(1) - x(0) ~^ 2)) - -end RosenbrockFunction - diff --git a/target/scala-3.6.4/classes/scalation/optimization/goldenSectionLSTest.class b/target/scala-3.6.4/classes/scalation/optimization/goldenSectionLSTest.class deleted file mode 100644 index accced4bc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/goldenSectionLSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/goldenSectionLSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/goldenSectionLSTest.tasty deleted file mode 100644 index 8346b520b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/goldenSectionLSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/goldenSectionLSTest2.class b/target/scala-3.6.4/classes/scalation/optimization/goldenSectionLSTest2.class deleted file mode 100644 index 7cb2bd67c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/goldenSectionLSTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/goldenSectionLSTest2.tasty b/target/scala-3.6.4/classes/scalation/optimization/goldenSectionLSTest2.tasty deleted file mode 100644 index c67c978d6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/goldenSectionLSTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest.class b/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest.class deleted file mode 100644 index 15eae4180..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest.tasty deleted file mode 100644 index 313088559..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest2.class b/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest2.class deleted file mode 100644 index 2febb77fb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest2.tasty b/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest2.tasty deleted file mode 100644 index 67005e073..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest3.class b/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest3.class deleted file mode 100644 index bf2640ad0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest3.tasty b/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest3.tasty deleted file mode 100644 index 27a05e760..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest4.class b/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest4.class deleted file mode 100644 index c1de16b80..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest4.tasty b/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest4.tasty deleted file mode 100644 index 9247705db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gradientDescentTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_AdamTest.class b/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_AdamTest.class deleted file mode 100644 index 71c923b63..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_AdamTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_AdamTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_AdamTest.tasty deleted file mode 100644 index 1ce66caea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_AdamTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_Mo2Test.class b/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_Mo2Test.class deleted file mode 100644 index 8a470f0f3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_Mo2Test.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_Mo2Test.tasty b/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_Mo2Test.tasty deleted file mode 100644 index 1bb4e6185..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_Mo2Test.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_MoTest.class b/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_MoTest.class deleted file 
mode 100644 index 4b9042ed1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_MoTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_MoTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_MoTest.tasty deleted file mode 100644 index c5a4cdb53..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_MoTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_NoLSTest.class b/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_NoLSTest.class deleted file mode 100644 index 80ce7347e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_NoLSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_NoLSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_NoLSTest.tasty deleted file mode 100644 index f884fb00a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gradientDescent_NoLSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gridSearchLSTest.class b/target/scala-3.6.4/classes/scalation/optimization/gridSearchLSTest.class deleted file mode 100644 index 538eeeb99..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gridSearchLSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gridSearchLSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/gridSearchLSTest.tasty deleted file mode 100644 index 75195614a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gridSearchLSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gridSearchLSTest2.class b/target/scala-3.6.4/classes/scalation/optimization/gridSearchLSTest2.class 
deleted file mode 100644 index a4748b5d7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gridSearchLSTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gridSearchLSTest2.tasty b/target/scala-3.6.4/classes/scalation/optimization/gridSearchLSTest2.tasty deleted file mode 100644 index 72e3aa0eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gridSearchLSTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gridSearchTest.class b/target/scala-3.6.4/classes/scalation/optimization/gridSearchTest.class deleted file mode 100644 index 6a9afcd61..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gridSearchTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gridSearchTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/gridSearchTest.tasty deleted file mode 100644 index f562c9263..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gridSearchTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gridSearchTest2.class b/target/scala-3.6.4/classes/scalation/optimization/gridSearchTest2.class deleted file mode 100644 index 8b028300a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gridSearchTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/gridSearchTest2.tasty b/target/scala-3.6.4/classes/scalation/optimization/gridSearchTest2.tasty deleted file mode 100644 index ab8c628b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/gridSearchTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/hungarianTest.class b/target/scala-3.6.4/classes/scalation/optimization/hungarianTest.class deleted file mode 100644 index d56def09b..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/optimization/hungarianTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/hungarianTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/hungarianTest.tasty deleted file mode 100644 index 55af8b535..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/hungarianTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/hungarianTest2.class b/target/scala-3.6.4/classes/scalation/optimization/hungarianTest2.class deleted file mode 100644 index 6e25a8785..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/hungarianTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/hungarianTest2.tasty b/target/scala-3.6.4/classes/scalation/optimization/hungarianTest2.tasty deleted file mode 100644 index 54d89bafb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/hungarianTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/index.html b/target/scala-3.6.4/classes/scalation/optimization/index.html deleted file mode 100644 index 81f25c4e7..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/index.html +++ /dev/null @@ -1,44 +0,0 @@ - - -

    Source files in optimization Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/optimization/integerTabuSearchTest.class b/target/scala-3.6.4/classes/scalation/optimization/integerTabuSearchTest.class deleted file mode 100644 index 0a7e2414a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/integerTabuSearchTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/integerTabuSearchTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/integerTabuSearchTest.tasty deleted file mode 100644 index fd2fa826f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/integerTabuSearchTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/integerTabuSearchTest2.class b/target/scala-3.6.4/classes/scalation/optimization/integerTabuSearchTest2.class deleted file mode 100644 index ec9094c38..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/integerTabuSearchTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/integerTabuSearchTest2.tasty b/target/scala-3.6.4/classes/scalation/optimization/integerTabuSearchTest2.tasty deleted file mode 100644 index 6d696b8f1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/integerTabuSearchTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest.class b/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest.class deleted file mode 100644 index f9871e783..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest.tasty deleted file mode 100644 index 0d9011ff8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest2.class b/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest2.class deleted file mode 100644 index 7f8fc6873..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest2.tasty b/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest2.tasty deleted file mode 100644 index 24caf98ec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest3.class b/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest3.class deleted file mode 100644 index 9b8ec2e30..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest3.tasty b/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest3.tasty deleted file mode 100644 index e7d5edbc9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/lassoAdmmTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/CheckLP.class b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/CheckLP.class deleted file mode 100644 index 01cff32d0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/CheckLP.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/CheckLP.tasty b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/CheckLP.tasty deleted file mode 100644 index 2bd298eab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/CheckLP.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP$.class b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP$.class deleted file mode 100644 index d79460438..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP$package$.class b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP$package$.class deleted file mode 100644 index 668428fc0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP$package.class b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP$package.class deleted file mode 100644 index 16307b5bc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP$package.tasty deleted file mode 100644 index 0d444f262..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP.class b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP.class deleted file mode 100644 index f011b680e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP.tasty b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP.tasty deleted file mode 100644 index 945e2e460..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/IntegerLP.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/MinimizerLP.class b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/MinimizerLP.class deleted file mode 100644 index 7d161b4e5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/MinimizerLP.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/MinimizerLP.tasty b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/MinimizerLP.tasty deleted file mode 100644 index 9b2d70906..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/MinimizerLP.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/Simplex2P$package$.class b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/Simplex2P$package$.class deleted file mode 100644 index 6165b75bf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/Simplex2P$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/Simplex2P$package.class b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/Simplex2P$package.class deleted file mode 100644 index 029a941f4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/Simplex2P$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/Simplex2P$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/Simplex2P$package.tasty deleted file mode 100644 index ca464819a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/Simplex2P$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/Simplex2P.class 
b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/Simplex2P.class deleted file mode 100644 index 226ea76cc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/Simplex2P.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/Simplex2P.tasty b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/Simplex2P.tasty deleted file mode 100644 index e7bf53328..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/Simplex2P.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/index.html b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/index.html deleted file mode 100644 index 7149a4aee..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/index.html +++ /dev/null @@ -1,12 +0,0 @@ - - -

    Source files in linear_opt Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/integerLPTest.class b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/integerLPTest.class deleted file mode 100644 index a5a0ace78..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/integerLPTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/integerLPTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/integerLPTest.tasty deleted file mode 100644 index 1146bc789..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/integerLPTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/simplex2PTest.class b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/simplex2PTest.class deleted file mode 100644 index 78a114421..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/simplex2PTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/simplex2PTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/linear_opt/simplex2PTest.tasty deleted file mode 100644 index 7820db22b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linear_opt/simplex2PTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex$.class b/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex$.class deleted file mode 100644 index ad5a00082..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex$package$.class b/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex$package$.class deleted file mode 100644 
index 410f88ea2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex$package.class b/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex$package.class deleted file mode 100644 index 022547288..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex$package.tasty deleted file mode 100644 index 7a1487cfb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex.class b/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex.class deleted file mode 100644 index 403ec6483..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex.tasty b/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex.tasty deleted file mode 100644 index 16b6c597e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linearopt/QuadraticSimplex.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/linearopt/quadraticSimplexTest.class b/target/scala-3.6.4/classes/scalation/optimization/linearopt/quadraticSimplexTest.class deleted file mode 100644 index b332ae5a2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linearopt/quadraticSimplexTest.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/linearopt/quadraticSimplexTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/linearopt/quadraticSimplexTest.tasty deleted file mode 100644 index a30b1a504..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/linearopt/quadraticSimplexTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/nLPTest.class b/target/scala-3.6.4/classes/scalation/optimization/nLPTest.class deleted file mode 100644 index 5fb3d2075..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/nLPTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/nLPTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/nLPTest.tasty deleted file mode 100644 index 120363b3d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/nLPTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/nLPTest2.class b/target/scala-3.6.4/classes/scalation/optimization/nLPTest2.class deleted file mode 100644 index ab430ecbf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/nLPTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/nLPTest2.tasty b/target/scala-3.6.4/classes/scalation/optimization/nLPTest2.tasty deleted file mode 100644 index 8ef91f7fa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/nLPTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/nelderMeadSimplex2Test.class b/target/scala-3.6.4/classes/scalation/optimization/nelderMeadSimplex2Test.class deleted file mode 100644 index 390932fc3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/nelderMeadSimplex2Test.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/nelderMeadSimplex2Test.tasty 
b/target/scala-3.6.4/classes/scalation/optimization/nelderMeadSimplex2Test.tasty deleted file mode 100644 index a27b41ed4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/nelderMeadSimplex2Test.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/nelderMeadSimplexTest.class b/target/scala-3.6.4/classes/scalation/optimization/nelderMeadSimplexTest.class deleted file mode 100644 index c70b3682e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/nelderMeadSimplexTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/nelderMeadSimplexTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/nelderMeadSimplexTest.tasty deleted file mode 100644 index ca6e3a8f2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/nelderMeadSimplexTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest.class b/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest.class deleted file mode 100644 index 7fad08d32..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest.tasty deleted file mode 100644 index efb1e4a86..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest2.class b/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest2.class deleted file mode 100644 index f39cfa53a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest2.tasty 
b/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest2.tasty deleted file mode 100644 index 4d7bdf45e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest3.class b/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest3.class deleted file mode 100644 index d06210454..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest3.tasty b/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest3.tasty deleted file mode 100644 index ffdee9030..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/newtonRaphsonTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest.class b/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest.class deleted file mode 100644 index 73017d8fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest.tasty deleted file mode 100644 index 6aff00126..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest2.class b/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest2.class deleted file mode 100644 index ed158514b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest2.tasty 
b/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest2.tasty deleted file mode 100644 index 76059448f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest3.class b/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest3.class deleted file mode 100644 index b2b958192..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest3.tasty b/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest3.tasty deleted file mode 100644 index a17ed91d3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/newton_NoLSTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/old/Hungarian.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/old/Hungarian.scala.bak deleted file mode 100644 index 862f2fd1c..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/old/Hungarian.scala.bak +++ /dev/null @@ -1,276 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Thu Jul 14 11:51:50 EDT 2011 - * @see LICENSE (MIT style license file). 
- * - * @title Hungarian Algorithm for Assignment Problem (AP) - * - * Translated from C code from Assignment Problem and Hungarian Algorithm - * @see www.topcoder.com/tc?module=Static&d1=tutorials&d2=hungarianAlgorithm - */ - -package scalation -package optimization - -import java.util.ArrayDeque // use Java since `ArrayDeque` is faster than Scala's `Queue` - -import scala.Double.PositiveInfinity -import scala.math.{max, min} -import scala.runtime.ScalaRunTime.stringOf -import scala.util.control.Breaks.{breakable, break} - -import scalation.mathstat.MatrixD - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Hungarian` is an O(n^3) implementation of the Hungarian algorithm - * (or Kuhn-Munkres algorithm). Find the maximum cost set of pairings between - * m x-nodes (workers) and n y-nodes (jobs) such that each worker is assigned - * to one job and each job has at most one worker assigned. - * It solves the maximum-weighted bipartite graph matching problem. - * - * maximize sum i = 0 .. m-1 { cost(x_i, y_i) } - * - * Caveat: only works if m <= n (i.e., there is at least one job for every worker). 
- * @param cost the cost matrix: cost(x, y) = cost of assigning worker x to job y - */ -class Hungarian (cost: MatrixD): - - private val debug = debugf ("Hungarian", true) // debug function - private val NA = -1 // Not Assigned - private val NO = -2 // None Possible - private val m = cost.dim // m workers (x-nodes) - private val n = cost.dim2 // n jobs (y-nodes) - - private val r_m = 0 until m // range for workers - private val r_n = 0 until n // range for jobs - private val lx = Array.ofDim [Double] (m) // labels of x-nodes (workers) - private val ly = Array.ofDim [Double] (n) // labels of y-nodes (jobs) - private val slack = Array.ofDim [Double] (n) // slack(y) = lx(x) + lx(y) - cost(x, y) - private val slackX = Array.ofDim [Int] (n) // slackX(y) = x-node for computing slack(y) - private val xy = Array.fill (m)(NA) // xy(x) = y-node matched with x - private val yx = Array.fill (n)(NA) // yx(y) = x-node matched with y - private val qu = new ArrayDeque [Int] (m) // queue for Breadth First Search (BFS) - private val maxMatch = min (n, m) // maximum number of matches needed - private var nMatch = 0 // number of nodes in current matching - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Initialize cost labels for x-nodes by setting them to the largest cost - * on any incident edge (largest value in row). If feasible, this is the - * optimal solution, otherwise it is an upper bound. - */ - private def initLabels (): Unit = - debug ("initLabels", s"lx = ${stringOf (lx)} \ncost = $cost") - for (x <- r_m; y <- r_n) lx(x) = max (lx(x), cost(x, y)) - end initLabels - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the cost labels for both x-nodes and y-nodes. - */ - private def updateLabels (xSet: Array [Boolean], ySet: Array [Boolean]): Unit = - var delta: Double = PositiveInfinity - for y <- r_n if ! 
ySet(y) do delta = min (delta, slack(y)) - for x <- r_m if xSet(x) do lx(x) -= delta - for y <- r_n if ySet(y) do ly(y) += delta - for y <- r_n if ! ySet(y) do slack(y) -= delta - end updateLabels - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add new edges to the tree and update slack. - * @param x current x-node - * @param prevX previous x-node before x in the alternating path, - * so we add edges (prevX, xy(x)), (xy(x), x) - */ - private def addToTree (x: Int, prevX: Int, prev: Array [Int], xSet: Array [Boolean]): Unit = - xSet(x) = true // add x to xSet - prev(x) = prevX // we need this when augmenting - for y <- r_n if lx(x) + ly(y) - cost(x, y) < slack(y) do - slack(y) = lx(x) + ly(y) - cost(x, y) - slackX(y) = x - end for - end addToTree - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find a root (an unpaired x-node) and compute slack values for y-nodes. - */ - private def findRootSetSlack (prev: Array [Int], xSet: Array [Boolean]): Unit = - var root = NA - breakable { - for x <- r_m if xy(x) == NA do - root = x - qu.add (x) - prev(x) = NO // root is first => no previous x-node - xSet(x) = true - break () - end for - } // breakable - - for y <- r_n do // initialize the slack array - slack(y) = lx(root) + ly(y) - cost(root, y) - slackX(y) = root - println (s"findRootSetSlack: slack($y) = ${slack(y)}, slackX($y) = ${slackX(y)}") - end for - end findRootSetSlack - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reverse the edges along the augmenting path starting with the given edge. 
- * @param edge the given edge - */ - private def reverseEdges (edge: Tuple2 [Int, Int], prev: Array [Int]): Unit = - var e = edge - var ty = NA - while e._1 != NO do - ty = xy(e._1) - yx(e._2) = e._1 - xy(e._1) = e._2 - e = (prev(e._1), ty) - end while - end reverseEdges - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** A recursive procedure to find augmenting paths to improve the assignments. - * Terminate when 'nMatch == maxMatch'. - */ - private def augment (): Unit = - println (s"augment: nMatch = $nMatch need $maxMatch") - val xSet = Array.fill (m)(false) // initialize xSet to empty - val ySet = Array.fill (n)(false) // initialize ySet to empty - val prev = Array.fill (m)(NA) // initialize prev to NA (for alternating tree) - var edge = (NA, NA) // edge for augmenting path - var x = NA // current x-node - - findRootSetSlack (prev, xSet) - - breakable { - while true do // main loop - while ! qu.isEmpty () do // building tree with BFS cycle - x = qu.poll () // get current x-node from queue - - for y <- r_n if cost(x, y) == lx(x) + ly(y) && ! ySet(y) do - if yx(y) == NA then { edge = (x, y); break () } // exposed x-node => augmenting path exists - ySet(y) = true // else just add y to ySet - qu.add (yx(y)) // add x-node yx(y) matched with y to the queue - addToTree (yx(y), x, prev, xSet) // add edges (x, y) and (y, yx(y)) to the tree - end for - end while - - updateLabels (xSet, ySet) // augmenting path not found, so improve labeling - qu.clear () // empty the queue - - for y <- r_n if ! ySet(y) && slack(y) == 0 do - if yx(y) == NA then // exposed x-node => augmenting path exists - x = slackX(y) - edge = (x, y); break () - else - ySet(y) = true // else just add y to ySet, - if ! 
xSet(yx(y)) then - qu.add (yx(y)) // add node yx(y) matched with y to the queue - addToTree (yx(y), slackX(y), prev, xSet) // add edges (x, y) and (y, yx(y)) to the tree - end if - end for - end while - } // breakable - - reverseEdges (edge, prev) // reverse edges along augmenting path - nMatch += 1 // increment number of nodes in matching - if nMatch < maxMatch then augment () // try to find another augmenting path - end augment - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The main procedure to solve an assignment problem by finding initial pairings - * and then finding augmenting paths to improve the pairings/assignments. - */ - def solve (): Double = - if m > n then { println (s"Hungarian: error - m = $m > n = $n"); return -1.0 } - initLabels () // initial the cost labels for x-nodes - augment () // recursive method the find augmenting paths - - println ("-" * 60) - var total = 0.0 // cost/weight of the optimal matching - for x <- r_m do // form answer - - total += cost(x, xy(x)) // using values from x-side - println ("cost (" + x + ", " + xy(x) + ") = " + cost(x, xy(x))) - end for - println ("-" * 60) - total - end solve - -end Hungarian - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Hungarian` companion object supplies factory methods to create a cost - * matrix and build a `Hungarian` object suitable to solving an assignment problem. - */ -object Hungarian: - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Build build a `Hungarian` object suitable to solving an assignment problem. - * Any edges not in the map will be assigned zero cost (least value since - * maximizing). 
- * @param m the size of the set of x-nodes - * @param n the size of the set of y-nodes - * @param xy the map of positive edge costs connecting x_i to y_j - */ - def apply (m: Int, n: Int, xy: Map [(Int, Int), Double]): Hungarian = - val c = new MatrixD (m, n) - for ((k, v) <- xy) c(k._1, k._2) = v - new Hungarian (c) - end apply - -end Hungarian - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `HungarianTest` main function is used to test the `Hungarian` class. - * > runMain scalation.optimization.hungarianTest - */ -@main def hungarianTest (): Unit = - - banner ("AP: m = 3 workers, n = 3 jobs") - val cost1 = MatrixD ((3, 3), 1, 4, 5, - 5, 7, 6, - 5, 8, 8) - println ("optimal cost1 = " + (new Hungarian (cost1).solve ())) - - banner ("AP: m = 5 workers, n = 5 jobs") - val cost2 = MatrixD ((5, 5), 10, 19, 8, 15, 19, - 10, 18, 7, 17, 19, - 13, 16, 9, 14, 19, - 12, 19, 8, 18, 19, - 14, 17, 10, 19, 19) - println ("optimal cost2 = " + (new Hungarian (cost2).solve ())) - - banner ("AP: m = 3 workers, n = 4 jobs") - val cost3 = MatrixD ((3, 4), 1, 4, 5, 2, - 5, 7, 6, 2, - 5, 8, 8, 9) - println ("optimal cost3 = " + (new Hungarian (cost3).solve ())) - - banner ("AP: m = 4 workers, n = 3 jobs => error, not enough jobs for workers") - val cost4 = MatrixD ((4, 3), 1, 4, 5, - 5, 7, 6, - 2, 2, 9, - 5, 8, 8) - println ("optimal cost4 = " + (new Hungarian (cost4).solve ())) - -end hungarianTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `hungarianTest2` main function is used to test the `Hungarian` class. 
- * Allows edges to be specified rather than a matrix - * > runMain scalation.optimization.hungarianTest2 - */ -@main def hungarianTest2 (): Unit = - - banner ("AP: m = 3 workers, n = 3 jobs") - val xy = Map ((0, 0) -> 1.0, (0, 1) -> 4.0, (0, 2) -> 5.0, (0, 3) -> 2.0, - (1, 0) -> 5.0, (1, 1) -> 7.0, (1, 2) -> 6.0, (1, 3) -> 2.0, - (2, 0) -> 5.0, (2, 1) -> 8.0, (2, 2) -> 8.0, (2, 3) -> 9.0) - val ap = Hungarian (3, 4, xy) - println ("optimal cost = " + ap.solve ()) - -end hungarianTest2 - diff --git a/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS.scala.bak deleted file mode 100644 index eb8ecfb52..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS.scala.bak +++ /dev/null @@ -1,351 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/* - * Limited memory BFGS (L-BFGS). - * - * Copyright (c) 1990, Jorge Nocedal - * Copyright (c) 2007-2010 Naoaki Okazaki - * All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -/* $Id$ */ - -/* -This library is a C port of the FORTRAN implementation of Limited-memory -Broyden-Fletcher-Goldfarb-Shanno (L-BFGS) method written by Jorge Nocedal. -The original FORTRAN source code is available at: -http://www.ece.northwestern.edu/~nocedal/lbfgs.html -The L-BFGS algorithm is described in: - - Jorge Nocedal. - Updating Quasi-Newton Matrices with Limited Storage. - Mathematics of Computation, Vol. 35, No. 151, pp. 773--782, 1980. - - Dong C. Liu and Jorge Nocedal. - On the limited memory BFGS method for large scale optimization. - Mathematical Programming B, Vol. 45, No. 3, pp. 503-528, 1989. -The line search algorithms used in this implementation are described in: - - John E. Dennis and Robert B. Schnabel. - Numerical Methods for Unconstrained Optimization and Nonlinear - Equations, Englewood Cliffs, 1983. - - Jorge J. More and David J. Thuente. - Line search algorithm with guaranteed sufficient decrease. - ACM Transactions on Mathematical Software (TOMS), Vol. 20, No. 3, - pp. 286-307, 1994. -This library also implements Orthant-Wise Limited-memory Quasi-Newton (OWL-QN) -method presented in: - - Galen Andrew and Jianfeng Gao. - Scalable training of L1-regularized log-linear models. - In Proceedings of the 24th International Conference on Machine - Learning (ICML 2007), pp. 33-40, 2007. -I would like to thank the original author, Jorge Nocedal, who has been -distributing the effieicnt and explanatory implementation in an open source -licence. 
-*/ - -package scalation -package optimization - -import scala.math.abs - -import mathstat.VectorD - -import BFGS_LSA._ -import BFGS_code._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `L_BFGS` object contains a lbfgs function for nonlinear optimization. - */ -object L_BFGS: - - private val debug = debugf ("L_BFGS", true) // debug function - - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Limited-memory Broyden-Fletcher-Goldfarb-Shanno (L-BFGS) Optimization Algorithm. - *------------------------------------------------------------------------------ - * @param n The number of variables. - *------------------------------------------------------------------------------ - * @param x The array of variables. A client program can set default values for - * the optimization and receive the optimization result through this array. - *------------------------------------------------------------------------------ - * @param ptr_fx The pointer to the variable that receives the final value of the - * objective function for the variables. This argument can be set to \c null - * if the final value of the objective function is unnecessary. - *------------------------------------------------------------------------------ - * @param proc_evaluate The callback function to provide function and gradient - * evaluations given a current values of variables. A client program must - * implement a callback function compatible with \ref lbfgs_evaluate_t and - * pass the pointer to the callback function. - *------------------------------------------------------------------------------ - * @param proc_progress The callback function to receive the progress (the number - * of iterations, the current value of the objective function) of the - * minimization process. This argument can be set to \c null if a progress - * report is unnecessary. 
- *------------------------------------------------------------------------------ - * @param instance A user data for the client program. The callback functions - * will receive the value of this argument. - *------------------------------------------------------------------------------ - * @param param The pointer to a structure representing parameters for L-BFGS - * optimization. A client program can set this parameter to \c null to - * use the default parameters. Call lbfgs_parameter_init() function to - * fill a structure with the default values. - *------------------------------------------------------------------------------ - * @return (VectorD, Double, Int) The optimal location, value of the objective - * function and the status code. This function returns zero if the minimization - * process terminates without an error. A non-zero value indicates an error. - */ - def lbfgs (n: Int, - x_ : VectorD, - ff: VectorD => Double, - gf: VectorD => VectorD, - param_ : BFGS_parameter = null): (VectorD, Double, Int) = - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - case class Iteration_data (var alpha: Double = 0.0, - var s: VectorD = new VectorD (n), // [n] - var y: VectorD = new VectorD (n), // [n] - var ys: Double = 0.0) // vecdot(y, s) - end Iteration_data - - // Evaluate the function value and its gradient. - var x = x_ - var fx = ff(x) // value of objective function at x - var gx = gf(x) // vector value of gradient function at x - - debug ("lbfgs", s"start optimization from x = $x, fx = $fx, gx = $gx") - - BFGS_LS.set_ff (ff) - BFGS_LS.set_gf (gf) - - // Set the (hyper) parameters to the new spedification or use their default values. 
- val param = if param_ != null then { BFGS_LS.set_param (param_); param_ } - else BFGS_parameter () - - // Determine which line search algorithm to use - debug ("lbfgs", s"select linesearch: ${param.linesearch}") - val linesearch = BFGS_LS.select_linesearch - - // Allocate working space for OW-LQN. - var pg: VectorD = if param.orthantwise_c != 0.0 then new VectorD (n) else null - - // Allocate an array for storing previous values of the objective function. - val pf = if param.past > 0 then Array.ofDim [Double] (param.past) else null - - // Allocate limited memory storage and initialize the limited memory. - val m = param.m // the size of the limited memory (lm) - val lm = Array.fill [Iteration_data] (m)(Iteration_data ()) - - var xnorm = 0.0 // norn of position x - var gnorm = 0.0 // norm of gradient gx - - if param.orthantwise_c != 0.0 then - // Compute the L1 norm of the variable and add it to the object value. - xnorm = owlqn_x1norm (x, param.orthantwise_start, param.orthantwise_end) - fx += xnorm * param.orthantwise_c - owlqn_pseudo_gradient (pg, x, gx, n, - param.orthantwise_c, param.orthantwise_start, param.orthantwise_end) - end if - - // Store the initial value of the objective function. - if pf != null then pf(0) = fx - - // Compute the direction, assuming the initial hessian matrix H_0 as the identity matrix. - var d = if param.orthantwise_c == 0.0 then gx else pg - - // Make sure that the initial variables are not a minimizer. 
- xnorm = x.norm - gnorm = if param.orthantwise_c == 0.0 then gx.norm else pg.norm - if xnorm < 1.0 then xnorm = 1.0 - if gnorm / xnorm <= param.epsilon then return (x, fx, LBFGS_ALREADY_MINIMIZED.code) - - // Compute the initial step: step = 1.0 / sqrt(vecdot(d, d, n)) - var step = 1.0 / d.norm // step size - var w = new VectorD (n) // orthant updated by linesearch - var ls = 0 // number of steps taken by linesearch - var bound = 0 // limited memory bound - var end_ = 0 // current end in limited memory - var k = 1 // number of iterations (in while loop) - - while true do - val xp = x.copy // save the current position vector - val gp = gx.copy // save the current gradient vector - var d = -gx //.copy // best direction vector, modified later - -/* - val n = 2 // the dimension of the search space - val x = VectorD (0, 0) // the current location/point vector - val f = 26.0 // the objective function value f(x) - val g = VectorD (-6, -8) // the gradient vector at x - val s = VectorD (6, 8) // the search direction (e.g., opposite g) - val step = 0.2 // the initial step size - val xp = VectorD (0, 0) // the previous location/point vector - val gp = VectorD (0, 0) // the previous gradient vector -*/ - - - // Perform linesearch in direction d - debug ("lbfgs", s"before linesearch: d = $d, x = $x") - if param.orthantwise_c == 0.0 then - ls = linesearch (n, x, fx, gx, d, step, xp, gp, w) -// line_search_backtracking (n, x, f, g, s, step, xp, gp) - else - ls = linesearch (n, x, fx, gx, d, step, xp, pg, w) - owlqn_pseudo_gradient (pg, x, gx, n, - param.orthantwise_c, param.orthantwise_start, param.orthantwise_end) - end if - debug ("lbfgs", s"after linesearch: ls = $ls, x = $x") - - if ls < 0 then // linesearch ls < 0 => revert to the previous point - x = xp.copy - gx = gp.copy - return (x, fx, ls) - end if - - // Compute x and g norms. - xnorm = x.norm - gnorm = if param.orthantwise_c == 0.0 then gx.norm else pg.norm - - // TEST for Convergence. 
The criterion is given by the following formula: - // |g(x)| / \max(1, |x|) < \epsilon - if xnorm < 1.0 then xnorm = 1.0 - if gnorm / xnorm <= param.epsilon then return (x, fx, LBFGS_SUCCESS.code) // convergence - - // TEST for Stopping Criterion. The criterion is given by the following formula: - // |(f(past_x) - f(x))| / f(x) < \delta - if pf != null then - if param.past <= k then // does not test the stopping criterion while k < past - val rate = (pf(k % param.past) - fx) / fx // compute the relative improvement from the past - if abs (rate) < param.delta then return (x, fx, LBFGS_STOP.code) // the stopping criterion - end if - pf(k % param.past) = fx // store the current value of the objective function - end if - - // TEST for Maximum Number of Iterations. - if param.max_iterations != 0 && param.max_iterations < k+1 then - return (x, fx, LBFGSERR_MAXIMUMITERATION.code) - end if - - // Update vectors s and y: - // s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}. - // y_{k+1} = g_{k+1} - g_{k}. - var it = lm(end_) - it.s = x - xp - it.y = gx - gp - - // Compute scalars ys and yy: - // ys = y^t \cdot s = 1 / \rho. - // yy = y^t \cdot y. - // Notice that yy is used for scaling the hessian matrix H_0 (Cholesky factor). - val ys = it.y dot it.s - val yy = it.y dot it.y - it.ys = ys - - // Recursive formula to compute dir = -(H \cdot g). - // This is described in page 779 of: Jorge Nocedal. - // Updating Quasi-Newton Matrices with Limited Storage. - // Mathematics of Computation, Vol. 35, No. 151, pp. 773--782, 1980. - bound = if m <= k then m else k - k += 1 - end_ = (end_ + 1) % m - - // Reset the steepest direction (the negative of gradients). - if param.orthantwise_c != 0.0 then d = pg.copy - - var j = end_ - for i <- 0 until bound do - j = (j + m - 1) % m // if (--j == -1) j = m-1 - it = lm(j) - it.alpha = it.s dot d // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1}. - it.alpha /= it.ys - d = it.y - it.alpha // q_{i} = q_{i+1} - \alpha_{i} y_{i}. 
- end for - - d *= ys / yy - - for i <- 0 until bound do - it = lm(j) - val beta = (it.y dot d) / it.ys // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i}. - // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j}. - d = it.s + (it.alpha - beta) - j = (j + 1) % m // if (++j == m) j = 0 - end for - - // Constrain the search direction for orthant-wise updates. - if param.orthantwise_c != 0.0 then - for i <- param.orthantwise_start until param.orthantwise_end do - if d(i) * pg(i) >= 0 then d(i) = 0 - end for - end if - - // Now the search direction d is ready, try step = 1 first. - step = 1.0 - end while - (x, fx, 0) - end lbfgs - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** - */ - def owlqn_pseudo_gradient (pg: VectorD, - x: VectorD, - g: VectorD, - n: Int, - c: Double, - start: Int, - end_ : Int): Unit = - - // Compute the negative of gradients. - for i <- 0 until start do pg(i) = g(i) - - // Compute the psuedo-gradients. - for i <- start until end_ do - if x(i) < 0.0 then pg(i) = g(i) - c // Differentiable. - else if 0.0 < x(i) then pg(i) = g(i) + c // Differentiable. - else if g(i) < -c then pg(i) = g(i) + c // Take the right partial derivative. - else if c < g(i) then pg(i) = g(i) - c // Take the left partial derivative. - else pg(i) = 0.0 - end for - - for i <- end_ until n do pg(i) = g(i) - end owlqn_pseudo_gradient - -end L_BFGS - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `L_BFGSTest` object tests the `L_BFGS` object. 
- * > runMain scalation.optimization.L_BFGSTest - */ -object L_BFGSTest extends App: - - val n = 2 // dimension of the search space - val x0 = new VectorD (n) // starting location - - println ("\nMinimize: (x_0 - 3)^2 + (x_1 - 4)^2 + 1") - - def ff (x: VectorD): Double = (x(0) - 3) * (x(0) - 3) + (x(1) - 4) * (x(1) - 4) + 1.0 - def gf (x: VectorD): VectorD = VectorD (2*x(0) - 6, 2*x(1) - 8) - - val (x, fx, code) = L_BFGS.lbfgs (n, x0, ff, gf) - - println (s"optimal solution x = $x with an objective value f(x) = $fx, with status code $code") - -end L_BFGSTest - diff --git a/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS.scala.bak2 b/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS.scala.bak2 deleted file mode 100644 index b9e2a50fb..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS.scala.bak2 +++ /dev/null @@ -1,373 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/* - * Limited memory BFGS (L-BFGS). - * - * Copyright (c) 1990, Jorge Nocedal - * Copyright (c) 2007-2010 Naoaki Okazaki - * All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -/* $Id$ */ - -/* -This library is a C port of the FORTRAN implementation of Limited-memory -Broyden-Fletcher-Goldfarb-Shanno (L-BFGS) method written by Jorge Nocedal. -The original FORTRAN source code is available at: -http://www.ece.northwestern.edu/~nocedal/lbfgs.html -The L-BFGS algorithm is described in: - - Jorge Nocedal. - Updating Quasi-Newton Matrices with Limited Storage. - Mathematics of Computation, Vol. 35, No. 151, pp. 773--782, 1980. - - Dong C. Liu and Jorge Nocedal. - On the limited memory BFGS method for large scale optimization. - Mathematical Programming B, Vol. 45, No. 3, pp. 503-528, 1989. -The line search algorithms used in this implementation are described in: - - John E. Dennis and Robert B. Schnabel. - Numerical Methods for Unconstrained Optimization and Nonlinear - Equations, Englewood Cliffs, 1983. - - Jorge J. More and David J. Thuente. - Line search algorithm with guaranteed sufficient decrease. - ACM Transactions on Mathematical Software (TOMS), Vol. 20, No. 3, - pp. 286-307, 1994. -This library also implements Orthant-Wise Limited-memory Quasi-Newton (OWL-QN) -method presented in: - - Galen Andrew and Jianfeng Gao. - Scalable training of L1-regularized log-linear models. - In Proceedings of the 24th International Conference on Machine - Learning (ICML 2007), pp. 33-40, 2007. -I would like to thank the original author, Jorge Nocedal, who has been -distributing the effieicnt and explanatory implementation in an open source -licence. 
-*/ - -package scalation -package optimization - -import scala.math.abs - -import mathstat.{FunctionV2S, VectorD} - -import BFGS_code._ - -class L_BFGS (ff: FunctionV2S, gf: VectorD => VectorD) - extends Minimizer: - - private var param: BFGS_parameter = null - - def lineSearch (x: VectorD, dir: VectorD, step: Double): Double = ??? - - def setPram (param_ : BFGS_parameter): Unit = { param = param_ } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Solve the Non-Linear Programming (NLP) problem by starting at x0 and - * iteratively moving down in the search space to a minimal point. - * Return the optimal point/vector x and its objective function value. - * @param x0 the starting point - * @param step the initial step size (ignored) - * @param toler the tolerance (ignored) - */ - def solve (x0: VectorD, step: Double = -0.0, toler: Double = -0.0): FuncVec = - val res = L_BFGS.lbfgs (x0.dim, x0, ff, gf, param) - (res._2, res._1) - end solve - -end L_BFGS - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `L_BFGS` object contains a lbfgs function for nonlinear optimization. - */ -object L_BFGS: - - private val debug = debugf ("L_BFGS", true) // debug function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Limited-memory Broyden-Fletcher-Goldfarb-Shanno (L-BFGS) Optimization Algorithm. - *------------------------------------------------------------------------------ - * @param n The number of variables. - *------------------------------------------------------------------------------ - * @param x The array of variables. A client program can set default values for - * the optimization and receive the optimization result through this array. - *------------------------------------------------------------------------------ - * @param ptr_fx The pointer to the variable that receives the final value of the - * objective function for the variables. 
This argument can be set to \c null - * if the final value of the objective function is unnecessary. - *------------------------------------------------------------------------------ - * @param proc_evaluate The callback function to provide function and gradient - * evaluations given a current values of variables. A client program must - * implement a callback function compatible with \ref lbfgs_evaluate_t and - * pass the pointer to the callback function. - *------------------------------------------------------------------------------ - * @param proc_progress The callback function to receive the progress (the number - * of iterations, the current value of the objective function) of the - * minimization process. This argument can be set to \c null if a progress - * report is unnecessary. - *------------------------------------------------------------------------------ - * @param instance A user data for the client program. The callback functions - * will receive the value of this argument. - *------------------------------------------------------------------------------ - * @param param The pointer to a structure representing parameters for L-BFGS - * optimization. A client program can set this parameter to \c null to - * use the default parameters. Call lbfgs_parameter_init() function to - * fill a structure with the default values. - *------------------------------------------------------------------------------ - * @return (VectorD, Double, Int) The optimal location, value of the objective - * function and the status code. This function returns zero if the minimization - * process terminates without an error. A non-zero value indicates an error. 
- */ - def lbfgs (n: Int, - x_ : VectorD, - ff: VectorD => Double, - gf: VectorD => VectorD, - param_ : BFGS_parameter = null): (VectorD, Double, Int) = - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - case class Iteration_data (var alpha: Double = 0.0, - var s: VectorD = new VectorD (n), // [n] - var y: VectorD = new VectorD (n), // [n] - var ys: Double = 0.0) // vecdot(y, s) - end Iteration_data - - // Evaluate the function value and its gradient. - var x = x_ - var fx = ff(x) // value of objective function at x - var gx = gf(x) // vector value of gradient function at x - - debug ("lbfgs", s"start optimization from x = $x, fx = $fx, gx = $gx") - - BFGS_LS.set_ff (ff) - BFGS_LS.set_gf (gf) - - // Set the (hyper) parameters to the new spedification or use their default values. - val param = if param_ != null then { BFGS_LS.set_param (param_); param_ } - else BFGS_parameter () - - // Determine which line search algorithm to use - debug ("lbfgs", s"select linesearch: ${param.linesearch}") - val linesearch = BFGS_LS.select_linesearch - - // Allocate an array for storing previous values of the objective function. - val pf = if param.past > 0 then Array.ofDim [Double] (param.past) else null - - // Allocate limited memory storage and initialize the limited memory. - val m = param.m // the size of the limited memory (lm) - val lm = Array.fill [Iteration_data] (m)(Iteration_data ()) - - var xnorm = 0.0 // norn of position x - var gnorm = 0.0 // norm of gradient gx - - // Store the initial value of the objective function. - if pf != null then pf(0) = fx - - // Compute the direction, assuming the initial hessian matrix H_0 as the identity matrix. - var d = -gx - debug ("lbfgs", s"direction d = $d, d.norm = ${d.norm}") - - // Make sure that the initial variables are not a minimizer. 
- xnorm = x.norm - gnorm = gx.norm - if xnorm < 1.0 then xnorm = 1.0 - if gnorm / xnorm <= param.epsilon then - debug ("lbfgs", "return already minimized") - return (x, fx, LBFGS_ALREADY_MINIMIZED.code) - end if - - // Compute the initial step: step = 1.0 / sqrt(vecdot(d, d, n)) - var step = 1.0 / d.norm // step size - var w = new VectorD (n) // orthant updated by linesearch - var ls = 0 // number of steps taken by linesearch - var bound = 0 // limited memory bound - var end_ = 0 // current end in limited memory - var k = 1 // number of iterations (in while loop) - - while true do - banner (s"lbfgs iteration $k: step = $step") - val xp = x.copy // save the current position vector - val gp = gx.copy // save the current gradient vector - var d = -gx //.copy // best direction vector, modified later - -/* - val n = 2 // the dimension of the search space - val x = VectorD (0, 0) // the current location/point vector - val f = 26.0 // the objective function value f(x) - val g = VectorD (-6, -8) // the gradient vector at x - val s = VectorD (6, 8) // the search direction (e.g., opposite g) - val step = 0.2 // the initial step size - val xp = VectorD (0, 0) // the previous location/point vector - val gp = VectorD (0, 0) // the previous gradient vector -*/ - - - // Perform linesearch in direction d - debug ("lbfgs", s"before linesearch: d = $d, x = $x") -// line_search_backtracking (n, x, f, g, s, step, xp, gp, wa) -// val ret = linesearch (n, x, fx, gx, d, step, xp, gp, w) - val ret = linesearch (x, fx, gx, d, step) - - fx = ret._1; step = ret._2 - debug ("lbfgs", s"after linesearch: ls = $ls, x = $x") - - if ls < 0 then // linesearch ls < 0 => revert to the previous point - x = xp.copy - gx = gp.copy - debug ("lbfgs", "return due to linesearch ls < 0") - return (x, fx, ls) - end if - - // Compute x and g norms. - xnorm = x.norm - gnorm = gx.norm - - // TEST for Convergence. 
The criterion is given by the following formula: - // |g(x)| / \max(1, |x|) < \epsilon - if xnorm < 1.0 then xnorm = 1.0 - if gnorm / xnorm <= param.epsilon then - debug ("lbfgs", "return due to convergence") - return (x, fx, LBFGS_SUCCESS.code) // convergence - end if - - // TEST for Stopping Criterion. The criterion is given by the following formula: - // |(f(past_x) - f(x))| / f(x) < \delta - if pf != null then - if param.past <= k then // does not test the stopping criterion while k < past - val rate = (pf(k % param.past) - fx) / fx // compute the relative improvement from the past - if abs (rate) < param.delta then return (x, fx, LBFGS_STOP.code) // the stopping criterion - end if - pf(k % param.past) = fx // store the current value of the objective function - end if - - // TEST for Maximum Number of Iterations. - if param.max_iterations != 0 && param.max_iterations < k+1 then - return (x, fx, LBFGSERR_MAXIMUMITERATION.code) - end if - - // Update vectors s and y: - // s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}. - // y_{k+1} = g_{k+1} - g_{k}. - var it = lm(end_) - it.s = x - xp - it.y = gx - gp - - // Compute scalars ys and yy: - // ys = y^t \cdot s = 1 / \rho. - // yy = y^t \cdot y. - // Notice that yy is used for scaling the hessian matrix H_0 (Cholesky factor). - val ys = it.y dot it.s - val yy = it.y dot it.y - it.ys = ys - - // Recursive formula to compute dir = -(H \cdot g). - // This is described in page 779 of: Jorge Nocedal. - // Updating Quasi-Newton Matrices with Limited Storage. - // Mathematics of Computation, Vol. 35, No. 151, pp. 773--782, 1980. - bound = if m <= k then m else k - k += 1 - end_ = (end_ + 1) % m - - var j = end_ - for i <- 0 until bound do - j = (j + m - 1) % m // if (--j == -1) j = m-1 - it = lm(j) - it.alpha = it.s dot d // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1}. - it.alpha /= it.ys - d = it.y - it.alpha // q_{i} = q_{i+1} - \alpha_{i} y_{i}. 
- end for - - d *= ys / yy - - for i <- 0 until bound do - it = lm(j) - val beta = (it.y dot d) / it.ys // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i}. - // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j}. - d = it.s + (it.alpha - beta) - j = (j + 1) % m // if (++j == m) j = 0 - end for - - // Now the search direction d is ready, try step = 1 first. -// step *= 0.99 -// step = 1.0 - end while - (x, fx, 0) - end lbfgs - -end L_BFGS - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `l_BFGSTest` main function tests the `L_BFGS` object. - * > runMain scalation.optimization.l_BFGSTest - */ -@main def l_BFGSTest (): Unit = - - val n = 2 // dimension of the search space - val x0 = new VectorD (n) // starting location - - banner ("\nMinimize: (x_0 - 3)^2 + (x_1 - 4)^2 + 1") - - def ff (x: VectorD): Double = (x(0) - 3)~^2 + (x(1) - 4)~^2 + 1.0 - def gf (x: VectorD): VectorD = VectorD (2*x(0) - 6, 2*x(1) - 8) - - val (x, fx, code) = L_BFGS.lbfgs (n, x0, ff, gf) - println (s"optimal solution x = $x with an objective value f(x) = $fx, with status code $code") - -end l_BFGSTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `l_BFGSTest2` main function is used to test the `L_BFGS` class. 
- * > runMain scalation.optimization.l_BFGSTest2 - */ -@main def l_BFGSTest2 (): Unit = - - val n = 2 // dimension of the search space - val x0 = new VectorD (n) // starting location - - banner ("\nMinimize: x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1") - - def ff (x: VectorD): Double = x(0)~^4 + (x(0) - 3.0)~^2 + (x(1) - 4.0)~^2 + 1.0 - def gf (x: VectorD): VectorD = VectorD (4*x(0)~^3 + 2*x(0) - 6, 2*x(1) - 8) - - val (x, fx, code) = L_BFGS.lbfgs (n, x0, ff, gf) - println (s"optimal solution x = $x with an objective value f(x) = $fx, with status code $code") - -end l_BFGSTest2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `l_BFGSTest3` main function is used to test the `L_BFGS` class. - * > runMain scalation.optimization.l_BFGSTest3 - */ -@main def l_BFGSTest3 (): Unit = - - val n = 2 // dimension of the search space - val x0 = VectorD (0.1, 0.0) // starting location - - banner ("\nMinimize: 1/x(0) + x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1") - - def ff (x: VectorD): Double = 1/x(0) + x(0)~^4 + (x(0) - 3.0)~^2 + (x(1) - 4.0)~^2 + 1.0 - def gf (x: VectorD): VectorD = VectorD (-x(0)~^(-2) + 4*x(0)~^3 + 2*x(0) - 6, 2*x(1) - 8) - - val (x, fx, code) = L_BFGS.lbfgs (n, x0, ff, gf) - println (s"optimal solution x = $x with an objective value f(x) = $fx, with status code $code") - -end l_BFGSTest3 - diff --git a/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS_LS.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS_LS.scala.bak deleted file mode 100644 index 4350b9ca5..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS_LS.scala.bak +++ /dev/null @@ -1,685 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Jul 12 16:13:47 EDT 2021 - * @see LICENSE (MIT style license file). 
- * - * @title BFGS Line Search Algorithms - * - * @see github.com/clementfarabet/lbfgs/blob/master/lbfgs.h - * @see github.com/chokkan/liblbfgs/blob/master/lib/lbfgs.c - */ - -package scalation -package optimization - -import scala.math.{abs, max, min} - -import scalation.mathstat.VectorD - -import BFGS_code._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Return true if variables x and y have different signs. - * in C: #define fsigndiff(x, y) (*(x) * (*(y) / fabs(*(y))) < 0.) - * @param x the first variable (double) - * @param y the second variable (double) - */ -inline def fsigndiff (x: Double, y: Double): Boolean = x * (y / abs (y)) < 0.0 - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Line search algorithms enumeration. - * @param num_ the number corresponding to the line search algorithm - */ -enum BFGS_LSA (num_ : Int): - - def num: Int = num_ - - /** The default algorithm (MoreThuente method). */ - case LBFGS_LINESEARCH_DEFAULT extends BFGS_LSA (0) - - /** MoreThuente method proposd by More and Thuente. */ - case LBFGS_LINESEARCH_MORETHUENTE extends BFGS_LSA (0) - - /** Backtracking method with the Armijo condition. - * The backtracking method finds the step length such that it satisfies - * the sufficient decrease (Armijo) condition, - * - f(x + a * d) <= f(x) + lbfgs_parameter_t::ftol * a * g(x)^T d, - * - * where x is the current point, d is the current search direction, and - * a is the step length. - */ - case LBFGS_LINESEARCH_BACKTRACKING_ARMIJO extends BFGS_LSA (1) - - /** The backtracking method with the defualt (regular Wolfe) condition. */ - case LBFGS_LINESEARCH_BACKTRACKING extends BFGS_LSA (2) - - /** Backtracking method with regular Wolfe condition. 
- * The backtracking method finds the step length such that it satisfies - * both the Armijo condition (LBFGS_LINESEARCH_BACKTRACKING_ARMIJO) - * and the curvature condition, - * - g(x + a * d)^T d >= lbfgs_parameter_t::wolfe * g(x)^T d, - * - * where x is the current point, d is the current search direction, and - * a is the step length. - */ - case LBFGS_LINESEARCH_BACKTRACKING_WOLFE extends BFGS_LSA (2) - - /** Backtracking method with strong Wolfe condition. - * The backtracking method finds the step length such that it satisfies - * both the Armijo condition (LBFGS_LINESEARCH_BACKTRACKING_ARMIJO) - * and the following condition, - * - |g(x + a * d)^T d| <= lbfgs_parameter_t::wolfe * |g(x)^T d|, - * - * where x is the current point, d is the current search direction, and - * a is the step length. - */ - case LBFGS_LINESEARCH_BACKTRACKING_STRONG_WOLFE extends BFGS_LSA (3) - -end BFGS_LSA - -import BFGS_LSA._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BFGS_LS` object provides multiple line search algorithms. - */ -object BFGS_LS: - - private val debug = debugf ("BFGS_LS", true) // debug function - private val flaw = flawf ("BFGS_LS") // flaw function - - private var param: BFGS_parameter = BFGS_parameter () // the (hyper) parameters - private var ff: VectorD => Double = null // the objective function - private var gf: VectorD => VectorD = null // the gradient function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the (hyper) parameter values (to use other than default values). - * @param param_ the (hyper) parameters - */ - def set_param (param_ : BFGS_parameter): Unit = { param = param_ } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the objective function. Must be set before calling a line search algorithm. 
- * @param ff the objective function - */ - def set_ff (ff_ : VectorD => Double): Unit = { ff = ff_ } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the gradient functions. Must be set before calling a line search algorithm. - * @param gf the gradient function - */ - def set_gf (gf_ : VectorD => VectorD): Unit = { gf = gf_ } - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return/select a linesearch algorithm based on the given (hyper) parameter values. - */ - def select_linesearch = // Linesearch_type = - param.linesearch match - case LBFGS_LINESEARCH_MORETHUENTE => - line_search_morethuente - case _ => - line_search_backtracking - end match -/******* - if param.orthantwise_c != 0.0 then - param.linesearch match - case LBFGS_LINESEARCH_BACKTRACKING => - line_search_backtracking_owlqn _ - - case _ => - flaw ("select_linesearch", "LBFGSERR_INVALID_LINESEARCH") - null // Only backtracking method is available. - end match - end if -*******/ - end select_linesearch - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Line search with backtracking returning the new location for x and the - * status code. - * @see typedef int (*line_search_proc) in github.com/debbiemarkslab/plmc/blob/master/src/lib/lbfgs.c - * Note: the cs and param arguments are removed, handled by other means. 
- * @param x the current location/point vector (copy in, out) - * @param f_ the objective function value f(x) (copy in, out) - * @param g_ the gradient vector at x (copy in, out) - * @param s the search direction - * @param step_ the current step size (copy in, out) - */ - def line_search_backtracking (x: VectorD, - f_ : Double, - g_ : VectorD, - s: VectorD, - step_ : Double): (Double, Double, Int) = - // Copy in parameters - var f = f_ - var g = g_ - var step = step_ - - debug ("line_search_backtracking", s"linesearch from x = $x, f = $f, g = $g, step = $step") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Copy out parameters, otherwise must be explicitly returned (f, step). - * Must be called before all returns. - */ - inline def copy_out (): Unit = g_ set g - - var width = 0.9 // initial width (step size multiplier) - val dec = 0.5 // amount to decrease width - val inc = 2.1 // amount to increase width - - // Check the input parameters for errors. - if step <= 0.0 then - flaw ("line_search_backtracking", s"step = $step must be nonegative") - copy_out () - return (f, step, LBFGSERR_INVALIDPARAMETERS.code) - - // Compute the initial gradient in the search direction. - // Make sure that s points to a descent direction. - val dginit = g dot s // vecdot (&dginit, g, s, n) - if dginit > 0.0 then - flaw ("line_search_backtracking", s"dginit = $dginit must be <= 0.0") - copy_out () - return (f, step, LBFGSERR_INCREASEGRADIENT.code) - - // The initial value of the objective function. - val finit = f - val dgtest = param.ftol * dginit - - var count = 0 // iteration count - - while true do - val xp = x.copy // veccpy (x, xp, n) - x += s * step // vecadd (x, s, step, n) - - // Evaluate the function and gradient values. 
- f = ff (x) // f = cd.proc_evaluate (cd.instance, x, g, cd.n, step) - g = gf (x) - count += 1 - debug ("line_search_backtracking", s"count = $count, x = $x, f = $f") - - - if f > finit + step * dgtest then - width = dec - else if param.linesearch == LBFGS_LINESEARCH_BACKTRACKING_ARMIJO then - // The sufficient decrease condition (Armijo condition). - debug ("line_search_backtracking", "return due to Armijo sufficient decrease condition") - copy_out () - return (f, step, count) // exit with the Armijo condition. - end if - - // Check the Wolfe condition. - val dg = g dot s // vecdot (&dg, g, s, n) - if dg < param.wolfe * dginit then - width = inc - else if param.linesearch == LBFGS_LINESEARCH_BACKTRACKING_WOLFE then - debug ("line_search_backtracking", "return due to regular Wolfe condition") - copy_out () - return (f, step, count) // exit with the regular Wolfe condition. - end if - - // Check the strong Wolfe condition. - if dg > -param.wolfe * dginit then - width = dec - else - debug ("line_search_backtracking", "return due to strong Wolfe condition") - copy_out () - return (f, step, count) // exit with the strong Wolfe condition. - end if - - if step < param.min_step then // the step is the minimum value. - debug ("line_search_backtracking", "return due to step size too small") - copy_out () - return (f, step, LBFGSERR_MINIMUMSTEP.code) - - if step > param.max_step then // the step is the maximum value. - debug ("line_search_backtracking", "return due to step size too big") - copy_out () - return (f, step, LBFGSERR_MAXIMUMSTEP.code) - - if param.max_linesearch <= count then // maximum number of iteration. 
- debug ("line_search_backtracking", "return due to iteration limit") - copy_out () - return (f, step, LBFGSERR_MAXIMUMLINESEARCH.code) - - step *= width // decrease the step size by width factor - end while - - copy_out () - (f, step, count) // return count - end line_search_backtracking - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Improve the current point x by moving in the search direction s. - * using the More-Thuente Line Search Algorithm. - * @see www.ii.uib.no/~lennart/drgrad/More1994.pdf - * @param x the current location/point vector - * @param f_ the value of the objective function - * @param g_ the gradient vector - * @param s the search direction - * @param step_ the current step size - */ - def line_search_morethuente (x: VectorD, - f_ : Double, - g_ : VectorD, - s: VectorD, - step_ : Double): (Double, Double, Int) = - var f = f_ - var g = g_ - var step = step_ - - debug ("line_search_morethuente", s"linesearch from x = $x, f = $f, g = $g, step = $step") - - var count = 0 // iteration count - var uinfo = 0 // - var stmin, stmax = 0.0 // minimum and maximum steps - - // Check the input parameters for errors. - if step <= 0.0 then - return (f, step, LBFGSERR_INVALIDPARAMETERS.code) - - // Compute the initial gradient in the search direction. - // Make sure that s points to a descent direction. - val dginit = g dot s // vecdot (&dginit, g, s, n) - if 0 < dginit then - return (f, step, LBFGSERR_INCREASEGRADIENT.code) - - // Initialize local variables. - var brackt = false // whether the interval is bracketed yet - var stage1 = true // whether in stage 1 of the 2-stage algorithm - val finit = f // initial functional value - val dgtest = param.ftol * dginit // - var width = param.max_step - param.min_step // initial interval width - var prev_width = 2.0 * width // previous width - - // Variables stx, fx, dgx contain the values of the step, function, and directional derivative at the best step. 
- // Variables sty, fy, dgy contain the value of the step, function, and derivative at the other endpoint of the interval of uncertainty. - // Variables step, f, dg contain the values of the step, function, and derivative at the current step. - - var stx = 0.0 // best step size - var sty = 0.0 // step to end of interval - var fx = finit // functional value at best step - var fy = finit // functional value at end step - var dgx = dginit // directional derivative best step - var dgy = dginit // directional derivative end step - - while true do - // Set the minimum and maximum steps to correspond to the present interval of uncertainty. - if brackt then - stmin = min (stx, sty) - stmax = max (stx, sty) - else - stmin = stx - stmax = step + 4.0 * (step - stx) - end if - - // Clip the step in the range of [stepmin, stepmax]. - if step < param.min_step then step = param.min_step - if param.max_step < step then step = param.max_step - - // If an unusual termination is to occur then let step be the lowest point obtained so far. - if (brackt && ((step <= stmin || stmax <= step) || param.max_linesearch <= count + 1 || uinfo != 0)) || - (brackt && (stmax - stmin <= param.xtol * stmax)) then - step = stx - end if - - // Compute the current value of x: x <- x + (step) * s. - val xp = x.copy // veccpy (x, xp, n) - previous point - x += s * step // vecadd (x, s, step, n) - new point - - // Evaluate the function and gradient values. - f = ff (x) // f = cd.proc_evaluate(cd.instance, x, g, cd.n, step) - g = gf (x) - val dg = g dot s // vecdot (&dg, g, s, n) - directional derivative - - val ftest1 = finit + step * dgtest - count += 1 - - // Test for errors and convergence. - - if brackt && ((step <= stmin || stmax <= step) || uinfo != 0) then - return (f, step, LBFGSERR_ROUNDING_ERROR.code) // rounding errors prevent further progress - - if step == param.max_step && f <= ftest1 && dg <= dgtest then - return (f, step, LBFGSERR_MAXIMUMSTEP.code) // the step is the maximum value. 
- - if step == param.min_step && (ftest1 < f || dgtest <= dg) then - return (f, step, LBFGSERR_MINIMUMSTEP.code) // the step is the minimum value - - if brackt && (stmax - stmin) <= param.xtol * stmax then - return (f, step, LBFGSERR_WIDTHTOOSMALL.code) // relative width of interval of uncertainty is at most xtol - - if param.max_linesearch <= count then - return (f, step, LBFGSERR_MAXIMUMLINESEARCH.code) // maximum number of iteration - - if f <= ftest1 && abs (dg) <= param.gtol * (-dginit) then - return (f, step, count) // sufficient decrease cond. and directional deriv cond. hold. - - // In first stage we seek a step for which the modified function has a nonpositive value and nonnegative derivative. - - if stage1 && f <= ftest1 && min (param.ftol, param.gtol) * dginit <= dg then - stage1 = false - - // A modified function is used to predict the step only if we have not obtained a step for which the modified - // function has a nonpositive function value and nonnegative derivative, and if a lower function value has been - // obtained but the decrease is not sufficient. - - if stage1 && ftest1 < f && f <= fx then // define the modified function and derivative values. - val fm = f - step * dgtest // modified functional value at current step - val fxm = fx - stx * dgtest // modified functional value at best step - val fym = fy - sty * dgtest // modified functional value at end step - val dgm = dg - dgtest // modified directional derivative at current step - val dgxm = dgx - dgtest // modified directional derivative at best step - val dgym = dgy - dgtest // modified directional derivative at end step - - // Call update_trial_interval() to update the interval of uncertainty and to compute new step. - uinfo = update_trial_interval (stx, fxm, dgxm, sty, fym, dgym, step, fm, dgm, stmin, stmax, brackt) - - // Reset the function and gradient values for f. 
- fx = fxm + stx * dgtest - fy = fym + sty * dgtest - dgx = dgxm + dgtest - dgy = dgym + dgtest - else - // Call update_trial_interval() to update the interval of uncertainty and to compute new step. - uinfo = update_trial_interval (stx, fx, dgx, sty, fy, dgy, step, f, dg, stmin, stmax, brackt) - end if - - // Force a sufficient decrease in the interval of uncertainty. - if brackt then - if 0.66 * prev_width <= abs(sty - stx) then step = stx + 0.5 * (sty - stx) - prev_width = width - width = abs(sty - stx) - end if - end while - - (f, step, LBFGSERR_LOGICERROR.code) - end line_search_morethuente - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Improve the current point x by moving in the search direction s. - * Line search algorithm for problems with L1 regularization, including optimizers - * such as Orthant-Wise Quasi-Newton optimizers. - * @param n the dimension of search space - * @param x_ the current location/point vector - * @param f_ the value of the objective function - * @param g_ the gradient vector - * @param s the search direction - * @param step_ the current step size - * @param xp_ the previous location/point - * @param gp the previous gradient vector - * @param wp the orthant for the point (updated in return) - */ - def line_search_backtracking_owlqn (n: Int, - x_ : VectorD, - f_ : Double, - g_ : VectorD, - s: VectorD, - step_ : Double, - xp_ : VectorD, - gp: VectorD, - wp: VectorD): (Double, Double, Int) = - debug ("line_search_backtracking_owlqn", s"linesearch from x_ = $x_") - var x = x_ - var f = f_ - var g = g_ - var step = step_ - var xp = xp_ - - var i, count = 0 - var width = 0.5 - var norm = 0.0 - var dgtest = 0.0 - var finit = f - - // Check the input parameters for errors. - if step <= 0.0 then - return (f, step, LBFGSERR_INVALIDPARAMETERS.code) - - // Choose the orthant for the new point. 
- for i <- 0 until n do wp(i) = if xp(i) == 0.0 then -gp(i) else xp(i) - - while true do // Update the current point. - xp = x.copy // veccpy (x, xp, n) - x += s * step // vecadd (x, s, step, n) - - // The current point is projected onto the orthant. - owlqn_project(x, wp, param.orthantwise_start, param.orthantwise_end) - - // Evaluate the function and gradient values. - f = ff (x) // f = cd.proc_evaluate(cd.instance, x, g, cd.n, step) - g = gf (x) - - // Compute the L1 norm of the variables and add it to the object value. - norm = owlqn_x1norm(x, param.orthantwise_start, param.orthantwise_end) - f += norm * param.orthantwise_c - - count += 1 - - dgtest = 0.0 - for i <- 0 until n do dgtest += (x(i) - xp(i)) * gp(i) - - if f <= finit + param.ftol * dgtest then // The sufficient decrease condition. - return (f, step, count) - - if step < param.min_step then // The step is the minimum value. - return (f, step, LBFGSERR_MINIMUMSTEP.code) - - if step > param.max_step then // The step is the maximum value. - return (f, step, LBFGSERR_MAXIMUMSTEP.code) - - if param.max_linesearch <= count then // Maximum number of iteration. - return (f, step, LBFGSERR_MAXIMUMLINESEARCH.code) - - step *= width - end while - (f, step, 0) // return success - end line_search_backtracking_owlqn - -end BFGS_LS - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Update a safeguarded trial value and interval for line search. - *------------------------------------------------------------------------------ - * The parameter x represents the step with the least function value. - * The parameter t represents the current step. This function assumes - * that the derivative at the point of x in the direction of the step. - * If the bracket is set to true, the minimizer has been bracketed in - * an interval of uncertainty with endpoints between x and y. - *------------------------------------------------------------------------------ - * @see Jorge J. 
More and David J. Thuente. Line search algorithm with - * guaranteed sufficient decrease. ACM Transactions on Mathematical - * Software (TOMS), Vol 20, No 3, pp. 286-307, 1994. - *------------------------------------------------------------------------------ - * @param x The pointer to the value of one endpoint. - * @param fx The pointer to the value of f(x). - * @param dx The pointer to the value of f'(x). - * @param y The pointer to the value of another endpoint. - * @param fy The pointer to the value of f(y). - * @param dy The pointer to the value of f'(y). - * @param t The pointer to the value of the trial value, t. - * @param ft The pointer to the value of f(t). - * @param dt The pointer to the value of f'(t). - * @param tmin The minimum value for the trial value, t. - * @param tmax The maximum value for the trial value, t. - * @param brackt The pointer to the predicate if the trial value is bracketed. - * @return int Status value. Zero indicates a normal termination. - */ -def update_trial_interval (x_ : Double, - fx_ : Double, - dx_ : Double, - y_ : Double, - fy_ : Double, - dy_ : Double, - t_ : Double, - ft: Double, - dt: Double, - tmin: Double, - tmax: Double, - brackt_ : Boolean): Int = - var x = x_ - var fx = fx_ - var dx = dx_ - var y = y_ - var fy = fy_ - var dy = dy_ - var t = t_ - var brackt = brackt_ - - var bound = false - var dsign = fsigndiff (dt, dx) - var mc = 0.0 // minimizer of an interpolated cubic - var mq = 0.0 // minimizer of an interpolated quadratic - var newt = 0.0 // new trial value - - // Check the input parameters for errors. - if brackt then - if t <= min (x, y) || max (x, y) <= t then // trival value t is out of the interval - return LBFGSERR_OUTOFINTERVAL.code - - if 0.0 <= dx * (t - x) then // function must decrease from x - return LBFGSERR_INCREASEGRADIENT.code - - if tmax < tmin then // incorrect tmin and tmax specified - return LBFGSERR_INCORRECT_TMINMAX.code - end if - - // Trial value selection. 
- if fx < ft then - // Case 1: a higher function value. The minimum is brackt. - // If the cubic minimizer is closer to x than the quadratic one, - // the cubic one is taken, else the average of the minimizers is taken. - - brackt = true - bound = true - mc = cubic_minimizer (mc, x, fx, dx, t, ft, dt) - mq = quad_minimizer (mq, x, fx, dx, t, ft) - newt = if abs (mc - x) < abs (mq - x) then mc else mc + 0.5 * (mq - mc) - - else if dsign then - // Case 2: a lower function value and derivatives of opposite sign. - // The minimum is brackt. If the cubic minimizer is closer to x than - // the quadratic (secant) one, the cubic one is taken, else the quadratic one is taken. - - brackt = true - bound = false - mc = cubic_minimizer (mc, x, fx, dx, t, ft, dt) - mq = quad_minimizer2 (mq, x, dx, t, dt) - newt = if abs (mc - t) > abs (mq - t) then mc else mq - - else if abs (dt) < abs (dx) then - // Case 3: a lower function value, derivatives of the same sign, and the magnitude of - // the derivative decreases. The cubic minimizer is only used if the cubic tends to - // infinity in the direction of the minimizer or if the minimum of the cubic is beyond t. - // Otherwise the cubic minimizer is defined to be either tmin or tmax. The quadratic (secant) - // minimizer is also computed and if the minimum is brackt then the the minimizer closest - // to x is taken, else the one farthest away is taken. - - bound = true - mc = cubic_minimizer2 (mc, x, fx, dx, t, ft, dt, tmin, tmax) - mq = quad_minimizer2 (mq, x, dx, t, dt) - newt = - if brackt then - if abs (t - mc) < abs (t - mq) then mc else mq - else - if abs (t - mc) > abs (t - mq) then mc else mq - - else - // Case 4: a lower function value, derivatives of the same sign, and the magnitude of - // the derivative does not decrease. If the minimum is not brackt, the step is either - // tmin or tmax, else the cubic minimizer is taken. 
- - bound = false - newt = if brackt then cubic_minimizer (newt, t, ft, dt, y, fy, dy) - else if x < t then tmax - else tmin - end if - - // Update interval of uncertainty: update does not depend on new step or case analysis above. - // Case a: if f(x) < f(t), x <- x, y <- t - // Case b: if f(t) <= f(x) && f'(t) * f'(x) > 0, x <- t, y <- y - // Case c: if f(t) <= f(x) && f'(t) * f'(x) < 0, x <- t, y <- x - - if fx < ft then // Case a - y = t; fy = ft; dy = dt - else // Case c - if dsign then y = x; fy = fx; dy = dx - // Cases b and c - x = t; fx = ft; dx = dt - end if - - // Clip the new trial value in [tmin, tmax]. - if tmax < newt then newt = tmax - if newt < tmin then newt = tmin - - // Redefine the new trial value if it is close to the upper bound of the interval. - if brackt && bound then - mq = x + 0.66 * (y - x) - if x < y then if mq < newt then newt = mq - else if newt < mq then newt = mq - end if - - // Return the new trial value. - t = newt - 0 // return success -end update_trial_interval - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Return the 1-norm of vector x. - * @param x the vector whose 1-norm is sought - * @param start the start index - * @param n the end index - */ -def owlqn_x1norm (x: VectorD, start: Int, n: Int): Double = - var norm = 0.0 - for i <- start until n do norm += abs (x(i)) - norm -end owlqn_x1norm - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Project ... - * @param d the distance vector - * @param sign the sign vector - * @param start the start index - * @param end_ the end index - */ -def owlqn_project (d: VectorD, sign: VectorD, start: Int, end_ : Int): Unit = - for i <- start until end_ do - if d(i) * sign(i) <= 0.0 then d(i) = 0.0 - end for -end owlqn_project - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bFGS_LSTest` main function tests the `BFGS_LS` object. 
- * > runMain scalation.optimization.bFGS_LSTest - */ -@main def bFGS_LSTest (): Unit = - - println ("\nMinimize: (x_0 - 2)^2 + (x_1 - 3)^2 + 1") - - def ff(x: VectorD): Double = (x(0) - 2)~^2 + (x(1) - 3)~^2 + 1 - def gf(x: VectorD): VectorD = VectorD (2 * x(0) - 4, 2 * x(1) - 6) - - BFGS_LS.set_ff (ff) - BFGS_LS.set_gf (gf) - - val x = VectorD (0, 0) // the current location/point vector - val f = ff(x) // the objective function value f(x) - val g = gf(x) // the gradient vector at x -// val s = -g // the search direction (e.g., opposite g) - val s = VectorD (1, 1) // custom search direction - val step = 0.2 // the initial step size - - banner (s"x = $x, f = $f, g = $g, s = $s, step = $step") - - val code = BFGS_LS.line_search_backtracking (x, f, g, s, step) -// val code = BFGS_LS.line_search_morethuente (x, f, g, s, step) - - println (s"optimal solution x = $x with an objective value ff(x) = ${ff(x)}, with status code $code") - -end bFGS_LSTest - diff --git a/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS_NoLS.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS_NoLS.scala.bak deleted file mode 100644 index 19a94c41e..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS_NoLS.scala.bak +++ /dev/null @@ -1,166 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Wed Jun 7 17:54:59 EDT 2023 - * @see LICENSE (MIT style license file). 
- * - * @title Limited Memory BFGS Method to Find Minima for Functions of Vectors - * - * @see web.stanford.edu/class/cme304/docs/newton-type-methods.pdf - * @see `L_BFGS` for similar code that uses line-search - */ - -package scalation -package optimization - -import scala.math.{abs, sqrt} - -import scalation.calculus.Differential.∇ -import scalation.mathstat._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `L_BFGS_NoLS` class is used to find optima for functions of vectors. - * The solve method finds local optima using a Quasi Newton method. - * @param f the vector to scalar function to find optima of - * @param m the memory size or number of historical s and y vectors to maintain - * @param n the dimensionality of the optimization problem - */ -class L_BFGS_NoLS (f: FunctionV2S, m: Int, n: Int) - extends Minimize: - - private val debug = debugf ("L_BFGS_NoLS", true) // debug function - private val s = new MatrixD (m, n) // history of x-position changes - private val y = new MatrixD (m, n) // history of gradient changes - private val p = new VectorD (m) // rho (p) vector for findDir - private val a = new VectorD (m) // alpha (a) vector for findDir - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Find the deflected gradient by passing in the current gradient and using - * the last m (change in x-position) s and (change in gradient) y vectors. 
- * @see https://en.wikipedia.org/wiki/Limited-memory_BFGS - * FIX - access in correct order - circular - * @param g the current gradient - * @param k the k-th iteration - */ - def findDir (g: VectorD, k: Int): VectorD = - var q = g // start with current gradient - for i <- k-1 to k-m by -1 do - a(i) = (s(i) dot q) * p(i) - q -= y(i) * a(i) - val ga = (s(k-1) dot y(k-1)) / y(k-1).normSq // gamma - var z = q * ga - for i <- k-m until k do - val b = (y(i) dot z) * p(i) - z += s(i) * (a(i) - b) - z // return direction = deflected gradient - end findDir - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Solve for an optima by finding a local optima close to the starting point/guess 'x0'. - * This version numerically approximates the first derivatives. - * @param x0 the starting point/guess - * @param α the current learning rate - */ - def solve (x0: VectorD, α: Double = eta): FuncVec = - var x = x0 // current point - var f_x = f(x) // function value at x - var df_x = ∇ (f, x) // compute gradient, numerically - - var it = 0 // iteration counter - cfor (it < MAX_IT && df_x.norm > EPS, it += 1) { - debug ("solve", s"it = $it: f($x) = $f_x, df_x = $df_x") - - val d = if it == 0 then df_x // direction = gradient - else findDir (df_x, it) // find deflected gradient - - //FIX - add in correct order - s(it) = d * -α // compute step vector (- => opposite gradient) - x += s(it) // update new x - val df_x_ = df_x // save previous gradient - df_x = ∇ (f, x) // compute new gradient, numerically - y(it) = df_x - df_x_ // difference in gradients - f_x = f(x) // functional value - } // cfor - - println (s"optimal solution x= $x, f = ${f(x)}") - (f_x, x) - end solve - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Solve for an optima by finding a local optima close to the starting point/guess 'x0'. 
- * This version uses explicit functions for the gradient (partials derivatives) - * @param x0 the starting point/guess - * @param grad the gradient as explicit functions for partials - * @param α the current learning rate - */ - def solve2 (x0: VectorD, grad: FunctionV2V, α: Double = eta): FuncVec = - var x = x0 // current point - var f_x = f(x) // function value at x - var df_x = grad (x) // compute gradient by function evaluation - - var it = 0 // iteration counter - cfor (it < MAX_IT && df_x.norm > EPS, it += 1) { - debug ("solve", s"it = $it: f($x) = $f_x, df_x = $df_x") - - val d = if it == 0 then df_x // direction = gradient - else findDir (df_x, it) // find deflected gradient - - s(it) = d * -α // compute step vector (- => opposite gradient) - x += s(it) // update new x - val df_x_ = df_x // save previous gradient - df_x = grad (x) // compute new gradient by function evaluation - y(it) = df_x - df_x_ // difference in gradients - f_x = f(x) // functional value - } // cfor - - println (s"optimal solution x= $x, f = ${f(x)}") - (f_x, x) - end solve2 - -end L_BFGS_NoLS - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `l_BFGS_NoLSTest` main function is used to test the `L_BFGS_NoLS` class. - * This test numerically approximates the derivatives to find minima. - * > runMain scalation.optimization.l_BFGS_NoLSTest - */ -@main def l_BFGS_NoLSTest (): Unit = - - val m = 4 // size of memory - val n = 2 // dimension of the search space - val x0 = new VectorD (n) - - banner ("Minimize: (x_0 - 3)^2 + (x_1 - 4)^2 + 1") - def f (x: VectorD): Double = (x(0) - 3.0)~^2 + (x(1) - 4.0)~^2 + 1.0 - - val optimizer = new L_BFGS_NoLS (f, m, n) - val opt = optimizer.solve (x0) - println (s"][ optimal solution (f(x), x) = $opt") - -end l_BFGS_NoLSTest - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `l_BFGS_NoLSTest2` main function is used to test the `L_BFGS_NoLS` class. 
- * This tests use functions for partial derivatives to find minima. - * > runMain scalation.optimization.l_BFGS_NoLSTest2 - */ -@main def l_BFGS_NoLSTest2 (): Unit = - - val m = 4 // size of memory - val n = 2 // dimension of the search space - val x0 = new VectorD (n) - - banner ("Minimize: (x_0 - 3)^2 + (x_1 - 4)^2 + 1") - def f (x: VectorD): Double = (x(0) - 3)~^2 + (x(1) - 4)~^2 + 1 - - def grad (x: VectorD): VectorD = VectorD (2 * x(0) - 6, 2 * x(1) - 8) - - val optimizer = new L_BFGS_NoLS (f, m, n) - val opt = optimizer.solve2 (x0, grad) - println (s"][ optimal solution (f(x), x) = $opt") - -end l_BFGS_NoLSTest2 - diff --git a/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS_code.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS_code.scala.bak deleted file mode 100644 index 185c4bda5..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS_code.scala.bak +++ /dev/null @@ -1,242 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Jul 12 16:13:47 EDT 2021 - * @see LICENSE (MIT style license file). - * - * @title BFGS Error Codes - Translated from liblbfgs - * - * @see github.com/clementfarabet/lbfgs/blob/master/lbfgs.h - * @see github.com/chokkan/liblbfgs/blob/master/lib/lbfgs.c - */ - -package scalation -package optimization - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Define the values/codes used by lbfgs (). - * Roughly speaking, a negative value indicates an error. - */ -enum BFGS_code (code_ : Int = -1): - - def code: Int = code_ - - // L-BFGS reaches convergence. - case LBFGS_SUCCESS extends BFGS_code (0) - case LBFGS_CONVERGENCE extends BFGS_code (0) - case LBFGS_STOP extends BFGS_code (0) - - // The initial variables already minimize the objective function. - case LBFGS_ALREADY_MINIMIZED extends BFGS_code () - - // Unknown error. 
- case LBFGSERR_UNKNOWNERROR extends BFGS_code (-1024) - - // Logic error. - case LBFGSERR_LOGICERROR extends BFGS_code () - - // Insufficient memory. - case LBFGSERR_OUTOFMEMORY extends BFGS_code () - - // The minimization process has been canceled. - case LBFGSERR_CANCELED extends BFGS_code () - - // Invalid number of variables specified. - case LBFGSERR_INVALID_N extends BFGS_code () - - // Invalid number of variables (for SSE) specified. - case LBFGSERR_INVALID_N_SSE extends BFGS_code () - - // The array x must be aligned to 16 (for SSE). - case LBFGSERR_INVALID_X_SSE extends BFGS_code () - - // Invalid parameter lbfgs_parameter_t::epsilon specified. - case LBFGSERR_INVALID_EPSILON extends BFGS_code () - - // Invalid parameter lbfgs_parameter_t::past specified. - case LBFGSERR_INVALID_TESTPERIOD extends BFGS_code () - - // Invalid parameter lbfgs_parameter_t::delta specified. - case LBFGSERR_INVALID_DELTA extends BFGS_code () - - // Invalid parameter lbfgs_parameter_t::linesearch specified. - case LBFGSERR_INVALID_LINESEARCH extends BFGS_code () - - // Invalid parameter lbfgs_parameter_t::max_step specified. - case LBFGSERR_INVALID_MINSTEP extends BFGS_code () - - // Invalid parameter lbfgs_parameter_t::max_step specified. - case LBFGSERR_INVALID_MAXSTEP extends BFGS_code () - - // Invalid parameter lbfgs_parameter_t::ftol specified. - case LBFGSERR_INVALID_FTOL extends BFGS_code () - - // Invalid parameter lbfgs_parameter_t::wolfe specified. - case LBFGSERR_INVALID_WOLFE extends BFGS_code () - - // Invalid parameter lbfgs_parameter_t::gtol specified. - case LBFGSERR_INVALID_GTOL extends BFGS_code () - - // Invalid parameter lbfgs_parameter_t::xtol specified. - case LBFGSERR_INVALID_XTOL extends BFGS_code () - - // Invalid parameter lbfgs_parameter_t::max_linesearch specified. - case LBFGSERR_INVALID_MAXLINESEARCH extends BFGS_code () - - // Invalid parameter lbfgs_parameter_t::orthantwise_c specified. 
- case LBFGSERR_INVALID_ORTHANTWISE extends BFGS_code () - - // Invalid parameter lbfgs_parameter_t::orthantwise_start specified. - case LBFGSERR_INVALID_ORTHANTWISE_START extends BFGS_code () - - // Invalid parameter lbfgs_parameter_t::orthantwise_end specified. - case LBFGSERR_INVALID_ORTHANTWISE_END extends BFGS_code () - - // The line-search step went out of the interval of uncertainty. - case LBFGSERR_OUTOFINTERVAL extends BFGS_code () - - // A logic error occurred or the interval of uncertainty became too small. - case LBFGSERR_INCORRECT_TMINMAX extends BFGS_code () - - // A rounding error occurred, or no line-search step satisfies sufficient decrease and curvature conditions. - case LBFGSERR_ROUNDING_ERROR extends BFGS_code () - - // The line-search step became smaller than lbfgs_parameter_t::min_step. - case LBFGSERR_MINIMUMSTEP extends BFGS_code () - - // The line-search step became larger than lbfgs_parameter_t::max_step. - case LBFGSERR_MAXIMUMSTEP extends BFGS_code () - - // The line-search routine reaches the maximum number of evaluations. - case LBFGSERR_MAXIMUMLINESEARCH extends BFGS_code () - - // The algorithm routine reaches the maximum number of iterations. - case LBFGSERR_MAXIMUMITERATION extends BFGS_code () - - // Relative width of the interval of uncertainty is at most lbfgs_parameter_t::xtol. - case LBFGSERR_WIDTHTOOSMALL extends BFGS_code () - - // A logic error (negative line-search step) occurred. - case LBFGSERR_INVALIDPARAMETERS extends BFGS_code () - - // The current search direction increases the objective function value. - case LBFGSERR_INCREASEGRADIENT extends BFGS_code () - -end BFGS_code - -import BFGS_code._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Return a description of the lbfgs standard error code. - * @pram err the error code - */ -def lbfgs_strerror (err: BFGS_code): String = - err match - case LBFGS_SUCCESS => // also handles LBFGS_CONVERGENCE. 
- "Success: reached convergence (gtol)." - - case LBFGS_STOP => - "Success: met stopping criteria (ftol)." - - case LBFGS_ALREADY_MINIMIZED => - "The initial variables already minimize the objective function." - - case LBFGSERR_UNKNOWNERROR => - "Unknown error." - - case LBFGSERR_LOGICERROR => - "Logic error." - - case LBFGSERR_OUTOFMEMORY => - "Insufficient memory." - - case LBFGSERR_CANCELED => - "The minimization process has been canceled." - - case LBFGSERR_INVALID_N => - "Invalid number of variables specified." - - case LBFGSERR_INVALID_N_SSE => - "Invalid number of variables (for SSE) specified." - - case LBFGSERR_INVALID_X_SSE => - "The array x must be aligned to 16 (for SSE)." - - case LBFGSERR_INVALID_EPSILON => - "Invalid parameter lbfgs_parameter_t::epsilon specified." - - case LBFGSERR_INVALID_TESTPERIOD => - "Invalid parameter lbfgs_parameter_t::past specified." - - case LBFGSERR_INVALID_DELTA => - "Invalid parameter lbfgs_parameter_t::delta specified." - - case LBFGSERR_INVALID_LINESEARCH => - "Invalid parameter lbfgs_parameter_t::linesearch specified." - - case LBFGSERR_INVALID_MINSTEP => - "Invalid parameter lbfgs_parameter_t::max_step specified." - - case LBFGSERR_INVALID_MAXSTEP => - "Invalid parameter lbfgs_parameter_t::max_step specified." - - case LBFGSERR_INVALID_FTOL => - "Invalid parameter lbfgs_parameter_t::ftol specified." - - case LBFGSERR_INVALID_WOLFE => - "Invalid parameter lbfgs_parameter_t::wolfe specified." - - case LBFGSERR_INVALID_GTOL => - "Invalid parameter lbfgs_parameter_t::gtol specified." - - case LBFGSERR_INVALID_XTOL => - "Invalid parameter lbfgs_parameter_t::xtol specified." - - case LBFGSERR_INVALID_MAXLINESEARCH => - "Invalid parameter lbfgs_parameter_t::max_linesearch specified." - - case LBFGSERR_INVALID_ORTHANTWISE => - "Invalid parameter lbfgs_parameter_t::orthantwise_c specified." - - case LBFGSERR_INVALID_ORTHANTWISE_START => - "Invalid parameter lbfgs_parameter_t::orthantwise_start specified." 
- - case LBFGSERR_INVALID_ORTHANTWISE_END => - "Invalid parameter lbfgs_parameter_t::orthantwise_end specified." - - case LBFGSERR_OUTOFINTERVAL => - "The line-search step went out of the interval of uncertainty." - - case LBFGSERR_INCORRECT_TMINMAX => - "A logic error occurred, or the interval of uncertainty became too small." - - case LBFGSERR_ROUNDING_ERROR => - "A rounding error occurred, or no line-search step satisfies sufficient decrease and curvature conditions." - - case LBFGSERR_MINIMUMSTEP => - "The line-search step became smaller than lbfgs_parameter_t::min_step." - - case LBFGSERR_MAXIMUMSTEP => - "The line-search step became larger than lbfgs_parameter_t::max_step." - - case LBFGSERR_MAXIMUMLINESEARCH => - "The line-search routine reaches the maximum number of evaluations." - - case LBFGSERR_MAXIMUMITERATION => - "The algorithm routine reaches the maximum number of iterations." - - case LBFGSERR_WIDTHTOOSMALL => - "Relative width of the interval of uncertainty is at most lbfgs_parameter_t::xtol." - - case LBFGSERR_INVALIDPARAMETERS => - "A logic error (negative line-search step) occurred." - - case LBFGSERR_INCREASEGRADIENT => - "The current search direction increases the objective function value." - - case _ => - "(unknown)" - end match -end lbfgs_strerror - diff --git a/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS_minimizer.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS_minimizer.scala.bak deleted file mode 100644 index ed7d5d533..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS_minimizer.scala.bak +++ /dev/null @@ -1,107 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Jul 12 16:13:47 EDT 2021 - * @see LICENSE (MIT style license file). 
- * - * @title BFGS Line Search Minimizers - * - * @see github.com/clementfarabet/lbfgs/blob/master/lbfgs.h - * @see github.com/chokkan/liblbfgs/blob/master/lib/lbfgs.c - */ - -package scalation -package optimization - -import scala.math.{abs, max, sqrt} - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Find a minimizer of an interpolated cubic function. - * @param cm The minimizer of the interpolated cubic. - * @param u The value of one point, u. - * @param fu The value of f(u). - * @param du The value of f'(u). - * @param v The value of another point, v. - * @param fv The value of f(v). - * @param du The value of f'(v). - */ -def cubic_minimizer (cm: Double, u: Double, fu: Double, du: Double, - v: Double, fv: Double, dv: Double): Double = - val d = v - u - val theta = (fu - fv) * 3 / d + du + dv - var p = abs (theta) - var q = abs (du) - var r = abs (dv) - val s = max3 (p, q, r) - // gamma = s*sqrt((theta/s)**2 - (du/s) * (dv/s)) - val a = theta / s - var gamma = s * sqrt (a * a - (du / s) * (dv / s)) - if v < u then gamma = -gamma - p = gamma - du + theta - q = gamma - du + gamma + dv - r = p / q - u + r * d // retrun updated cm -end cubic_minimizer - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Find a minimizer of an interpolated cubic function. - * @param cm The minimizer of the interpolated cubic. - * @param u The value of one point, u. - * @param fu The value of f(u). - * @param du The value of f'(u). - * @param v The value of another point, v. - * @param fv The value of f(v). - * @param dv The value of f'(v). - * @param xmin The minimum value. - * @param xmax The maximum value. 
- */ -def cubic_minimizer2 (cm: Double, u: Double, fu: Double, du: Double, - v: Double, fv: Double, dv: Double, xmin: Double, xmax: Double): Double = - val d = v - u - val theta = (fu - fv) * 3 / d + du + dv - var p = abs (theta) - var q = abs (du) - var r = abs (dv) - val s = max3 (p, q, r) - // gamma = s*sqrt((theta/s)**2 - (du/s) * (dv/s)) - val a = theta / s - var gamma = s * sqrt (max (0, a * a - (du / s) * (dv / s))) - if u < v then gamma = -gamma - p = gamma - dv + theta - q = gamma - dv + gamma + du - r = p / q - if r < 0.0 && gamma != 0.0 then v - r * d // return updated cm - else if d > 0 then xmax - else xmin -end cubic_minimizer2 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Find a minimizer of an interpolated quadratic function. - * @param qm The minimizer of the interpolated quadratic. - * @param u The value of one point, u. - * @param fu The value of f(u). - * @param du The value of f'(u). - * @param v The value of another point, v. - * @param fv The value of f(v). - */ -def quad_minimizer (qm: Double, u: Double, fu: Double, du: Double, v: Double, fv: Double): Double = - val a = v - u - u + du / ((fu - fv) / a + du) / 2 * a // return update qm -end quad_minimizer - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Find a minimizer of an interpolated quadratic function. - * @param qm The minimizer of the interpolated quadratic. - * @param u The value of one point, u. - * @param du The value of f'(u). - * @param v The value of another point, v. - * @param dv The value of f'(v). 
- */ -def quad_minimizer2 (qm: Double, u: Double, du: Double, v: Double, dv: Double): Double = - val a = u - v - v + dv / (dv - du) * a // return update qm -end quad_minimizer2 - diff --git a/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS_parameter.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS_parameter.scala.bak deleted file mode 100644 index 3adb4be8d..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/old/L_BFGS_parameter.scala.bak +++ /dev/null @@ -1,181 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Jul 12 16:13:47 EDT 2021 - * @see LICENSE (MIT style license file). - * - * @title BFGS Hyper-Parameters - * - * @see github.com/clementfarabet/lbfgs/blob/master/lbfgs.h - * @see github.com/chokkan/liblbfgs/blob/master/lib/lbfgs.c - */ - -package scalation -package optimization - -import scalation.mathstat.VectorD - -import BFGS_code._ -import BFGS_LSA._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BFGS_parameter` case class provides hyper-parameters BFGS optimization. - *------------------------------------------------------------------------------ - * @param m The number of corrections to approximate the inverse hessian matrix. - * The L-BFGS routine stores the computation results of previous \ref m - * iterations to approximate the inverse hessian matrix of the current - * iteration. This parameter controls the size of the limited memories - * (corrections). The default value is \c 6. Values less than \c 3 are - * not recommended. Large values will result in excessive computing time. - *------------------------------------------------------------------------------ - * @param epsilon Epsilon for convergence test. - * This parameter determines the accuracy with which the solution is to - * be found. 
A minimization terminates when - * ||g|| < \ref epsilon * max(1, ||x||), - * where ||.|| denotes the Euclidean (L2) norm. The default value is \c 1e-5. - *------------------------------------------------------------------------------ - * @param past Distance for delta-based convergence test. - * This parameter determines the distance, in iterations, to compute - * the rate of decrease of the objective function. If the value of this - * parameter is zero, the library does not perform the delta-based - * convergence test. The default value is \c 0. - *------------------------------------------------------------------------------ - * @param delta Delta for convergence test. - * This parameter determines the minimum rate of decrease of the objective function. - * The library stops iterations when the following condition is met: - * (f' - f) / f < \ref delta, - * where f' is the objective value of \ref past iterations ago, and f is - * the objective value of the current iteration. The default value is \c 0. - *------------------------------------------------------------------------------ - * @param max_iterations The maximum number of iterations. - * The lbfgs() function terminates an optimization process with - * ::LBFGSERR_MAXIMUMITERATION status code when the iteration count - * exceedes this parameter. Setting this parameter to zero continues an - * optimization process until a convergence or error. The default value is \c 0. - *------------------------------------------------------------------------------ - * @param linesearch The line search algorithm. - * This parameter specifies a line search algorithm to be used by the L-BFGS routine. - *------------------------------------------------------------------------------ - * @param max_linesearch The maximum number of trials for the line search. - * This parameter controls the number of function and gradients evaluations - * per iteration for the line search routine. The default value is \c 20. 
- *------------------------------------------------------------------------------ - * @param min_step The minimum step of the line search routine. - * The default value is \c 1e-20. This value need not be modified unless - * the exponents are too large for the machine being used, or unless the - * problem is extremely badly scaled (in which case the exponents should be increased). - *------------------------------------------------------------------------------ - * @param max_step The maximum step of the line search. - * The default value is \c 1e+20. This value need not be modified unless - * the exponents are too large for the machine being used, or unless the - * problem is extremely badly scaled (in which case the exponents should be increased). - *------------------------------------------------------------------------------ - * @param ftol A parameter to control the accuracy of the line search routine. - * The default value is \c 1e-4. This parameter should be greater - * than zero and smaller than \c 0.5. - *------------------------------------------------------------------------------ - * @param wolfe A coefficient for the Wolfe condition. - * This parameter is valid only when the backtracking line-search - * algorithm is used with the Wolfe condition, - * ::LBFGS_LINESEARCH_BACKTRACKING_STRONG_WOLFE or - * ::LBFGS_LINESEARCH_BACKTRACKING_WOLFE . - * The default value is \c 0.9. This parameter should be greater - * the \ref ftol parameter and smaller than \c 1.0. - *------------------------------------------------------------------------------ - * @param gtol A parameter to control the accuracy of the line search routine. - * The default value is \c 0.9. If the function and gradient - * evaluations are inexpensive with respect to the cost of the - * iteration (which is sometimes the case when solving very large - * problems) it may be advantageous to set this parameter to a small - * value. A typical small value is \c 0.1. 
This parameter shuold be - * greater than the \ref ftol parameter (\c 1e-4) and smaller than \c 1.0. - *------------------------------------------------------------------------------ - * @param xtol The machine precision for floating-point values. - * This parameter must be a positive value set by a client program to - * estimate the machine precision. The line search routine will terminate - * with the status code (::LBFGSERR_ROUNDING_ERROR) if the relative width - * of the interval of uncertainty is less than this parameter. - *------------------------------------------------------------------------------ - * @param orthantwise_c Coeefficient for the L1 norm of variables. - * This parameter should be set to zero for standard minimization - * problems. Setting this parameter to a positive value activates - * Orthant-Wise Limited-memory Quasi-Newton (OWL-QN) method, which - * minimizes the objective function F(x) combined with the L1 norm |x| - * of the variables, {F(x) + C |x|}. This parameter is the coeefficient - * for the |x|, i.e., C. As the L1 norm |x| is not differentiable at - * zero, the library modifies function and gradient evaluations from - * a client program suitably; a client program thus have only to return - * the function value F(x) and gradients G(x) as usual. The default value is zero. - *------------------------------------------------------------------------------ - * @param orthantwise_start Start index for computing L1 norm of the variables. - * This parameter is valid only for OWL-QN method - * (i.e., \ref orthantwise_c != 0). This parameter b (0 <= b < N) - * specifies the index number from which the library computes the - * L1 norm of the variables x, - * |x| := |x_{b}| + |x_{b+1}| + ... + |x_{N}| . - * In other words, variables x_1, ..., x_{b-1} are not used for - * computing the L1 norm. Setting b (0 < b < N), one can protect - * variables, x_1, ..., x_{b-1} (e.g., a bias term of logistic - * regression) from being regularized. 
The default value is zero. - *------------------------------------------------------------------------------ - * @param orthantwise_end End index for computing L1 norm of the variables. - * This parameter is valid only for OWL-QN method - * (i.e., \ref orthantwise_c != 0). This parameter e (0 < e <= N) - * specifies the index number at which the library stops computing the - * L1 norm of the variables x. - */ -case class BFGS_parameter (m: Int = 6, - epsilon: Double = 1e-6, - past: Int = 0, - delta: Double = 1e-6, - max_iterations: Int = 1000, - linesearch: BFGS_LSA = LBFGS_LINESEARCH_BACKTRACKING_WOLFE, - max_linesearch: Int = 40, - min_step: Double = 1e-20, - max_step: Double = 1e20, - ftol: Double = 1e-6, - wolfe: Double = 0.9, - gtol: Double = 0.9, - xtol: Double = 1e-16, - orthantwise_c: Double = 0.0, - orthantwise_start: Int = 1, - var orthantwise_end: Int = -1): - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return default parameter values as a vector. - */ - def default_param = VectorD (6.0, 1e-5, 0.0, 1e-5, 0.0, LBFGS_LINESEARCH_DEFAULT.ordinal, - 40.0, 1e-20, 1e20, 1e-4, 0.9, 0.9, 1.0e-16, 0.0, 0.0, -1.0) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Check the input parameters for errors. 
- * @param n the dimension of the search space - */ - def check_parameters (n: Int): Int = - if n <= 0 then return LBFGSERR_INVALID_N.code - if epsilon < 0.0 then return LBFGSERR_INVALID_EPSILON.code - if past < 0 then return LBFGSERR_INVALID_TESTPERIOD.code - if delta < 0.0 then return LBFGSERR_INVALID_DELTA.code - if min_step < 0.0 then return LBFGSERR_INVALID_MINSTEP.code - if max_step < min_step then return LBFGSERR_INVALID_MAXSTEP.code - if ftol < 0.0 then return LBFGSERR_INVALID_FTOL.code - if linesearch == LBFGS_LINESEARCH_BACKTRACKING_WOLFE || - linesearch == LBFGS_LINESEARCH_BACKTRACKING_STRONG_WOLFE then - if wolfe <= ftol || 1.0 <= wolfe then return LBFGSERR_INVALID_WOLFE.code - end if - if gtol < 0.0 then return LBFGSERR_INVALID_GTOL.code - if xtol < 0.0 then return LBFGSERR_INVALID_XTOL.code - if max_linesearch <= 0 then return LBFGSERR_INVALID_MAXLINESEARCH.code - if orthantwise_c < 0.0 then return LBFGSERR_INVALID_ORTHANTWISE.code - if orthantwise_start < 0 || n < orthantwise_start then - return LBFGSERR_INVALID_ORTHANTWISE_START.code - end if - if orthantwise_end < 0 then orthantwise_end = n - if n < orthantwise_end then return LBFGSERR_INVALID_ORTHANTWISE_END.code - 0 // return success - end check_parameters - -end BFGS_parameter - - diff --git a/target/scala-3.6.4/classes/scalation/optimization/old/NelderMeadSimplex.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/old/NelderMeadSimplex.scala.bak deleted file mode 100644 index 6087ceaeb..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/old/NelderMeadSimplex.scala.bak +++ /dev/null @@ -1,251 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Yulong Wang - * @version 2.0 - * @date Thurs Jun 29 13:13:42 EDT 2023 - * @see LICENSE (MIT style license file). - * - * @see DOI 10.1002/anac.200410015 by Singer and Singer 2004 - * @see The Dr. 
Thomas Harvey Rowan PhD dissertation 1990 - */ - -package scalation -package optimization - -import scala.math.{abs, max, min} -import scala.runtime.ScalaRunTime.stringOf -import scala.util.control.Breaks.{break, breakable} - -import scalation.mathstat._ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `NelderMeadSimplex` solves Non-Linear Programming (NLP) problems using - * the Nelder-Mead Simplex algorithm. Given a function f and its dimension - * n, the algorithm moves a simplex defined by n + 1 points in order to find - * an optimal solution. The algorithm is derivative-free. - * - * minimize f(x) - * - * @param f the vector-to-scalar objective function - * @param n the dimension of the search space - */ -class NelderMeadSimplex (f: FunctionV2S, n: Int) - extends Minimizer: - - private val debug = debugf ("NelderMeadSimplex", true) // debug function - private val flaw = flawf ("NelderMeadSimplex") // flaw function - private val np1 = n + 1 // number of vertices/points in simplex - private val simplex = Array.ofDim [FuncVec] (np1) // simplex { vetices } used for search - - private val alpha = 1.0 // alpha (> 0) parameter for reflection - private val beta = 0.5 // beta (0, 1) parameter for contraction - private val gamma = 2.0 // gamma (> 1) parameter for expansion - private val delta = 0.5 // delta (0, 1) parameter for shrinkage - - private var (f_h, f_s, f_l) = (0.0, 0.0, 0.0) // worst, second worst, best functional values - - if n < 2 then flaw ("init", "requires at least a 2-dimensional problem") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Initialize the search simplex by setting n + 1 vertices and computing - * their functional values. 
- * @param x0 the given starting point - * @param step the step size - */ - def initSimplex (x0: VectorD, step: Double): Unit = - simplex(0) = (f(x0), x0) // given starting point and its functional value - for i <- 1 to n do - val x = x0 + VectorD.oneAt (i-1, x0.dim) * step - simplex(i) = (f(x), x) - end for - sort () // order vertices high to low - end initSimplex - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Sort the vertices in non-increasing order (high to low). Then the key - * indices are worst/highest (h=0), second worst (s=1), and best/lowest (l=n). - */ - private def sort (): Unit = - for i <- 0 until n do - var im = i - for j <- i+1 to n if simplex(j)._1 > simplex(im)._1 do im = j - if im != i then - val t = simplex(i); simplex(i) = simplex(im); simplex(im) = t - end if - end for - end sort - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Calculate the centroid of the best-side of the simplex (excluding h=0), - * returning it and its functional value. - */ - private def centroid (): FuncVec = - val c = new VectorD (n) // the centroid of the simplex - for i <- 1 to n do c += simplex(i)._2 // add vertex points, except h=0 - val x_c = c / n.toDouble // divide by # vertices - 1 - (f(x_c), x_c) - end centroid - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Reflect: compute the reflection point of the worst point (h=0) across - * the centroid. - * @param x_c the best-side centroid of the simplex - * @param x_h the lowest point - */ - private def reflect (x_c: VectorD, x_h: VectorD): FuncVec = - val x_r = x_c + (x_c - x_h) * alpha - (f(x_r), x_r) - end reflect - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Expand: compute the expansion point beyond the reflection point. 
- * @param x_c the best-side centroid of the simplex - * @param x_r the reflection point - */ - private def expand (x_c: VectorD, x_r: VectorD): FuncVec = - val x_e = x_c + (x_r - x_c) * gamma - (f(x_e), x_e) - end expand - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Contract: compute the outer contraction point between x_r and x_c. - * @param x_c the best-side centroid of the simplex - * @param x_r the reflection point - */ - private def contractOut (x_c: VectorD, x_r: VectorD): FuncVec = - val x_o = x_c + (x_r - x_c) * beta - (f(x_o), x_o) - end contractOut - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Contract: compute the inner contraction point between x_h and x_c. - * @param x_c the best-side centroid of the simplex - * @param x_h the lowest point - */ - private def contractIn (x_c: VectorD, x_h: VectorD): FuncVec = - val x_i = x_c + (x_h - x_c) * beta - (f(x_i), x_i) - end contractIn - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Shrink: fixing the best/lowest point (l=n), move the rest of the points - * toward it. - */ - private def shrink (): Unit = - val x_l = simplex(n)._2 // the best vertex point - for i <- 0 until n do // i != n as until is exclusive - val x = x_l + (simplex(i)._2 - x_l) * delta - simplex(i) = (f(x), x) // updated vertex - end for - end shrink - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform an exact (e.g., `GoldenSectionLS`) or inexact (e.g., `WolfeLS`) line search. - * Search in direction dir, returning the distance z to move in that direction. - * Currently NOT USED, but may be used to find a better point to add to simplex. 
- * @param x the current point - * @param dir the direction to move in - * @param step the initial step size - */ - def lineSearch (x: VectorD, dir: VectorD, step: Double = STEP): Double = 0.0 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Replace the worst vertex (h=0) in the simplex with the new point. - * @param x_n the new replacement point - * @param f_n the new function value; otherwise time-consuming if objective function computationally expensive - * @param why indicated which operation is the basis for replacement - */ - private def replace (x_n: VectorD, f_n: Double, why: String): Unit = - debug ("replace", s"the worst point ${simplex(0)} with the new point ($x_n, $f_n based on $why") - simplex(0) = (f_n, x_n) -// sort () // removed, might not be finally adopted e.g. greedy expand - end replace - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Improve the simplex by replacing the worst/highest vertex (x_h) with - * a better one found on the line containing x_h and the centroid (x_c). - * Try the reflection, expansion, outer contraction and inner contraction - * points, in that order. If none succeeds, shrink the simplex and iterate. - * Return both distance and difference between x_h (worst) and x_l (best). 
- */ - def transformation (): (Double, Double) = - var dist = (simplex(0)._2 - simplex(n)._2).norm // distance between x_h and x_l - var diff = simplex(0)._1 - simplex(n)._1 // difference between f_h and f_l - - f_h = simplex(0)._1 // functional value for x_h (highest/worst) - f_s = simplex(1)._1 // functional value for x_s (second worst) - f_l = simplex(n)._1 // functional value for x_l (lowest/best) - val (f_c, x_c) = centroid () // compute best-side centroid of simplex - var f_con = Double.MaxValue // to collect the contract function return - val x_con = VectorD.fill (x_c.dim)(Double.MaxValue) // to collect the contract function return - val x_h = simplex(0)._2 // to be handy to use - - val (f_r, x_r) = reflect (x_c, x_h) // try to REFLECT the simplex - if f_r < f_s then - replace (x_r, f_r, "reflect") // accept REFLECT - if f_r < f_l then - val (f_e, x_e) = expand (x_c, x_r) // try to EXPAND - if f_e < f_l then replace (x_e, f_e, "expand") // accept EXPAND as greedy expansion else stick to reflect - else // we have fr ≥ fs. 
REFLECT if it helps, and try to CONTRACT - val (f_con, x_con) = - if f_r < f_h then - val con = contractOut (x_c, x_r) // outer contraction - f_con = con._1 // to collect the contract function return - for i <- 0 until con._2.dim do x_con(i) = con._2(i) // to collect the contract function return - else - val con = contractIn (x_c, x_h ) // inner contraction - f_con = con._1 // to collect the contract function return - for i <- 0 until con._2.dim do x_con(i) = con._2(i) // to collect the contract function return - if f_con < minn (f_r, f_h) then - replace (x_con, f_con, "contract") // accept CONTRACT, inside or outside - else - shrink() - sort () // re-establish vertex order - dist = (simplex(0)._2 - simplex(n)._2).norm // recompute the distance - diff = simplex(0)._1 - simplex(n)._1 // recompute the difference - (dist, diff) // return distance and difference - end transformation - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Solve the Non-Linear Programming (NLP) problem using the Nelder-Mead - * Simplex algorithm. 
- * @param x0 the given starting point - * @param step the initial step size - * @param toler the tolerance used for termination - */ - def solve (x0: VectorD, step: Double = 1, toler: Double = 100.0 * EPSILON): FuncVec = - initSimplex (x0, step) - debug ("solve", s"simplex = ${stringOf (simplex)}") - - var (k, go) = (1, true) - cfor (k <= MAX_IT && go, k += 1) { - val (dist, diff) = transformation () - debug ("solve", s"$k:\tdist = $dist, diff = $diff, \n\tsimplex = ${stringOf (simplex)}") -// if dist < toler || diff < toler then go = false // check termination condition - if dist < toler then go = false // check termination condition - } // cfor - val opt = simplex(n) - println (s"solve: optimal function, vertex = $opt") - opt // return the best functional value and vertex point - end solve - -end NelderMeadSimplex - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `nelderMeadSimplexTest` main function is used to test the `NelderMeadSimplex` class. - * > runMain scalation.optimization.nelderMeadSimplexTest - */ -@main def nelderMeadSimplexTest (): Unit = - - val x0 = VectorD (1.0, 1.0) // starting point - - banner ("Problem 1: (x_0 - 2)^2 + (x_1 - 3)^2 + 1") - def f (x: VectorD): Double = (x(0) - 2)~^2 + (x(1) - 3)~^2 + 1 - - val optimizer = new NelderMeadSimplex (f, 2) - val opt = optimizer.solve (x0) // optimal solution - println (s"optimal solution = (f(x), x) = $opt") - -end nelderMeadSimplexTest - diff --git a/target/scala-3.6.4/classes/scalation/optimization/old/SPSA.scala.bak b/target/scala-3.6.4/classes/scalation/optimization/old/SPSA.scala.bak deleted file mode 100644 index 4ee6a3dd7..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/old/SPSA.scala.bak +++ /dev/null @@ -1,119 +0,0 @@ -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Yulong - * @version 1.0 - * @date Thursday Feb 17 13:32:52 EDT 2022 - * @see LICENSE (MIT style license file). 
- * @title Simultaneous perturbation stochastic approximation - */ - -package scalation -package optimization - -import scala.math.pow -import scalation.mathstat._ -import scalation.random.Bernoulli - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - -//link1: https://www.jhuapl.edu/spsa/PDF-SPSA/Matlab-SPSA_Alg.pdf - -class SPSA (f: FunctionV2S, theta: VectorD, nSteps: Int = 20): -// extends Minimizer: - - private val debug = debugf ("SPSA", true) // debug function - //private var best = (f(x), x) // location zero solution - - private val alpha = 0.602 - private val gamma = 0.101 - private val A = 100 - private val a = 0.16 // these numbers are from Spall (1998) DOI: 10.1109/7.705889 - private val c = 1 - private val berny = Bernoulli (0.5,5) - private var best = theta // best theta or parameter to get the lowest error from loss function - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** - */ - def basic (): (Double, VectorD) = - for k <- 0 to nSteps do - val ak = a / pow(A+k+1, alpha) - val ck = c / pow(k+1, gamma) - val ck_delta = ck * (2 * berny.igen - 1) // ck * (-1 or 1) - val yplus = f(best + ck_delta) - val yminus = f(best - ck_delta) - val ghat = (yplus - yminus) / (2 * ck_delta) - best -= ak * ghat - end for - debug ("basic", s"best = $best") - (f(best), best) - end basic - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** - */ - def fastconvergence2 (): (Double, VectorD) = - // Reject iteration k → k + 1 if ||θ_k+1 - θ_k || is too - // large (does not require extra loss measurement) - // theta_k = theta_k+1 ?? - - // needs to define what is large ?? 
- var better = theta - val large = 0.11 - - for k <- 1 to nSteps do - val ak = a / pow(A+k+1, alpha) - val ck = c / pow(k+1, gamma) - val ck_delta = ck * (2 * berny.igen - 1) // shift = ck * (-1 or 1) - val yplus = f(best + ck_delta) // function at + shift - val yminus = f(best - ck_delta) // function at - shift - val ghat = (yplus - yminus) / (2 * ck_delta) // slope - better = best // save previous best - best -= ak * ghat // move opposite slope - - val reject = best - better - if reject.norm > large then - println (s"No $k $better") - debug ("fastconvergence2", s"k = $k, best = $best") - return (f(better), better) - end if - end for - debug ("fastconvergence2", s"k = $nSteps, best = $best") - (f(best), best) - end fastconvergence2 - - //other fast convergence from link1 to be looked into later. - -end SPSA - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The sPSATest main function if used to test the `SPSA` class. - * > runMain scalation.optimization.sPSATest - */ -@main def sPSATest (): Unit = - - banner ("Minimize: (x_0 - 3)^2 + (x_1 - 4)^2 + 1") - def f (x: VectorD): Double = (x(0) - 3.0)~^2 + (x(1) - 4.0)~^2 + 1.0 - val x0 = VectorD (1, 2) - val optimizer = new SPSA (f, x0, 200) - val opt = optimizer.basic () - println (s"][ optimal solution (f(x), x) = $opt") - -end sPSATest - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The sPSATest2 main function if used to test the `SPSA` class. 
- * > runMain scalation.optimization.sPSATest2 - */ -@main def sPSATest2 (): Unit = - - banner ("Minimize: (x_0 - 3)^2 + (x_1 - 4)^2 + 1") - def f (x: VectorD): Double = (x(0) - 3.0)~^2 + (x(1) - 4.0)~^2 + 1.0 - val x0 = VectorD (1, 2) - val optimizer = new SPSA (f, x0, 200) - val opt = optimizer.fastconvergence2 () - println (s"][ optimal solution (f(x), x) = $opt") - -end sPSATest2 - diff --git a/target/scala-3.6.4/classes/scalation/optimization/old/SPSA.scala.bak2 b/target/scala-3.6.4/classes/scalation/optimization/old/SPSA.scala.bak2 deleted file mode 100644 index 9ac205037..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/old/SPSA.scala.bak2 +++ /dev/null @@ -1,84 +0,0 @@ - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author Yulong Wang - * @version 2.0 - * @date Thursday Feb 17 13:32:52 EDT 2022 - * @see LICENSE (MIT style license file). - * - * @title Simultaneous Perturbation Stochastic Approximation - */ - -package scalation -package optimization - -import scala.math.pow - -import scalation.mathstat.{FunctionV2S, VectorD} -import scalation.random.{Bernoulli, Normal} - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `SPSA` class implements the Simultaneous Perturbation Stochastic Approximation - * algorithm for rough approximation of gradients. 
- * @see https://www.jhuapl.edu/spsa/PDF-SPSA/Matlab-SPSA_Alg.pdf - * @param f the vector to scalar function whose approximate gradient is sought - * @param theta the current point f(theta) - * @param nSteps the number of steps - */ -class SPSA (f: FunctionV2S, theta: VectorD, nSteps: Int = 20): -// extends Minimizer: - - private val alpha = 0.602 - private val gamma = 0.101 - private val A = 100 - private val a = 0.16 // these numbers are from Spall (1998) DOI: 10.1109/7.705889 - private val c = 1 - private val bernoully = Bernoulli (0.5, 5) - - var x_best = theta // best theta or parameter to get the lowest error from loss function - var f_best = Double.MaxValue - var x = theta.copy // use x for theta copy by value - - def basic (): Double = - for k <- 0 to nSteps do - val ak = a / pow (A + k + 1, alpha) - val ck = c / pow (k + 1, gamma) - val delta = 2 * bernoully.igen - 1 // -1 or 1 - val x_plus = x + ck * delta - val x_minus = x - ck * delta - val y_plus = f (x_plus) - val y_minus = f (x_minus) - val ghat = (y_plus - y_minus) / (2 * ck * delta) - x -= ak * ghat - if f(x) < f_best then - x_best = x.copy // copy by value - f_best = f(x_best) - end if - end for - - println (s"x_last is $x and y(x_last) at the end is ${f(x)} and \n " + - s"lowest is $x_best and $f_best") - f_best - end basic - -end SPSA - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `sPSATest` main function tests the SPSA class. 
- * > runMain scalation.optimization.sPSATest - */ -@main def sPSATest (): Unit = - - banner ("Minimize: (x_0 - 3)^2 + (x_1 - 4)^2 + 1") - val noise = Normal (0, 0.1) - - def f (x: VectorD): Double = (x(0) - 3)~^2 + (x(1) - 4)~^2 + 1 + noise.gen - - val x = VectorD (1, 2) - val optimizer = new SPSA (f, x, 1200) - val opt = optimizer.basic () -// val opt = optimizer.fastconvergence2 () - println (s"][ optimal solution (f(x), x) = $opt") - -end sPSATest - diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS$.class deleted file mode 100644 index afcc446e5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS$package$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS$package$.class deleted file mode 100644 index 3abc7a9aa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS$package.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS$package.class deleted file mode 100644 index e67d3ac03..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS$package.tasty deleted file mode 100644 index 27e7b0743..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS.class 
deleted file mode 100644 index 933a9dcaf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS.scalaa b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS.scalaa deleted file mode 100644 index 26b03a49a..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS.scalaa +++ /dev/null @@ -1,386 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Hao Peng - * @version 2.0 - * @date Fri Sep 30 13:37:32 EDT 2011 - * @see LICENSE (MIT style license file). - * - * @note Broyden–Fletcher–Goldfarb–Shanno (BFGS) Quasi-Newton Optimizer - * - * @see The Superlinear Convergence of a Modified BFGS-Type Method for Unconstrained Optimization - * @see On the Robustness of Conjugate-Gradient Methods and Quasi-Newton Methods - * @see Limited Memory BFGS for Nonsmooth Optimization - * @see http://en.wikipedia.org/wiki/BFGS_method - * @see http://www.personal.psu.edu/cxg286/Math555.pdf - * @see http://people.orie.cornell.edu/aslewis/publications/bfgs_inexactLS.pdf - * @see http://people.orie.cornell.edu/aslewis/publications/bfgs_exactLS.pdf - */ - -package scalation -package optimization - -import scala.math.{abs, max} -import scala.util.control.Breaks.{breakable, break} - -import scalation.calculus.Differential.∇ -import scalation.mathstat._ - -import MatrixD.eye -import QNewton.aHi_inc - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BFGS` the class implements the Broyden–Fletcher–Goldfarb–Shanno (BFGS) - * Quasi-Newton Algorithm for solving Non-Linear Programming (NLP) problems. - * BFGS determines a search direction by deflecting the steepest descent direction - * vector (opposite the gradient) by multiplying it by a matrix that approximates - * the inverse Hessian. 
Note, this implementation may be set up to work with the - * matrix b (approximate Hessian) or directly with the aHi matrix (the inverse of b). - * - * minimize f(x) - * subject to g(x) <= 0 [ optionally g(x) == 0 ] - * - * @param f the objective function to be minimized - * @param g the constraint function to be satisfied, if any - * @param ineq whether the constraint is treated as inequality (default) or equality - * @param exactLS whether to use exact (e.g., `GoldenLS`) - * or inexact (e.g., `WolfeLS`) Line Search - */ -class BFGS (f: FunctionV2S, g: FunctionV2S = null, - ineq: Boolean = true, exactLS: Boolean = false) - extends Minimizer: - - private val debug = debugf ("BFGS", true) // debug function - private val flaw = flawf ("BFGS") // flaw function - private val WEIGHT = 1000.0 // weight on penalty for constraint violation - private var bfgs = true // use BFGS (true) or Gradient Descent (false) - - private var df: Array [FunctionV2S] = null // gradient as explicit functions for partials - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Use the Gradient Descent algorithm rather than the default BFGS algorithm. - */ - def setSteepest (): Unit = bfgs = false - -// private var b: MatrixD = null // approx. Hessian matrix (use b or aHi) - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /* Update the b matrix, whose inverse is used to deflect -gradient to a - * better direction than steepest descent (-gradient). 
- * @param s the step vector (next point - current point) - * @param y the difference in the gradients (next - current) - */ -// def updateB (s: VectorD, y: VectorD): Unit = -// { -// var sy = s dot y // dot product of s and y -// if abs (sy) < TOL then sy = TOL -// val sb = s * b -// b += MatrixD.outer (y, y) / sy - MatrixD.outer (sb, sb) / (sb dot s) -// } // updateB - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the partial derivative functions. If these functions are available, - * they are more efficient and more accurate than estimating the values - * using difference quotients (the default approach). - * @param grad the gradient as explicit functions for partials - */ - def setDerivatives (grad: Array [FunctionV2S]): Unit = - if g != null then flaw ("setDerivatives", "only works for unconstrained problems") - df = grad // use given functions for partial derivatives - end setDerivatives - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The objective function f plus a weighted penalty based on the constraint - * function g. - * @param x the coordinate values of the current point - */ - override def fg (x: VectorD): Double = - val f_x = f(x) - if g == null then // unconstrained - f_x - else // constrained, g(x) <= 0 - val penalty = if ineq then max (g(x), 0.0) else abs (g(x)) - f_x + abs (f_x) * WEIGHT * penalty * penalty - end if - end fg - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Perform an exact GoldenSectionLS or inexact WolfeLS Line Search. - * Search in direction dir, returning the distance z to move in that direction. 
- * Default to - * @param x the current point - * @param dir the direction to move in - * @param step the initial step size - */ - def lineSearch (x: VectorD, dir: VectorD, step: Double = STEP): Double = - def f_1D (z: Double): Double = fg(x + dir * z) // create a 1D function - val ls = if exactLS then new GoldenSectionLS (f_1D) // Golden Section Line Search - else new WolfeLS (f_1D) // Wolfe line search ((c1 = .0001, c2 = .9) - ls.search (step) // perform a Line Search - end lineSearch - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Solve the following Non-Linear Programming (NLP) problem using BFGS: - * min { f(x) | g(x) <= 0 }. To use explicit functions for gradient, - * replace "∇ (fg)(xn)" with "gradientD (df, xn)" - * @param x0 the starting point - * @param step_ the initial step size - * @param toler the tolerance - */ - def solve (x0: VectorD, step_ : Double = STEP, toler: Double = TOL): FuncVec = - debug ("solve", s"x0 = $x0, step_ = $step_, toler = $toler") - - var step = step_ // set the current step size - var x = (x0, ∇ (fg)(x0)) // current (point, gradient) - var xx: (VectorD, VectorD) = (null, null) // next (point, gradient) - var dir: VectorD = null // initial direction is -gradient - var s: VectorD = null // step vector - - var aHi = eye (x0.dim, x0.dim) // approximate Hessian inverse (aHi) matrix - // start with identity matrix - - debug ("solve", s"||gradient||^2 = ${x._2.normSq}") - - var mgn = 0.0 // mean gradient normSq - var diff = 0.0 // diff between current and next point - val diffTol = toler * toler // tolerance for changes in diff - var count = 0 // number of times mgn stayed roughly same (< diffTol) - val maxCount = 10 // max number of times mgn stayed roughly same => terminate - val n = x0.dim // size of the parameter vector - var goodGrad = true // good gradient value flag (not NaN nor infinity) - var xn: VectorD = null // next value for x (point) - - breakable { - for it <- 1 to MAX_IT do - 
debug ("solve", s"start of iteration $it: step = $step, f(x) = ${fg(x._1)}") - if goodGrad then - dir = if bfgs then -(aHi * x._2) else -x._2 - end if - s = dir * lineSearch (x._1, dir, step) // update step vector - xn = x._1 + s // next x point - if goodGrad then - for xx_i <- xn if xx_i.isNaN || xx_i.isInfinite do break () - diff = (xn - x._1).normSq / n // measure of distance moved - end if - xx = (xn, ∇ (fg)(xn)) // compute the next point - mgn = xx._2.normSq / n // compute mean gradient normSq - debug ("solve", s"current mean gradient normSq = $mgn") - - if mgn.isNaN || mgn.isInfinite then - goodGrad = false // gradient blew up - step /= 2.0 // halve the step size - else if mgn < toler || count > maxCount then { x = xx; break () } // return when vanished gradient or haven't moved - else if goodGrad then - if diff < diffTol then count += 1 // increment no movement counter - if step < step_ then step *= 1.5 // increase step size by 50% - else - goodGrad = true // gradient is currently fine - end if - - if goodGrad then - if bfgs then aHi += aHi_inc (aHi, s, xx._2 - x._2) // update the deflection matrix aHi - debug ("solve", s"(it = $it) move from ${x._1} to ${xx._1} where fg(xx._1) = ${fg(xx._1)}") - x = xx // make the next point the current point - end if - end for - } // breakable - (fg(x._1), x._1) // return functional value and current point - end solve - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Solve for an optima by finding a local optima close to the starting point/guess 'x0'. - * This version uses explicit functions for the gradient (partials derivatives). 
- * @param x0 the starting point/guess - * @param grad the gradient as explicit functions for partials - * @param step_ the initial step size - * @param toler the tolerance - */ - def solve2 (x0: VectorD, grad: FunctionV2V, step_ : Double = STEP, toler: Double = TOL): FuncVec = - debug ("solve2", s"x0 = $x0, step_ = $step_, toler = $toler") - - var step = step_ // set the current step size - var x = (x0, grad (x0)) // current (point, gradient) - var xx: (VectorD, VectorD) = (null, null) // next (point, gradient) - var dir: VectorD = null // initial direction is -gradient - var s: VectorD = null // step vector - - var aHi = eye (x0.dim, x0.dim) // approximate Hessian inverse (aHi) matrix - // start with identity matrix - - debug ("solve2", s"||gradient||^2 = ${x._2.normSq}") - - var mgn = 0.0 // mean gradient normSq - var diff = 0.0 // diff between current and next point - val diffTol = toler * toler // tolerance for changes in diff - var count = 0 // number of times mgn stayed roughly same (< diffTol) - val maxCount = 10 // max number of times mgn stayed roughly same => terminate - val n = x0.dim // size of the parameter vector - var goodGrad = true // good gradient value flag (not NaN nor infinity) - var xn: VectorD = null // next value for x (point) - - breakable { - for it <- 1 to MAX_IT do - debug ("solve2", s"start of iteration $it: step = $step, f(x) = ${fg(x._1)}") - if goodGrad then - dir = if bfgs then -(aHi * x._2) else -x._2 - end if - s = dir * lineSearch (x._1, dir, step) // update step vector - xn = x._1 + s // next x point - if goodGrad then - for xx_i <- xn if xx_i.isNaN || xx_i.isInfinite do break () - diff = (xn - x._1).normSq / n // measure of distance moved - end if - xx = (xn, grad (xn)) // compute the next point - mgn = xx._2.normSq / n // compute mean gradient normSq - debug ("solve2", s"current mean gradient normSq = $mgn") - - if mgn.isNaN || mgn.isInfinite then - goodGrad = false // gradient blew up - step /= 2.0 // halve the step size - 
else if mgn < toler || count > maxCount then { x = xx; break () } // return when vanished gradient or haven't moved - else if goodGrad then - if diff < diffTol then count += 1 // increment no movement counter - if step < step_ then step *= 1.5 // increase step size by 50% - else - goodGrad = true // gradient is currently fine - end if - - if goodGrad then - if bfgs then aHi += aHi_inc (aHi, s, xx._2 - x._2) // update the deflection matrix aHi - debug ("solve2", s"(it = $it) move from ${x._1} to ${xx._1} where fg(xx._1) = ${fg(xx._1)}") - x = xx // make the next point the current point - end if - end for - } // breakable - (fg(x._1), x._1) // return functional value and current point - end solve2 - -end BFGS - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `BFGS` companion object provides factory methods. - */ -object BFGS: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Create a Steepest Descent (default) or BFGS optimizer. 
- * @param f the objective function to be minimized - * @param g the constraint function to be satisfied, if any - * @param ineq whether the constraint is treated as inequality (default) or equality - * @param exactLS whether to use exact (e.g., `GoldenLS`) - * or inexact (e.g., `WolfeLS`) Line Search - * @param steepest whether to use Steepest Descent rather than BFGS - */ - def apply (f: FunctionV2S, g: FunctionV2S = null, - ineq: Boolean = true, exactLS: Boolean = false, - steepest: Boolean = true): BFGS = - if steepest then - val steep = new BFGS (f, f, ineq, exactLS) - steep.setSteepest () - steep - else - new BFGS (f, f, ineq, exactLS) - end if - end apply - -end BFGS - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bFGSTest` main function is used to test the `BFGS` class on f(x): - * f(x) = (x_0 - 3)^2 + (x_1 - 4)^2 + 1 - * > runMain scalation.optimization.bFGSTest - */ -@main def bFGSTest (): Unit = - - val step = 1.0 // step size (may need adjustment) - val n = 2 // dimension of the search space - val x0 = new VectorD (n) // starting point - - banner ("Minimize: (x_0 - 3)^2 + (x_1 - 4)^2 + 1") - def f (x: VectorD): Double = (x(0) - 3.0)~^2 + (x(1) - 4.0)~^2 + 1.0 - - def grad (x: VectorD): VectorD = VectorD (2 * x(0) - 6, 2 * x(1) - 8) - - val optimizer = new BFGS (f) -// val opt = optimizer.solve (x0, step) // use numerical partials - val opt = optimizer.solve2 (x0, grad, step) // use functions for partials - println (s"][ optimal solution (f(x), x) = $opt") - -end bFGSTest - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bFGSTest2` main function is used to test the `BFGS` class on f(x): - * f(x) = x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1 - * > runMain scalation.optimization.bFGSTest2 - */ -@main def bFGSTest2 (): Unit = - - val step = 1.0 // step size (may need adjustment) - val n = 2 // dimension of the search space - val x0 = new VectorD (n) // starting point - - 
banner ("Minimize: x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1") - def f (x: VectorD): Double = x(0)~^4 + (x(0) - 3.0)~^2 + (x(1) - 4.0)~^2 + 1.0 - - def grad (x: VectorD): VectorD = VectorD (4.0 * x(0)~^3 + 2 * x(0) - 6, 2 * x(1) - 8) - - val optimizer = new BFGS (f) -// val opt = optimizer.solve (x0, step) // use numerical partials - val opt = optimizer.solve2 (x0, grad, step) // use functions for partials - println (s"][ optimal solution (f(x), x) = $opt") - -end bFGSTest2 - - -//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bFGSTest3` main function is used to test the `BFGS_NoLS` class. - * This test uses the Rosenbrock function. - f(x) = (1 - x_0)^2 + 100 (x_1 - x_0^2)^2") - * > runMain scalation.optimization.bFGSTest3 - */ -@main def bFGSTest3 (): Unit = - - val step = 1.0 // step size (may need adjustment) - val n = 2 // dimension of the search space - val x0 = new VectorD (n) // starting point - - banner ("Minimize: (1 - x_0)^2 + 100 (x_1 - x_0^2)^2") - def f (x: VectorD): Double = (1.0 - x(0))~^2 + 100.0 * (x(1) - x(0)~^2)~^2 - - def grad (x: VectorD): VectorD = VectorD (-2.0 * (1 - x(0)) - 400.0 * x(0) * (x(1) - x(0)~^2), - 200.0 * (x(1) - x(0)~^2)) - - val optimizer = new BFGS (f) -// val opt = optimizer.solve (x0, step) // use numerical partials - val opt = optimizer.solve2 (x0, grad, step) // use functions for partials - println (s"][ optimal solution (f(x), x) = $opt") - -end bFGSTest3 - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `bFGSTest4` main function is used to test the `BFGS` class on f(x): - * f(x) = 1/x(0) + x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1 - * > runMain scalation.optimization.bFGSTest4 - */ -@main def bFGSTest4 (): Unit = - - val step = 1.0 // step size (may need adjustment) - val x0 = VectorD (0.1, 0.0) // starting location - - banner ("Minimize: 1/x(0) + x_0^4 + (x_0 - 3)^2 + (x_1 - 4)^2 + 1") - def f (x: VectorD): Double = 1/x(0) + x(0)~^4 + (x(0) - 
3.0)~^2 + (x(1) - 4.0)~^2 + 1.0 - - def grad (x: VectorD): VectorD = VectorD (-(x(0)~^(-2)) + 4.0 * x(0)~^3 + 2 * x(0) - 6, 2 * x(1) - 8) - - val optimizer = new BFGS (f) -// val opt = optimizer.solve (x0, step) // use numerical partials - val opt = optimizer.solve2 (x0, grad, step) // use functions for partials - println (s"][ optimal solution (f(x), x) = $opt") - -// opt = optimizer.resolve (n) // try multiple starting points -// println (s"][ optimal solution (f(x), x) = $opt") - -end bFGSTest4 - diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS.tasty deleted file mode 100644 index c4d8989a5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS$.class deleted file mode 100644 index 91ad42e81..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS$package$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS$package$.class deleted file mode 100644 index a0de49a0e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS$package.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS$package.class deleted file mode 100644 index 403da4e4a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS$package.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS$package.tasty deleted file mode 100644 index 70d2691a9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS.class deleted file mode 100644 index be5f71208..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS.tasty deleted file mode 100644 index 843de4784..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/BFGS_NoLS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS$.class deleted file mode 100644 index 533a8e8f3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS$package$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS$package$.class deleted file mode 100644 index 59858f5ee..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS$package.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS$package.class deleted file mode 100644 index 7f2f46723..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS$package.tasty deleted file mode 100644 index 4deb277bf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS.class deleted file mode 100644 index 99c14f17a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS.tasty deleted file mode 100644 index 310bf32b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/DM_LBFGS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/EvaluationLogic.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/EvaluationLogic.class deleted file mode 100644 index 634b97965..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/EvaluationLogic.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/EvaluationLogic.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/EvaluationLogic.tasty deleted file mode 100644 index e8e91cf7b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/EvaluationLogic.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionEvaluation$.class 
b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionEvaluation$.class deleted file mode 100644 index 353d240a5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionEvaluation$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionEvaluation.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionEvaluation.class deleted file mode 100644 index 2dd707c59..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionEvaluation.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionEvaluation.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionEvaluation.tasty deleted file mode 100644 index 6123914fa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionEvaluation.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionOptimization$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionOptimization$.class deleted file mode 100644 index cfa36167c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionOptimization$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionOptimization.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionOptimization.class deleted file mode 100644 index 0966764ec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionOptimization.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionOptimization.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionOptimization.tasty deleted file mode 100644 index 6d92946ad..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/FunctionOptimization.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS$.class deleted file mode 100644 index 438945649..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS$package$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS$package$.class deleted file mode 100644 index a8ae54876..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS$package.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS$package.class deleted file mode 100644 index b90d5d247..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS$package.tasty deleted file mode 100644 index a4936d9f4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS.class deleted file mode 100644 index d2508f44f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS.tasty 
b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS.tasty deleted file mode 100644 index ee3fc1103..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingArmijo$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingArmijo$.class deleted file mode 100644 index d48e6f272..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingArmijo$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingArmijo.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingArmijo.class deleted file mode 100644 index a579b1214..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingArmijo.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingArmijo.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingArmijo.tasty deleted file mode 100644 index 590a6c989..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingArmijo.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingOrthantWise$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingOrthantWise$.class deleted file mode 100644 index 3cc7f6de6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingOrthantWise$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingOrthantWise.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingOrthantWise.class 
deleted file mode 100644 index 876b55df9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingOrthantWise.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingOrthantWise.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingOrthantWise.tasty deleted file mode 100644 index 5842e93fd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingOrthantWise.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingStrongWolfe$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingStrongWolfe$.class deleted file mode 100644 index 5e4ebd845..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingStrongWolfe$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingStrongWolfe.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingStrongWolfe.class deleted file mode 100644 index a5883ae84..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingStrongWolfe.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingStrongWolfe.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingStrongWolfe.tasty deleted file mode 100644 index beb5ffd65..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingStrongWolfe.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingWolfe$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingWolfe$.class deleted file mode 
100644 index 9af61b2eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingWolfe$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingWolfe.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingWolfe.class deleted file mode 100644 index d1eca09df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingWolfe.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingWolfe.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingWolfe.tasty deleted file mode 100644 index f92f274f9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSBacktrackingWolfe.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSCallbackData$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSCallbackData$.class deleted file mode 100644 index 06b8db8be..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSCallbackData$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSCallbackData.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSCallbackData.class deleted file mode 100644 index 8f59189ae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSCallbackData.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSCallbackData.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSCallbackData.tasty deleted file mode 100644 index a04064b72..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSCallbackData.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSIterationData$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSIterationData$.class deleted file mode 100644 index 1a2f5b638..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSIterationData$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSIterationData.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSIterationData.class deleted file mode 100644 index adc6724e5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSIterationData.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSIterationData.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSIterationData.tasty deleted file mode 100644 index 7da39be01..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSIterationData.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch$.class deleted file mode 100644 index c6d10082d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch$package$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch$package$.class deleted file mode 100644 index 662cc5fa9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch$package.class 
b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch$package.class deleted file mode 100644 index d49b673b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch$package.tasty deleted file mode 100644 index 7f54e3c13..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch.class deleted file mode 100644 index c0cc843be..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch.tasty deleted file mode 100644 index a784477f1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearch.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$1.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$1.class deleted file mode 100644 index 82e49baa5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$2.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$2.class deleted file mode 100644 
index 21e53cc45..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$3.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$3.class deleted file mode 100644 index 8e308fe98..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$4.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$4.class deleted file mode 100644 index 7485c535c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$5.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$5.class deleted file mode 100644 index 2cdebcbb1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$6.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$6.class deleted file mode 100644 index 51003c09d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$7.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$7.class deleted file mode 100644 index 3f483149a..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$$anon$7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$.class deleted file mode 100644 index afe067dd5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg.class deleted file mode 100644 index 746ab2adf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg.tasty deleted file mode 100644 index a7ecc3c4f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchAlg.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchFailure$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchFailure$.class deleted file mode 100644 index c36f193ba..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchFailure$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchFailure.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchFailure.class deleted file mode 100644 index aada70f93..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchFailure.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchFailure.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchFailure.tasty deleted file mode 100644 index 1c96851b3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchFailure.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchIncomplete$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchIncomplete$.class deleted file mode 100644 index 25012a01c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchIncomplete$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchIncomplete.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchIncomplete.class deleted file mode 100644 index 63f3f5c8c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchIncomplete.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchIncomplete.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchIncomplete.tasty deleted file mode 100644 index 20dc1013c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchIncomplete.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchPrms$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchPrms$.class deleted file mode 100644 index 82301ba1a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchPrms$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchPrms.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchPrms.class deleted file mode 100644 index feb2b6c93..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchPrms.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchPrms.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchPrms.tasty deleted file mode 100644 index 1c51d7410..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchPrms.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchStep$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchStep$.class deleted file mode 100644 index 41182697d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchStep$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchStep.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchStep.class deleted file mode 100644 index 8f9752ef5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchStep.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchStep.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchStep.tasty deleted file mode 100644 index db50c8b60..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSLineSearchStep.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSMoreThuente$.class 
b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSMoreThuente$.class deleted file mode 100644 index 5d06ead71..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSMoreThuente$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSMoreThuente.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSMoreThuente.class deleted file mode 100644 index a10dc0d08..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSMoreThuente.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSMoreThuente.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSMoreThuente.tasty deleted file mode 100644 index 8b2782f9a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSMoreThuente.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSPrms$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSPrms$.class deleted file mode 100644 index 98ddd959e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSPrms$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSPrms.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSPrms.class deleted file mode 100644 index 41bff30ef..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSPrms.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSPrms.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSPrms.tasty deleted file mode 100644 index 252cb44fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSPrms.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSResults$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSResults$.class deleted file mode 100644 index 271f45124..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSResults$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSResults.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSResults.class deleted file mode 100644 index 280708a95..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSResults.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSResults.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSResults.tasty deleted file mode 100644 index 8cef544ed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSResults.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$1.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$1.class deleted file mode 100644 index dcd211812..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$10.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$10.class deleted file mode 100644 index cb4f7b57c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$10.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$11.class 
b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$11.class deleted file mode 100644 index 653d1839b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$11.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$12.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$12.class deleted file mode 100644 index 68917665e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$12.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$13.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$13.class deleted file mode 100644 index 2c454d607..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$13.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$14.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$14.class deleted file mode 100644 index a0171c100..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$14.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$15.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$15.class deleted file mode 100644 index 98c18ef2c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$15.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$16.class 
b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$16.class deleted file mode 100644 index 15c6089aa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$16.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$17.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$17.class deleted file mode 100644 index 1ef9cdf64..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$17.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$18.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$18.class deleted file mode 100644 index 6bc8e25c6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$18.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$19.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$19.class deleted file mode 100644 index 76f276d15..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$19.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$2.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$2.class deleted file mode 100644 index a417626e1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$20.class 
b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$20.class deleted file mode 100644 index 957c8b3c6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$20.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$21.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$21.class deleted file mode 100644 index 9427e4b6f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$21.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$22.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$22.class deleted file mode 100644 index ac1b7ce4e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$22.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$23.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$23.class deleted file mode 100644 index c90467aa6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$23.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$24.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$24.class deleted file mode 100644 index 7991de98a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$24.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$25.class 
b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$25.class deleted file mode 100644 index 86295d1f0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$25.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$26.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$26.class deleted file mode 100644 index 3ef178d20..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$26.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$27.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$27.class deleted file mode 100644 index 784c0ee3e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$27.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$28.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$28.class deleted file mode 100644 index 1193064be..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$28.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$29.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$29.class deleted file mode 100644 index ddd31f716..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$29.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$3.class 
b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$3.class deleted file mode 100644 index 3053c8411..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$30.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$30.class deleted file mode 100644 index 345bcd53b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$30.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$31.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$31.class deleted file mode 100644 index 416adb7ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$31.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$32.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$32.class deleted file mode 100644 index 608c442a3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$32.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$33.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$33.class deleted file mode 100644 index fb155553d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$33.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$34.class 
b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$34.class deleted file mode 100644 index 732a52b7c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$34.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$35.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$35.class deleted file mode 100644 index bf29b2aec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$35.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$36.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$36.class deleted file mode 100644 index 91d75c014..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$36.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$4.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$4.class deleted file mode 100644 index 120d088af..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$5.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$5.class deleted file mode 100644 index b75c642d1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$6.class 
b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$6.class deleted file mode 100644 index 03c5567e2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$7.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$7.class deleted file mode 100644 index cae04b4f1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$8.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$8.class deleted file mode 100644 index 5901201bb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$8.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$9.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$9.class deleted file mode 100644 index eaa69401b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$$anon$9.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$.class deleted file mode 100644 index 45e3958ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode.class deleted file mode 100644 index 
927dc45ec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode.tasty deleted file mode 100644 index ef4e42fed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSReturnCode.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSVarEvaluationResults$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSVarEvaluationResults$.class deleted file mode 100644 index 38c4d5d7a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSVarEvaluationResults$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSVarEvaluationResults.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSVarEvaluationResults.class deleted file mode 100644 index f6b99bed3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSVarEvaluationResults.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSVarEvaluationResults.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSVarEvaluationResults.tasty deleted file mode 100644 index 2206086be..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGSVarEvaluationResults.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B$.class deleted file mode 100644 index ba62d994d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B$.class and /dev/null 
differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B$package$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B$package$.class deleted file mode 100644 index 3e3e7a3c1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B$package.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B$package.class deleted file mode 100644 index 23c2a305a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B$package.tasty deleted file mode 100644 index 625ecbbce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B.class deleted file mode 100644 index a344db1f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B.tasty deleted file mode 100644 index 8a4dea351..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_B.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS$.class deleted file mode 100644 index 9e5821280..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS$package$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS$package$.class deleted file mode 100644 index 2042b9bb7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS$package.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS$package.class deleted file mode 100644 index 2a5eef5e7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS$package.tasty deleted file mode 100644 index 3bff650c5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS.class deleted file mode 100644 index 718c6bf51..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS.tasty deleted file mode 100644 index ebd7da0bd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LBFGS_NoLS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LineSearchTriInterval$.class 
b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LineSearchTriInterval$.class deleted file mode 100644 index 4fa20bec7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LineSearchTriInterval$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LineSearchTriInterval.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LineSearchTriInterval.class deleted file mode 100644 index bbd249d43..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LineSearchTriInterval.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LineSearchTriInterval.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LineSearchTriInterval.tasty deleted file mode 100644 index 912e12e41..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/LineSearchTriInterval.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/OptimizationLogic.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/OptimizationLogic.class deleted file mode 100644 index 0616e018b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/OptimizationLogic.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/OptimizationLogic.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/OptimizationLogic.tasty deleted file mode 100644 index 4ec385879..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/OptimizationLogic.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/OrthantWisePrms$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/OrthantWisePrms$.class deleted file mode 100644 index fae87bc3f..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/OrthantWisePrms$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/OrthantWisePrms.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/OrthantWisePrms.class deleted file mode 100644 index 55e93b747..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/OrthantWisePrms.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/OrthantWisePrms.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/OrthantWisePrms.tasty deleted file mode 100644 index 641151e21..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/OrthantWisePrms.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/QNewton$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/QNewton$.class deleted file mode 100644 index abef5b3f9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/QNewton$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/QNewton.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/QNewton.class deleted file mode 100644 index cb33b57e2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/QNewton.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/QNewton.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/QNewton.tasty deleted file mode 100644 index 0f244026a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/QNewton.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/README.txt b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/README.txt 
deleted file mode 100644 index 123543391..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/README.txt +++ /dev/null @@ -1,47 +0,0 @@ - -ScalaTion Quasi-Newton Optimizers: ---------------------------------- - -Regular: - BFGS.scala -- Broyden–Fletcher–Goldfarb–Shanno - LBFGS.scala -- Limited-Memory Broyden–Fletcher–Goldfarb–Shanno - -With Bounds: - BFGS_B.scala - LBFGS_B.scala - -With No Line Search: - BFGS_NoLS.scala - LBFGS_NoLS.scala - -With Momentum: - DM_BFGS.scala - DM_LBFGS.scala - -Stochastic Versions: - S_BFGS.scala - S_LBFGS.scala - -Solve Methods: -------------- - -solve -- numerical gradient computation -solve2 -- explicit gradient vector function given - -BFGS has solve3 (numerical graient), solve4 that use LBFGS line search algorithms - -Hyper-parameters: ----------------- - -Step Size -Momentum -Line Search - -Test Cases: ----------- - -ARMA.scala -- compare with SARIMAX from statsmodels - Lake Levels - COVID-19 - Influenze-Like Illness (ILI) - diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBealeFunction.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBealeFunction.class deleted file mode 100644 index b3a8c750e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBealeFunction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBealeFunction.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBealeFunction.tasty deleted file mode 100644 index 87be22fc0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBealeFunction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky1Function.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky1Function.class deleted file mode 100644 index ad82fc137..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky1Function.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky1Function.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky1Function.tasty deleted file mode 100644 index 22d3d4448..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky1Function.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky2Function.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky2Function.class deleted file mode 100644 index 81cbb3d0f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky2Function.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky2Function.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky2Function.tasty deleted file mode 100644 index 163a4ab6b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky2Function.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky3Function.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky3Function.class deleted file mode 100644 index 06287a524..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky3Function.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky3Function.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky3Function.tasty deleted file mode 100644 index 7da9e714f..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBohachevsky3Function.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBoothFunction.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBoothFunction.class deleted file mode 100644 index 97e2d8e9d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBoothFunction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBoothFunction.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBoothFunction.tasty deleted file mode 100644 index 6bf3eb3d7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSBoothFunction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSCamel3Function.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSCamel3Function.class deleted file mode 100644 index ff9388234..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSCamel3Function.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSCamel3Function.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSCamel3Function.tasty deleted file mode 100644 index 4ee463c1e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSCamel3Function.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSCubeFunction.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSCubeFunction.class deleted file mode 100644 index 309be7780..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSCubeFunction.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSCubeFunction.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSCubeFunction.tasty deleted file mode 100644 index 48acd7ccf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSCubeFunction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSFreudensteinRothFunction.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSFreudensteinRothFunction.class deleted file mode 100644 index 46671d8ad..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSFreudensteinRothFunction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSFreudensteinRothFunction.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSFreudensteinRothFunction.tasty deleted file mode 100644 index 2d468dd4b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSFreudensteinRothFunction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSMcCormickFunction.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSMcCormickFunction.class deleted file mode 100644 index ce701a912..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSMcCormickFunction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSMcCormickFunction.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSMcCormickFunction.tasty deleted file mode 100644 index 3721a5339..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSMcCormickFunction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest.class 
b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest.class deleted file mode 100644 index 6d3ec9258..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest.tasty deleted file mode 100644 index 2639b7097..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest2.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest2.class deleted file mode 100644 index 8a8d25d5c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest2.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest2.tasty deleted file mode 100644 index 82403891b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest3.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest3.class deleted file mode 100644 index 3ba069afb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest3.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest3.tasty deleted file mode 100644 index ea78cc57e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest3.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest4.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest4.class deleted file mode 100644 index 40063dfce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest4.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest4.tasty deleted file mode 100644 index f343b58d7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGSTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest.class deleted file mode 100644 index 1856def0e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest.tasty deleted file mode 100644 index 2aae822ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest2.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest2.class deleted file mode 100644 index 94b6962d3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest2.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest2.tasty deleted file mode 100644 index 9315f90f7..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest3.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest3.class deleted file mode 100644 index 4a7f5f4f7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest3.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest3.tasty deleted file mode 100644 index c3c3608e3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest4.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest4.class deleted file mode 100644 index 0e57cc8be..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest4.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest4.tasty deleted file mode 100644 index e2bbfb782..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bFGS_NoLSTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bealeFunctionLBFGSTest.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bealeFunctionLBFGSTest.class deleted file mode 100644 index 9b95981eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bealeFunctionLBFGSTest.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bealeFunctionLBFGSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bealeFunctionLBFGSTest.tasty deleted file mode 100644 index 966cfda3c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bealeFunctionLBFGSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky1FunctionLBFGSTest.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky1FunctionLBFGSTest.class deleted file mode 100644 index dd02d29d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky1FunctionLBFGSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky1FunctionLBFGSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky1FunctionLBFGSTest.tasty deleted file mode 100644 index 918a69b9e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky1FunctionLBFGSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky2FunctionLBFGSTest.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky2FunctionLBFGSTest.class deleted file mode 100644 index 70245393c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky2FunctionLBFGSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky2FunctionLBFGSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky2FunctionLBFGSTest.tasty deleted file mode 100644 index 64533c4d6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky2FunctionLBFGSTest.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky3FunctionLBFGSTest.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky3FunctionLBFGSTest.class deleted file mode 100644 index 3d7219f53..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky3FunctionLBFGSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky3FunctionLBFGSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky3FunctionLBFGSTest.tasty deleted file mode 100644 index 483e25e52..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/bohachevsky3FunctionLBFGSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/boothFunctionLBFGSTest.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/boothFunctionLBFGSTest.class deleted file mode 100644 index a47932e40..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/boothFunctionLBFGSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/boothFunctionLBFGSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/boothFunctionLBFGSTest.tasty deleted file mode 100644 index 346724608..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/boothFunctionLBFGSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/camel3FunctionLBFGSTest.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/camel3FunctionLBFGSTest.class deleted file mode 100644 index 0142d827b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/camel3FunctionLBFGSTest.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/camel3FunctionLBFGSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/camel3FunctionLBFGSTest.tasty deleted file mode 100644 index 045d93be6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/camel3FunctionLBFGSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/cubeFunctionLBFGSTest.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/cubeFunctionLBFGSTest.class deleted file mode 100644 index 4b78273aa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/cubeFunctionLBFGSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/cubeFunctionLBFGSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/cubeFunctionLBFGSTest.tasty deleted file mode 100644 index be4776ca7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/cubeFunctionLBFGSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/freudensteinRothFunctionLBFGSTest.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/freudensteinRothFunctionLBFGSTest.class deleted file mode 100644 index 1cc18b554..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/freudensteinRothFunctionLBFGSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/freudensteinRothFunctionLBFGSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/freudensteinRothFunctionLBFGSTest.tasty deleted file mode 100644 index 91c88a462..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/freudensteinRothFunctionLBFGSTest.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/index.html b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/index.html deleted file mode 100644 index eb633915e..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/index.html +++ /dev/null @@ -1,35 +0,0 @@ - - -

    Source files in quasi_newton Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest.class deleted file mode 100644 index 0f3d3a60f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest.tasty deleted file mode 100644 index 2c0b0d98f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest2.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest2.class deleted file mode 100644 index 28cbd3242..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest2.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest2.tasty deleted file mode 100644 index 963e856d0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest3.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest3.class deleted file mode 100644 index 59af9fd59..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest3.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest3.tasty deleted file mode 100644 index 
1dab6f3f3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_BTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest.class deleted file mode 100644 index 4c20c0c36..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest.tasty deleted file mode 100644 index 18204fdb0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest2.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest2.class deleted file mode 100644 index db9c87ade..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest2.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest2.tasty deleted file mode 100644 index baac4a132..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest3.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest3.class deleted file mode 100644 index 42809c5b2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest3.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest3.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest3.tasty deleted file mode 100644 index 8cf29c9f1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/lBFGS_NoLSTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/mccormickFunctionDMLBFGSTest.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/mccormickFunctionDMLBFGSTest.class deleted file mode 100644 index 82c86dee1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/mccormickFunctionDMLBFGSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/mccormickFunctionDMLBFGSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/mccormickFunctionDMLBFGSTest.tasty deleted file mode 100644 index ca1ac9353..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/mccormickFunctionDMLBFGSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/mccormickFunctionLBFGSTest.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/mccormickFunctionLBFGSTest.class deleted file mode 100644 index 615530d2d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/mccormickFunctionLBFGSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/mccormickFunctionLBFGSTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/mccormickFunctionLBFGSTest.tasty deleted file mode 100644 index 33bad53cc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newton/mccormickFunctionLBFGSTest.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionDescriptors$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionDescriptors$.class deleted file mode 100644 index ada8f19a8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionDescriptors$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionDescriptors.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionDescriptors.class deleted file mode 100644 index b2cf02284..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionDescriptors.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionDescriptors.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionDescriptors.tasty deleted file mode 100644 index e3281fa5d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionDescriptors.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionOptimizationFFM$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionOptimizationFFM$.class deleted file mode 100644 index 3e6c6de18..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionOptimizationFFM$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionOptimizationFFM.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionOptimizationFFM.class deleted file mode 100644 index 67d2f6611..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionOptimizationFFM.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionOptimizationFFM.tasty 
b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionOptimizationFFM.tasty deleted file mode 100644 index 93c74ebaf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/FunctionOptimizationFFM.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM$.class deleted file mode 100644 index 3a31c4dab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM$package$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM$package$.class deleted file mode 100644 index 61ddf9de6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM$package.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM$package.class deleted file mode 100644 index c05dde7df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM$package.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM$package.tasty deleted file mode 100644 index 756659833..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM.class deleted file mode 100644 index 5b1b65209..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM.tasty deleted file mode 100644 index c90524e00..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/LBFGS_FFM.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/MethodTypes$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/MethodTypes$.class deleted file mode 100644 index bbd8f80e3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/MethodTypes$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/MethodTypes.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/MethodTypes.class deleted file mode 100644 index add831aeb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/MethodTypes.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/MethodTypes.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/MethodTypes.tasty deleted file mode 100644 index 38de48c29..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/MethodTypes.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/OptimizationLogicFFM.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/OptimizationLogicFFM.class deleted file mode 100644 index e787da36b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/OptimizationLogicFFM.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/OptimizationLogicFFM.tasty 
b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/OptimizationLogicFFM.tasty deleted file mode 100644 index c97c8380a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/OptimizationLogicFFM.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/OptimizationMethodHandlesFFM$.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/OptimizationMethodHandlesFFM$.class deleted file mode 100644 index ed892ab46..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/OptimizationMethodHandlesFFM$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/OptimizationMethodHandlesFFM.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/OptimizationMethodHandlesFFM.class deleted file mode 100644 index 8fde42f3b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/OptimizationMethodHandlesFFM.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/OptimizationMethodHandlesFFM.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/OptimizationMethodHandlesFFM.tasty deleted file mode 100644 index b57ee010b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/OptimizationMethodHandlesFFM.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/bohachevsky2FunctionLBFGS_FFMTest.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/bohachevsky2FunctionLBFGS_FFMTest.class deleted file mode 100644 index b09a2d5af..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/bohachevsky2FunctionLBFGS_FFMTest.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/bohachevsky2FunctionLBFGS_FFMTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/bohachevsky2FunctionLBFGS_FFMTest.tasty deleted file mode 100644 index ca5757603..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/bohachevsky2FunctionLBFGS_FFMTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/boothFunctionLBFGS_FFMTest.class b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/boothFunctionLBFGS_FFMTest.class deleted file mode 100644 index 9cef8d7fb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/boothFunctionLBFGS_FFMTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/boothFunctionLBFGS_FFMTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/boothFunctionLBFGS_FFMTest.tasty deleted file mode 100644 index a3bfc936b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/boothFunctionLBFGS_FFMTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/index.html b/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/index.html deleted file mode 100644 index 37c87e3af..000000000 --- a/target/scala-3.6.4/classes/scalation/optimization/quasi_newtonC/index.html +++ /dev/null @@ -1,13 +0,0 @@ - - -

    Source files in quasi_newtonC Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/optimization/sPSATest.class b/target/scala-3.6.4/classes/scalation/optimization/sPSATest.class deleted file mode 100644 index 8c7bc92b3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/sPSATest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/sPSATest.tasty b/target/scala-3.6.4/classes/scalation/optimization/sPSATest.tasty deleted file mode 100644 index e954de513..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/sPSATest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/tabuSearchTest.class b/target/scala-3.6.4/classes/scalation/optimization/tabuSearchTest.class deleted file mode 100644 index 0b98369af..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/tabuSearchTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/tabuSearchTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/tabuSearchTest.tasty deleted file mode 100644 index 5bca57d4f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/tabuSearchTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/tabuSearchTest2.class b/target/scala-3.6.4/classes/scalation/optimization/tabuSearchTest2.class deleted file mode 100644 index 741b86499..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/tabuSearchTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/tabuSearchTest2.tasty b/target/scala-3.6.4/classes/scalation/optimization/tabuSearchTest2.tasty deleted file mode 100644 index 0396594b3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/tabuSearchTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeConditionsTest.class 
b/target/scala-3.6.4/classes/scalation/optimization/wolfeConditionsTest.class deleted file mode 100644 index 826cd9589..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeConditionsTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeConditionsTest.tasty b/target/scala-3.6.4/classes/scalation/optimization/wolfeConditionsTest.tasty deleted file mode 100644 index 9c1b69c15..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeConditionsTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test.class b/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test.class deleted file mode 100644 index 5a4e0937b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test.tasty b/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test.tasty deleted file mode 100644 index 1f3c089c4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test2.class b/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test2.class deleted file mode 100644 index 8b6cb150a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test2.tasty b/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test2.tasty deleted file mode 100644 index ca91543fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test3.class b/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test3.class deleted file mode 100644 
index 32f5a9ebf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test3.tasty b/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test3.tasty deleted file mode 100644 index efb45d32c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test4.class b/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test4.class deleted file mode 100644 index 7b757ffe4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test4.tasty b/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test4.tasty deleted file mode 100644 index 4ab6db101..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS2Test4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test.class b/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test.class deleted file mode 100644 index af4a3b786..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test.tasty b/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test.tasty deleted file mode 100644 index 1d7899d39..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test2.class b/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test2.class deleted file mode 100644 index a9c41eb36..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test2.class and /dev/null 
differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test2.tasty b/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test2.tasty deleted file mode 100644 index bcc397f0b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test3.class b/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test3.class deleted file mode 100644 index 41d61d881..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test3.tasty b/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test3.tasty deleted file mode 100644 index 06fd14f12..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test4.class b/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test4.class deleted file mode 100644 index d0916f2a5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test4.tasty b/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test4.tasty deleted file mode 100644 index 2ad4e1628..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLS3Test4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest.class b/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest.class deleted file mode 100644 index 698400cb0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest.tasty 
b/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest.tasty deleted file mode 100644 index 6a4bb17d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest2.class b/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest2.class deleted file mode 100644 index 709accfc8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest2.tasty b/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest2.tasty deleted file mode 100644 index 610197a77..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest3.class b/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest3.class deleted file mode 100644 index f7cf6d18d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest3.tasty b/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest3.tasty deleted file mode 100644 index 22c2c6e44..000000000 Binary files a/target/scala-3.6.4/classes/scalation/optimization/wolfeLSTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Bernoulli$.class b/target/scala-3.6.4/classes/scalation/random/Bernoulli$.class deleted file mode 100644 index 1a8d66028..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Bernoulli$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Bernoulli.class b/target/scala-3.6.4/classes/scalation/random/Bernoulli.class deleted file mode 100644 index 8fa397aa2..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/random/Bernoulli.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Bernoulli.tasty b/target/scala-3.6.4/classes/scalation/random/Bernoulli.tasty deleted file mode 100644 index 2856efef3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Bernoulli.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Beta$.class b/target/scala-3.6.4/classes/scalation/random/Beta$.class deleted file mode 100644 index acde7b8e5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Beta$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Beta.class b/target/scala-3.6.4/classes/scalation/random/Beta.class deleted file mode 100644 index e26ca2e69..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Beta.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Beta.tasty b/target/scala-3.6.4/classes/scalation/random/Beta.tasty deleted file mode 100644 index 58225d544..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Beta.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Binomial$.class b/target/scala-3.6.4/classes/scalation/random/Binomial$.class deleted file mode 100644 index 19f294439..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Binomial$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Binomial.class b/target/scala-3.6.4/classes/scalation/random/Binomial.class deleted file mode 100644 index ccb8d877e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Binomial.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Binomial.tasty b/target/scala-3.6.4/classes/scalation/random/Binomial.tasty deleted file mode 100644 index b72c85beb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Binomial.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/CDF$.class b/target/scala-3.6.4/classes/scalation/random/CDF$.class deleted file mode 100644 index 72e0df7b3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/CDF$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/CDF$package$.class b/target/scala-3.6.4/classes/scalation/random/CDF$package$.class deleted file mode 100644 index be34a100a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/CDF$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/CDF$package.class b/target/scala-3.6.4/classes/scalation/random/CDF$package.class deleted file mode 100644 index 7286b4fe6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/CDF$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/CDF$package.tasty b/target/scala-3.6.4/classes/scalation/random/CDF$package.tasty deleted file mode 100644 index 39cb9e03a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/CDF$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/CDF.class b/target/scala-3.6.4/classes/scalation/random/CDF.class deleted file mode 100644 index 3fb699173..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/CDF.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/CDF.tasty b/target/scala-3.6.4/classes/scalation/random/CDF.tasty deleted file mode 100644 index 1c152f467..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/CDF.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Cauchy$.class b/target/scala-3.6.4/classes/scalation/random/Cauchy$.class deleted file mode 100644 index 986e4ace8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Cauchy$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/random/Cauchy.class b/target/scala-3.6.4/classes/scalation/random/Cauchy.class deleted file mode 100644 index ab567c254..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Cauchy.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Cauchy.tasty b/target/scala-3.6.4/classes/scalation/random/Cauchy.tasty deleted file mode 100644 index 054db381d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Cauchy.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/ChiSquare$.class b/target/scala-3.6.4/classes/scalation/random/ChiSquare$.class deleted file mode 100644 index 2a4155a09..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/ChiSquare$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/ChiSquare.class b/target/scala-3.6.4/classes/scalation/random/ChiSquare.class deleted file mode 100644 index e036b7233..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/ChiSquare.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/ChiSquare.tasty b/target/scala-3.6.4/classes/scalation/random/ChiSquare.tasty deleted file mode 100644 index 04b484e7e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/ChiSquare.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Dice$.class b/target/scala-3.6.4/classes/scalation/random/Dice$.class deleted file mode 100644 index 5387183dd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Dice$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Dice.class b/target/scala-3.6.4/classes/scalation/random/Dice.class deleted file mode 100644 index d3e4f126b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Dice.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Dice.tasty 
b/target/scala-3.6.4/classes/scalation/random/Dice.tasty deleted file mode 100644 index f0aec6798..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Dice.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Dir$.class b/target/scala-3.6.4/classes/scalation/random/Dir$.class deleted file mode 100644 index 978ad6cb4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Dir$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Dir.class b/target/scala-3.6.4/classes/scalation/random/Dir.class deleted file mode 100644 index 4e1372ecd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Dir.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Dir.tasty b/target/scala-3.6.4/classes/scalation/random/Dir.tasty deleted file mode 100644 index 81764f8fe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Dir.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Discrete$.class b/target/scala-3.6.4/classes/scalation/random/Discrete$.class deleted file mode 100644 index 695a086be..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Discrete$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Discrete.class b/target/scala-3.6.4/classes/scalation/random/Discrete.class deleted file mode 100644 index 002358e21..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Discrete.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Discrete.tasty b/target/scala-3.6.4/classes/scalation/random/Discrete.tasty deleted file mode 100644 index 20c85cc63..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Discrete.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Erlang$.class b/target/scala-3.6.4/classes/scalation/random/Erlang$.class deleted file mode 100644 
index 770d87888..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Erlang$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Erlang.class b/target/scala-3.6.4/classes/scalation/random/Erlang.class deleted file mode 100644 index c390c6dab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Erlang.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Erlang.tasty b/target/scala-3.6.4/classes/scalation/random/Erlang.tasty deleted file mode 100644 index 5d92873fb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Erlang.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Exponential$.class b/target/scala-3.6.4/classes/scalation/random/Exponential$.class deleted file mode 100644 index 98ab90eea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Exponential$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Exponential.class b/target/scala-3.6.4/classes/scalation/random/Exponential.class deleted file mode 100644 index a4b6956f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Exponential.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Exponential.tasty b/target/scala-3.6.4/classes/scalation/random/Exponential.tasty deleted file mode 100644 index db0a0ba82..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Exponential.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Fisher$.class b/target/scala-3.6.4/classes/scalation/random/Fisher$.class deleted file mode 100644 index ddb7f8884..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Fisher$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Fisher.class b/target/scala-3.6.4/classes/scalation/random/Fisher.class deleted file mode 100644 index 81e0fd418..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/random/Fisher.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Fisher.tasty b/target/scala-3.6.4/classes/scalation/random/Fisher.tasty deleted file mode 100644 index c3fdfb2b6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Fisher.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Gamma$.class b/target/scala-3.6.4/classes/scalation/random/Gamma$.class deleted file mode 100644 index d701ac8df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Gamma$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Gamma.class b/target/scala-3.6.4/classes/scalation/random/Gamma.class deleted file mode 100644 index ab5c763ad..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Gamma.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Gamma.tasty b/target/scala-3.6.4/classes/scalation/random/Gamma.tasty deleted file mode 100644 index db2e03ae4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Gamma.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Geometric$.class b/target/scala-3.6.4/classes/scalation/random/Geometric$.class deleted file mode 100644 index 6b0be19ef..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Geometric$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Geometric.class b/target/scala-3.6.4/classes/scalation/random/Geometric.class deleted file mode 100644 index d91f691d6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Geometric.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Geometric.tasty b/target/scala-3.6.4/classes/scalation/random/Geometric.tasty deleted file mode 100644 index d57cdf7fe..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/random/Geometric.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/HyperExponential$.class b/target/scala-3.6.4/classes/scalation/random/HyperExponential$.class deleted file mode 100644 index 4c22ee2d2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/HyperExponential$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/HyperExponential.class b/target/scala-3.6.4/classes/scalation/random/HyperExponential.class deleted file mode 100644 index abbd6cfaa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/HyperExponential.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/HyperExponential.tasty b/target/scala-3.6.4/classes/scalation/random/HyperExponential.tasty deleted file mode 100644 index 7586a4d84..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/HyperExponential.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/HyperExponential_$.class b/target/scala-3.6.4/classes/scalation/random/HyperExponential_$.class deleted file mode 100644 index d9b589dd9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/HyperExponential_$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/HyperExponential_.class b/target/scala-3.6.4/classes/scalation/random/HyperExponential_.class deleted file mode 100644 index 6fcd33b51..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/HyperExponential_.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/HyperExponential_.tasty b/target/scala-3.6.4/classes/scalation/random/HyperExponential_.tasty deleted file mode 100644 index 81153496a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/HyperExponential_.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/random/HyperGeometric$.class b/target/scala-3.6.4/classes/scalation/random/HyperGeometric$.class deleted file mode 100644 index 31dafacb8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/HyperGeometric$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/HyperGeometric.class b/target/scala-3.6.4/classes/scalation/random/HyperGeometric.class deleted file mode 100644 index 0dfadcf40..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/HyperGeometric.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/HyperGeometric.tasty b/target/scala-3.6.4/classes/scalation/random/HyperGeometric.tasty deleted file mode 100644 index 020b2b8c6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/HyperGeometric.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Known$.class b/target/scala-3.6.4/classes/scalation/random/Known$.class deleted file mode 100644 index fb4b9660f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Known$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Known.class b/target/scala-3.6.4/classes/scalation/random/Known.class deleted file mode 100644 index ccb7717ff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Known.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Known.tasty b/target/scala-3.6.4/classes/scalation/random/Known.tasty deleted file mode 100644 index 1095c085f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Known.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/LogNormal$.class b/target/scala-3.6.4/classes/scalation/random/LogNormal$.class deleted file mode 100644 index 14c211ba6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/LogNormal$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/random/LogNormal.class b/target/scala-3.6.4/classes/scalation/random/LogNormal.class deleted file mode 100644 index 64dd10b6f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/LogNormal.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/LogNormal.tasty b/target/scala-3.6.4/classes/scalation/random/LogNormal.tasty deleted file mode 100644 index ac45437f1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/LogNormal.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Logistic$.class b/target/scala-3.6.4/classes/scalation/random/Logistic$.class deleted file mode 100644 index 351f7f088..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Logistic$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Logistic.class b/target/scala-3.6.4/classes/scalation/random/Logistic.class deleted file mode 100644 index 9cb3dfe1b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Logistic.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Logistic.tasty b/target/scala-3.6.4/classes/scalation/random/Logistic.tasty deleted file mode 100644 index 9417b7b38..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Logistic.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Multinomial$.class b/target/scala-3.6.4/classes/scalation/random/Multinomial$.class deleted file mode 100644 index 69c040e5e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Multinomial$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Multinomial.class b/target/scala-3.6.4/classes/scalation/random/Multinomial.class deleted file mode 100644 index 8dda82958..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Multinomial.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/random/Multinomial.tasty b/target/scala-3.6.4/classes/scalation/random/Multinomial.tasty deleted file mode 100644 index d37d9534a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Multinomial.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NHPoissonProcess$.class b/target/scala-3.6.4/classes/scalation/random/NHPoissonProcess$.class deleted file mode 100644 index b6f39fcd8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NHPoissonProcess$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NHPoissonProcess.class b/target/scala-3.6.4/classes/scalation/random/NHPoissonProcess.class deleted file mode 100644 index b4fb8bdde..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NHPoissonProcess.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NHPoissonProcess.tasty b/target/scala-3.6.4/classes/scalation/random/NHPoissonProcess.tasty deleted file mode 100644 index 9d2b4c70c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NHPoissonProcess.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NegativeBinomial$.class b/target/scala-3.6.4/classes/scalation/random/NegativeBinomial$.class deleted file mode 100644 index ab1076286..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NegativeBinomial$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NegativeBinomial.class b/target/scala-3.6.4/classes/scalation/random/NegativeBinomial.class deleted file mode 100644 index 5650dc676..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NegativeBinomial.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NegativeBinomial.tasty b/target/scala-3.6.4/classes/scalation/random/NegativeBinomial.tasty deleted file mode 100644 index 
224405399..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NegativeBinomial.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Normal$.class b/target/scala-3.6.4/classes/scalation/random/Normal$.class deleted file mode 100644 index 943e793dd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Normal$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Normal.class b/target/scala-3.6.4/classes/scalation/random/Normal.class deleted file mode 100644 index e117691e7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Normal.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Normal.tasty b/target/scala-3.6.4/classes/scalation/random/Normal.tasty deleted file mode 100644 index f2985d71f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Normal.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NormalMat$.class b/target/scala-3.6.4/classes/scalation/random/NormalMat$.class deleted file mode 100644 index a23fa85d6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NormalMat$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NormalMat.class b/target/scala-3.6.4/classes/scalation/random/NormalMat.class deleted file mode 100644 index 1e2430054..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NormalMat.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NormalMat.tasty b/target/scala-3.6.4/classes/scalation/random/NormalMat.tasty deleted file mode 100644 index f89c7b66d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NormalMat.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NormalTen$.class b/target/scala-3.6.4/classes/scalation/random/NormalTen$.class deleted file mode 100644 index ca90519e1..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/random/NormalTen$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NormalTen.class b/target/scala-3.6.4/classes/scalation/random/NormalTen.class deleted file mode 100644 index 011a4c030..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NormalTen.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NormalTen.tasty b/target/scala-3.6.4/classes/scalation/random/NormalTen.tasty deleted file mode 100644 index 13f0e4fbb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NormalTen.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NormalVec$.class b/target/scala-3.6.4/classes/scalation/random/NormalVec$.class deleted file mode 100644 index d315eac22..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NormalVec$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NormalVec.class b/target/scala-3.6.4/classes/scalation/random/NormalVec.class deleted file mode 100644 index dc55c865f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NormalVec.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NormalVec.tasty b/target/scala-3.6.4/classes/scalation/random/NormalVec.tasty deleted file mode 100644 index 63f860570..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NormalVec.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NormalVec_$.class b/target/scala-3.6.4/classes/scalation/random/NormalVec_$.class deleted file mode 100644 index 68f7d2538..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NormalVec_$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NormalVec_.class b/target/scala-3.6.4/classes/scalation/random/NormalVec_.class deleted file mode 100644 index 5fa59a575..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/random/NormalVec_.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NormalVec_.tasty b/target/scala-3.6.4/classes/scalation/random/NormalVec_.tasty deleted file mode 100644 index 5eab12a16..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NormalVec_.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NormalVec_c$.class b/target/scala-3.6.4/classes/scalation/random/NormalVec_c$.class deleted file mode 100644 index 7b99b7010..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NormalVec_c$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NormalVec_c.class b/target/scala-3.6.4/classes/scalation/random/NormalVec_c.class deleted file mode 100644 index d4da1af27..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NormalVec_c.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/NormalVec_c.tasty b/target/scala-3.6.4/classes/scalation/random/NormalVec_c.tasty deleted file mode 100644 index 694ea9f93..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/NormalVec_c.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/PermutedVecD$.class b/target/scala-3.6.4/classes/scalation/random/PermutedVecD$.class deleted file mode 100644 index 26a8c374f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/PermutedVecD$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/PermutedVecD.class b/target/scala-3.6.4/classes/scalation/random/PermutedVecD.class deleted file mode 100644 index 019a1b025..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/PermutedVecD.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/PermutedVecD.tasty b/target/scala-3.6.4/classes/scalation/random/PermutedVecD.tasty deleted file mode 100644 index 
fd0aa3f00..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/PermutedVecD.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/PermutedVecI$.class b/target/scala-3.6.4/classes/scalation/random/PermutedVecI$.class deleted file mode 100644 index 31a8e3f68..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/PermutedVecI$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/PermutedVecI.class b/target/scala-3.6.4/classes/scalation/random/PermutedVecI.class deleted file mode 100644 index 67274d513..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/PermutedVecI.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/PermutedVecI.tasty b/target/scala-3.6.4/classes/scalation/random/PermutedVecI.tasty deleted file mode 100644 index c58ef4efb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/PermutedVecI.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Poisson$.class b/target/scala-3.6.4/classes/scalation/random/Poisson$.class deleted file mode 100644 index 843e4f67c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Poisson$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Poisson.class b/target/scala-3.6.4/classes/scalation/random/Poisson.class deleted file mode 100644 index 72198e4ae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Poisson.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Poisson.tasty b/target/scala-3.6.4/classes/scalation/random/Poisson.tasty deleted file mode 100644 index 56a9dfb5a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Poisson.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/PoissonProcess$.class b/target/scala-3.6.4/classes/scalation/random/PoissonProcess$.class deleted file mode 
100644 index ce5e56915..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/PoissonProcess$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/PoissonProcess$package$.class b/target/scala-3.6.4/classes/scalation/random/PoissonProcess$package$.class deleted file mode 100644 index 68470dda0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/PoissonProcess$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/PoissonProcess$package.class b/target/scala-3.6.4/classes/scalation/random/PoissonProcess$package.class deleted file mode 100644 index e014bd6a7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/PoissonProcess$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/PoissonProcess$package.tasty b/target/scala-3.6.4/classes/scalation/random/PoissonProcess$package.tasty deleted file mode 100644 index 572c6c878..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/PoissonProcess$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/PoissonProcess.class b/target/scala-3.6.4/classes/scalation/random/PoissonProcess.class deleted file mode 100644 index d5ffd39d0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/PoissonProcess.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/PoissonProcess.tasty b/target/scala-3.6.4/classes/scalation/random/PoissonProcess.tasty deleted file mode 100644 index f1ad35d95..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/PoissonProcess.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/PowerLaw$.class b/target/scala-3.6.4/classes/scalation/random/PowerLaw$.class deleted file mode 100644 index 1b0b9a39c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/PowerLaw$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/random/PowerLaw.class b/target/scala-3.6.4/classes/scalation/random/PowerLaw.class deleted file mode 100644 index f965c7909..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/PowerLaw.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/PowerLaw.tasty b/target/scala-3.6.4/classes/scalation/random/PowerLaw.tasty deleted file mode 100644 index d6e12e93e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/PowerLaw.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/ProbabilityVec$.class b/target/scala-3.6.4/classes/scalation/random/ProbabilityVec$.class deleted file mode 100644 index 375c8cbef..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/ProbabilityVec$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/ProbabilityVec.class b/target/scala-3.6.4/classes/scalation/random/ProbabilityVec.class deleted file mode 100644 index b7ae1cc82..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/ProbabilityVec.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/ProbabilityVec.tasty b/target/scala-3.6.4/classes/scalation/random/ProbabilityVec.tasty deleted file mode 100644 index 5bcbeba8a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/ProbabilityVec.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Quantile$.class b/target/scala-3.6.4/classes/scalation/random/Quantile$.class deleted file mode 100644 index 5e8ffa543..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Quantile$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Quantile$package$.class b/target/scala-3.6.4/classes/scalation/random/Quantile$package$.class deleted file mode 100644 index 754eac3eb..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/random/Quantile$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Quantile$package.class b/target/scala-3.6.4/classes/scalation/random/Quantile$package.class deleted file mode 100644 index d0141e275..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Quantile$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Quantile$package.tasty b/target/scala-3.6.4/classes/scalation/random/Quantile$package.tasty deleted file mode 100644 index edde7e046..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Quantile$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Quantile.class b/target/scala-3.6.4/classes/scalation/random/Quantile.class deleted file mode 100644 index e1fa2dfab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Quantile.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Quantile.tasty b/target/scala-3.6.4/classes/scalation/random/Quantile.tasty deleted file mode 100644 index 970a886fb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Quantile.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RNG$package$.class b/target/scala-3.6.4/classes/scalation/random/RNG$package$.class deleted file mode 100644 index 6e4409a72..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RNG$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RNG$package.class b/target/scala-3.6.4/classes/scalation/random/RNG$package.class deleted file mode 100644 index 56319b829..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RNG$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RNG$package.tasty b/target/scala-3.6.4/classes/scalation/random/RNG$package.tasty deleted file mode 
100644 index 257343eea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RNG$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RNG.class b/target/scala-3.6.4/classes/scalation/random/RNG.class deleted file mode 100644 index 039bda263..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RNG.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RNG.tasty b/target/scala-3.6.4/classes/scalation/random/RNG.tasty deleted file mode 100644 index 7fcfb93ff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RNG.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RNGStream$.class b/target/scala-3.6.4/classes/scalation/random/RNGStream$.class deleted file mode 100644 index ef2ef7205..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RNGStream$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RNGStream.class b/target/scala-3.6.4/classes/scalation/random/RNGStream.class deleted file mode 100644 index cc2a4efb2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RNGStream.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RNGStream.tasty b/target/scala-3.6.4/classes/scalation/random/RNGStream.tasty deleted file mode 100644 index bca2830cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RNGStream.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RNGTester$.class b/target/scala-3.6.4/classes/scalation/random/RNGTester$.class deleted file mode 100644 index efafd80df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RNGTester$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RNGTester$CoGram$1.class b/target/scala-3.6.4/classes/scalation/random/RNGTester$CoGram$1.class deleted file mode 100644 index 
3aaa0f464..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RNGTester$CoGram$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RNGTester.class b/target/scala-3.6.4/classes/scalation/random/RNGTester.class deleted file mode 100644 index 609ac27d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RNGTester.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RNGTester.tasty b/target/scala-3.6.4/classes/scalation/random/RNGTester.tasty deleted file mode 100644 index d7dc68842..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RNGTester.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Randi$.class b/target/scala-3.6.4/classes/scalation/random/Randi$.class deleted file mode 100644 index 6a10eed41..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Randi$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Randi.class b/target/scala-3.6.4/classes/scalation/random/Randi.class deleted file mode 100644 index 198039922..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Randi.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Randi.tasty b/target/scala-3.6.4/classes/scalation/random/Randi.tasty deleted file mode 100644 index 49f622dc2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Randi.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Randi0$.class b/target/scala-3.6.4/classes/scalation/random/Randi0$.class deleted file mode 100644 index 599a60cb8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Randi0$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Randi0.class b/target/scala-3.6.4/classes/scalation/random/Randi0.class deleted file mode 100644 index c401a283c..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/random/Randi0.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Randi0.tasty b/target/scala-3.6.4/classes/scalation/random/Randi0.tasty deleted file mode 100644 index f67833fed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Randi0.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandiU0$.class b/target/scala-3.6.4/classes/scalation/random/RandiU0$.class deleted file mode 100644 index 0e03cff01..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandiU0$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandiU0.class b/target/scala-3.6.4/classes/scalation/random/RandiU0.class deleted file mode 100644 index 10e9283cd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandiU0.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandiU0.tasty b/target/scala-3.6.4/classes/scalation/random/RandiU0.tasty deleted file mode 100644 index 2070e924a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandiU0.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Random$.class b/target/scala-3.6.4/classes/scalation/random/Random$.class deleted file mode 100644 index a4f59fe69..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Random$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Random.class b/target/scala-3.6.4/classes/scalation/random/Random.class deleted file mode 100644 index 7a1ec3175..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Random.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Random.tasty b/target/scala-3.6.4/classes/scalation/random/Random.tasty deleted file mode 100644 index d5acf0602..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Random.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Random0$.class b/target/scala-3.6.4/classes/scalation/random/Random0$.class deleted file mode 100644 index 6919e54c3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Random0$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Random0.class b/target/scala-3.6.4/classes/scalation/random/Random0.class deleted file mode 100644 index bffbab0bc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Random0.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Random0.tasty b/target/scala-3.6.4/classes/scalation/random/Random0.tasty deleted file mode 100644 index fd7ab110e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Random0.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Random2$.class b/target/scala-3.6.4/classes/scalation/random/Random2$.class deleted file mode 100644 index c02b788d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Random2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Random2.class b/target/scala-3.6.4/classes/scalation/random/Random2.class deleted file mode 100644 index 02a581196..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Random2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Random2.tasty b/target/scala-3.6.4/classes/scalation/random/Random2.tasty deleted file mode 100644 index aa75d1569..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Random2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Random3$.class b/target/scala-3.6.4/classes/scalation/random/Random3$.class deleted file mode 100644 index 0a9caf49e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Random3$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/random/Random3.class b/target/scala-3.6.4/classes/scalation/random/Random3.class deleted file mode 100644 index fa6f6ec9f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Random3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Random3.tasty b/target/scala-3.6.4/classes/scalation/random/Random3.tasty deleted file mode 100644 index 992b3802d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Random3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomMatD$.class b/target/scala-3.6.4/classes/scalation/random/RandomMatD$.class deleted file mode 100644 index ef3d65df2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomMatD$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomMatD.class b/target/scala-3.6.4/classes/scalation/random/RandomMatD.class deleted file mode 100644 index e27552911..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomMatD.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomMatD.tasty b/target/scala-3.6.4/classes/scalation/random/RandomMatD.tasty deleted file mode 100644 index 74d4a714f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomMatD.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomSeeds$.class b/target/scala-3.6.4/classes/scalation/random/RandomSeeds$.class deleted file mode 100644 index 1ca64b68b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomSeeds$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomSeeds.class b/target/scala-3.6.4/classes/scalation/random/RandomSeeds.class deleted file mode 100644 index d53abd9c8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomSeeds.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/random/RandomSeeds.tasty b/target/scala-3.6.4/classes/scalation/random/RandomSeeds.tasty deleted file mode 100644 index 3bae0098e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomSeeds.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomSeeds3$.class b/target/scala-3.6.4/classes/scalation/random/RandomSeeds3$.class deleted file mode 100644 index 8ef9db857..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomSeeds3$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomSeeds3.class b/target/scala-3.6.4/classes/scalation/random/RandomSeeds3.class deleted file mode 100644 index b4e490274..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomSeeds3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomSeeds3.tasty b/target/scala-3.6.4/classes/scalation/random/RandomSeeds3.tasty deleted file mode 100644 index 73cb10613..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomSeeds3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomSet$.class b/target/scala-3.6.4/classes/scalation/random/RandomSet$.class deleted file mode 100644 index f92d5f286..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomSet$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomSet.class b/target/scala-3.6.4/classes/scalation/random/RandomSet.class deleted file mode 100644 index 4fabd4e3a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomSet.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomSet.tasty b/target/scala-3.6.4/classes/scalation/random/RandomSet.tasty deleted file mode 100644 index 4f100f842..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomSet.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomSetS$.class b/target/scala-3.6.4/classes/scalation/random/RandomSetS$.class deleted file mode 100644 index 7ce89d7f2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomSetS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomSetS.class b/target/scala-3.6.4/classes/scalation/random/RandomSetS.class deleted file mode 100644 index 0e4896961..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomSetS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomSetS.tasty b/target/scala-3.6.4/classes/scalation/random/RandomSetS.tasty deleted file mode 100644 index e27c0782c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomSetS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomSetW$.class b/target/scala-3.6.4/classes/scalation/random/RandomSetW$.class deleted file mode 100644 index 8b3f5a6f1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomSetW$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomSetW.class b/target/scala-3.6.4/classes/scalation/random/RandomSetW.class deleted file mode 100644 index 4b77c6ffa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomSetW.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomSetW.tasty b/target/scala-3.6.4/classes/scalation/random/RandomSetW.tasty deleted file mode 100644 index c62930c97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomSetW.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomStr$.class b/target/scala-3.6.4/classes/scalation/random/RandomStr$.class deleted file mode 100644 index 0504656f6..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/random/RandomStr$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomStr.class b/target/scala-3.6.4/classes/scalation/random/RandomStr.class deleted file mode 100644 index a272eb9c6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomStr.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomStr.tasty b/target/scala-3.6.4/classes/scalation/random/RandomStr.tasty deleted file mode 100644 index ae1e8dac7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomStr.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomTenD$.class b/target/scala-3.6.4/classes/scalation/random/RandomTenD$.class deleted file mode 100644 index 0899be1c7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomTenD$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomTenD.class b/target/scala-3.6.4/classes/scalation/random/RandomTenD.class deleted file mode 100644 index eb79c24c2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomTenD.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomTenD.tasty b/target/scala-3.6.4/classes/scalation/random/RandomTenD.tasty deleted file mode 100644 index de86267c6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomTenD.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecD$.class b/target/scala-3.6.4/classes/scalation/random/RandomVecD$.class deleted file mode 100644 index ab5e8559d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomVecD$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecD.class b/target/scala-3.6.4/classes/scalation/random/RandomVecD.class deleted file mode 100644 index 9871859fc..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/random/RandomVecD.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecD.tasty b/target/scala-3.6.4/classes/scalation/random/RandomVecD.tasty deleted file mode 100644 index c34c3ef40..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomVecD.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecD_$.class b/target/scala-3.6.4/classes/scalation/random/RandomVecD_$.class deleted file mode 100644 index 4c76b58dc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomVecD_$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecD_.class b/target/scala-3.6.4/classes/scalation/random/RandomVecD_.class deleted file mode 100644 index 494832d38..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomVecD_.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecD_.tasty b/target/scala-3.6.4/classes/scalation/random/RandomVecD_.tasty deleted file mode 100644 index bbdcfe1d1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomVecD_.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecI$.class b/target/scala-3.6.4/classes/scalation/random/RandomVecI$.class deleted file mode 100644 index 5682414f4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomVecI$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecI.class b/target/scala-3.6.4/classes/scalation/random/RandomVecI.class deleted file mode 100644 index 2a26fc2ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomVecI.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecI.tasty b/target/scala-3.6.4/classes/scalation/random/RandomVecI.tasty deleted file mode 100644 index 
a3d279749..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomVecI.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecS$.class b/target/scala-3.6.4/classes/scalation/random/RandomVecS$.class deleted file mode 100644 index d4c9669b4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomVecS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecS.class b/target/scala-3.6.4/classes/scalation/random/RandomVecS.class deleted file mode 100644 index 8d9b72624..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomVecS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecS.tasty b/target/scala-3.6.4/classes/scalation/random/RandomVecS.tasty deleted file mode 100644 index 27343cfaf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomVecS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecSample$.class b/target/scala-3.6.4/classes/scalation/random/RandomVecSample$.class deleted file mode 100644 index 5e535d70c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomVecSample$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecSample.class b/target/scala-3.6.4/classes/scalation/random/RandomVecSample.class deleted file mode 100644 index ea6036f40..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomVecSample.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecSample.tasty b/target/scala-3.6.4/classes/scalation/random/RandomVecSample.tasty deleted file mode 100644 index 69b54a714..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomVecSample.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecTrend$.class 
b/target/scala-3.6.4/classes/scalation/random/RandomVecTrend$.class deleted file mode 100644 index eac244976..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomVecTrend$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecTrend.class b/target/scala-3.6.4/classes/scalation/random/RandomVecTrend.class deleted file mode 100644 index d26c42b97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomVecTrend.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomVecTrend.tasty b/target/scala-3.6.4/classes/scalation/random/RandomVecTrend.tasty deleted file mode 100644 index 24480ed63..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomVecTrend.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomWord$.class b/target/scala-3.6.4/classes/scalation/random/RandomWord$.class deleted file mode 100644 index b71f64f33..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomWord$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomWord.class b/target/scala-3.6.4/classes/scalation/random/RandomWord.class deleted file mode 100644 index da00e2f78..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomWord.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/RandomWord.tasty b/target/scala-3.6.4/classes/scalation/random/RandomWord.tasty deleted file mode 100644 index 2709f2331..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/RandomWord.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Sharp$.class b/target/scala-3.6.4/classes/scalation/random/Sharp$.class deleted file mode 100644 index 4f54708ca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Sharp$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/random/Sharp.class b/target/scala-3.6.4/classes/scalation/random/Sharp.class deleted file mode 100644 index f58b0875c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Sharp.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Sharp.tasty b/target/scala-3.6.4/classes/scalation/random/Sharp.tasty deleted file mode 100644 index 5291c169a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Sharp.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/StdNormal$.class b/target/scala-3.6.4/classes/scalation/random/StdNormal$.class deleted file mode 100644 index 39dc7938a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/StdNormal$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/StdNormal.class b/target/scala-3.6.4/classes/scalation/random/StdNormal.class deleted file mode 100644 index c630276a4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/StdNormal.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/StdNormal.tasty b/target/scala-3.6.4/classes/scalation/random/StdNormal.tasty deleted file mode 100644 index ef242db35..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/StdNormal.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/StreamMaker$.class b/target/scala-3.6.4/classes/scalation/random/StreamMaker$.class deleted file mode 100644 index 81a377cc1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/StreamMaker$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/StreamMaker$package$.class b/target/scala-3.6.4/classes/scalation/random/StreamMaker$package$.class deleted file mode 100644 index 23eca38dd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/StreamMaker$package$.class and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/random/StreamMaker$package.class b/target/scala-3.6.4/classes/scalation/random/StreamMaker$package.class deleted file mode 100644 index dff21fed8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/StreamMaker$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/StreamMaker$package.tasty b/target/scala-3.6.4/classes/scalation/random/StreamMaker$package.tasty deleted file mode 100644 index 568339367..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/StreamMaker$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/StreamMaker.class b/target/scala-3.6.4/classes/scalation/random/StreamMaker.class deleted file mode 100644 index 6fe1a1131..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/StreamMaker.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/StreamMaker.tasty b/target/scala-3.6.4/classes/scalation/random/StreamMaker.tasty deleted file mode 100644 index 8afccf5ea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/StreamMaker.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/StreamMaker3$package$.class b/target/scala-3.6.4/classes/scalation/random/StreamMaker3$package$.class deleted file mode 100644 index e4321e90c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/StreamMaker3$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/StreamMaker3$package.class b/target/scala-3.6.4/classes/scalation/random/StreamMaker3$package.class deleted file mode 100644 index 2494e5cb4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/StreamMaker3$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/StreamMaker3$package.tasty b/target/scala-3.6.4/classes/scalation/random/StreamMaker3$package.tasty deleted 
file mode 100644 index 46a95c962..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/StreamMaker3$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/StudentT$.class b/target/scala-3.6.4/classes/scalation/random/StudentT$.class deleted file mode 100644 index 7b7192d1e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/StudentT$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/StudentT.class b/target/scala-3.6.4/classes/scalation/random/StudentT.class deleted file mode 100644 index 19519e6ca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/StudentT.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/StudentT.tasty b/target/scala-3.6.4/classes/scalation/random/StudentT.tasty deleted file mode 100644 index 6e8459898..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/StudentT.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/TimeVariate.class b/target/scala-3.6.4/classes/scalation/random/TimeVariate.class deleted file mode 100644 index d5d542231..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/TimeVariate.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/TimeVariate.tasty b/target/scala-3.6.4/classes/scalation/random/TimeVariate.tasty deleted file mode 100644 index 7e234e564..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/TimeVariate.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Trapezoidal$.class b/target/scala-3.6.4/classes/scalation/random/Trapezoidal$.class deleted file mode 100644 index 92ac2e8db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Trapezoidal$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Trapezoidal.class 
b/target/scala-3.6.4/classes/scalation/random/Trapezoidal.class deleted file mode 100644 index 269cb7936..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Trapezoidal.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Trapezoidal.tasty b/target/scala-3.6.4/classes/scalation/random/Trapezoidal.tasty deleted file mode 100644 index 5ff8f7569..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Trapezoidal.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Triangular$.class b/target/scala-3.6.4/classes/scalation/random/Triangular$.class deleted file mode 100644 index b5aa5f815..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Triangular$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Triangular.class b/target/scala-3.6.4/classes/scalation/random/Triangular.class deleted file mode 100644 index 66b761adc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Triangular.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Triangular.tasty b/target/scala-3.6.4/classes/scalation/random/Triangular.tasty deleted file mode 100644 index a69527487..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Triangular.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Trinomial$.class b/target/scala-3.6.4/classes/scalation/random/Trinomial$.class deleted file mode 100644 index cd49cfc90..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Trinomial$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Trinomial.class b/target/scala-3.6.4/classes/scalation/random/Trinomial.class deleted file mode 100644 index b30dddc06..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Trinomial.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/random/Trinomial.tasty b/target/scala-3.6.4/classes/scalation/random/Trinomial.tasty deleted file mode 100644 index e363facfe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Trinomial.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Uniform$.class b/target/scala-3.6.4/classes/scalation/random/Uniform$.class deleted file mode 100644 index a779ac400..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Uniform$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Uniform.class b/target/scala-3.6.4/classes/scalation/random/Uniform.class deleted file mode 100644 index 282dddb60..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Uniform.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Uniform.tasty b/target/scala-3.6.4/classes/scalation/random/Uniform.tasty deleted file mode 100644 index f7b17e2a4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Uniform.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Variate$.class b/target/scala-3.6.4/classes/scalation/random/Variate$.class deleted file mode 100644 index d7f3645ba..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Variate$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Variate$package$.class b/target/scala-3.6.4/classes/scalation/random/Variate$package$.class deleted file mode 100644 index b10ea3a1a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Variate$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Variate$package.class b/target/scala-3.6.4/classes/scalation/random/Variate$package.class deleted file mode 100644 index e2303acd1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Variate$package.class and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/random/Variate$package.tasty b/target/scala-3.6.4/classes/scalation/random/Variate$package.tasty deleted file mode 100644 index 340887e41..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Variate$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Variate.class b/target/scala-3.6.4/classes/scalation/random/Variate.class deleted file mode 100644 index 5391b7c26..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Variate.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Variate.tasty b/target/scala-3.6.4/classes/scalation/random/Variate.tasty deleted file mode 100644 index 5efbf0b2c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Variate.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateMat$.class b/target/scala-3.6.4/classes/scalation/random/VariateMat$.class deleted file mode 100644 index 188b8db51..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateMat$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateMat$package$.class b/target/scala-3.6.4/classes/scalation/random/VariateMat$package$.class deleted file mode 100644 index 2b2ab5596..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateMat$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateMat$package.class b/target/scala-3.6.4/classes/scalation/random/VariateMat$package.class deleted file mode 100644 index 9d9454cef..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateMat$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateMat$package.tasty b/target/scala-3.6.4/classes/scalation/random/VariateMat$package.tasty deleted file mode 100644 index a15bebbfe..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/random/VariateMat$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateMat.class b/target/scala-3.6.4/classes/scalation/random/VariateMat.class deleted file mode 100644 index 936ef652c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateMat.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateMat.tasty b/target/scala-3.6.4/classes/scalation/random/VariateMat.tasty deleted file mode 100644 index a887b90ff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateMat.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateSet$.class b/target/scala-3.6.4/classes/scalation/random/VariateSet$.class deleted file mode 100644 index 0c5598f89..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateSet$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateSet$package$.class b/target/scala-3.6.4/classes/scalation/random/VariateSet$package$.class deleted file mode 100644 index 176261197..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateSet$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateSet$package.class b/target/scala-3.6.4/classes/scalation/random/VariateSet$package.class deleted file mode 100644 index 1ff903162..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateSet$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateSet$package.tasty b/target/scala-3.6.4/classes/scalation/random/VariateSet$package.tasty deleted file mode 100644 index ccbea5329..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateSet$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateSet.class 
b/target/scala-3.6.4/classes/scalation/random/VariateSet.class deleted file mode 100644 index f78cb4c3a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateSet.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateSet.tasty b/target/scala-3.6.4/classes/scalation/random/VariateSet.tasty deleted file mode 100644 index 59afe7f77..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateSet.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateStr$package$.class b/target/scala-3.6.4/classes/scalation/random/VariateStr$package$.class deleted file mode 100644 index 2dea52a12..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateStr$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateStr$package.class b/target/scala-3.6.4/classes/scalation/random/VariateStr$package.class deleted file mode 100644 index 98c63a285..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateStr$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateStr$package.tasty b/target/scala-3.6.4/classes/scalation/random/VariateStr$package.tasty deleted file mode 100644 index 484459e5f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateStr$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateTen$.class b/target/scala-3.6.4/classes/scalation/random/VariateTen$.class deleted file mode 100644 index 11c15ac52..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateTen$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateTen$package$.class b/target/scala-3.6.4/classes/scalation/random/VariateTen$package$.class deleted file mode 100644 index b2d203055..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/random/VariateTen$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateTen$package.class b/target/scala-3.6.4/classes/scalation/random/VariateTen$package.class deleted file mode 100644 index 248cac485..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateTen$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateTen$package.tasty b/target/scala-3.6.4/classes/scalation/random/VariateTen$package.tasty deleted file mode 100644 index 887985a9e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateTen$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateTen.class b/target/scala-3.6.4/classes/scalation/random/VariateTen.class deleted file mode 100644 index 7c9431e62..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateTen.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateTen.tasty b/target/scala-3.6.4/classes/scalation/random/VariateTen.tasty deleted file mode 100644 index 240c74e06..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateTen.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateVec$.class b/target/scala-3.6.4/classes/scalation/random/VariateVec$.class deleted file mode 100644 index 439ad8a90..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateVec$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateVec$package$.class b/target/scala-3.6.4/classes/scalation/random/VariateVec$package$.class deleted file mode 100644 index 65868e193..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateVec$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateVec$package.class 
b/target/scala-3.6.4/classes/scalation/random/VariateVec$package.class deleted file mode 100644 index d0446a2e6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateVec$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateVec$package.tasty b/target/scala-3.6.4/classes/scalation/random/VariateVec$package.tasty deleted file mode 100644 index 57d58d8a1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateVec$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateVec.class b/target/scala-3.6.4/classes/scalation/random/VariateVec.class deleted file mode 100644 index d8db40da3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateVec.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/VariateVec.tasty b/target/scala-3.6.4/classes/scalation/random/VariateVec.tasty deleted file mode 100644 index 5b04e81f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/VariateVec.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Weibull$.class b/target/scala-3.6.4/classes/scalation/random/Weibull$.class deleted file mode 100644 index 8404a7d1f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Weibull$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Weibull.class b/target/scala-3.6.4/classes/scalation/random/Weibull.class deleted file mode 100644 index 15c621634..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Weibull.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/Weibull.tasty b/target/scala-3.6.4/classes/scalation/random/Weibull.tasty deleted file mode 100644 index 374a28006..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/Weibull.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/random/cDFTest_ChiSquare.class b/target/scala-3.6.4/classes/scalation/random/cDFTest_ChiSquare.class deleted file mode 100644 index f545d7f7b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_ChiSquare.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_ChiSquare.tasty b/target/scala-3.6.4/classes/scalation/random/cDFTest_ChiSquare.tasty deleted file mode 100644 index f50aeaa81..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_ChiSquare.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_Empirical.class b/target/scala-3.6.4/classes/scalation/random/cDFTest_Empirical.class deleted file mode 100644 index 9d73ef85e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_Empirical.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_Empirical.tasty b/target/scala-3.6.4/classes/scalation/random/cDFTest_Empirical.tasty deleted file mode 100644 index 9af88c2df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_Empirical.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_Exponential.class b/target/scala-3.6.4/classes/scalation/random/cDFTest_Exponential.class deleted file mode 100644 index 6cd73c39d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_Exponential.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_Exponential.tasty b/target/scala-3.6.4/classes/scalation/random/cDFTest_Exponential.tasty deleted file mode 100644 index 0dae2e9e2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_Exponential.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_Fisher.class b/target/scala-3.6.4/classes/scalation/random/cDFTest_Fisher.class deleted 
file mode 100644 index 5f2ba9a38..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_Fisher.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_Fisher.tasty b/target/scala-3.6.4/classes/scalation/random/cDFTest_Fisher.tasty deleted file mode 100644 index 0cc771977..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_Fisher.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_Fisher2.class b/target/scala-3.6.4/classes/scalation/random/cDFTest_Fisher2.class deleted file mode 100644 index 1ba52adb5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_Fisher2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_Fisher2.tasty b/target/scala-3.6.4/classes/scalation/random/cDFTest_Fisher2.tasty deleted file mode 100644 index 0e85b0221..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_Fisher2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_Normal.class b/target/scala-3.6.4/classes/scalation/random/cDFTest_Normal.class deleted file mode 100644 index 592fbca8e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_Normal.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_Normal.tasty b/target/scala-3.6.4/classes/scalation/random/cDFTest_Normal.tasty deleted file mode 100644 index cc0418876..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_Normal.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_Normal_Diff.class b/target/scala-3.6.4/classes/scalation/random/cDFTest_Normal_Diff.class deleted file mode 100644 index 95569a915..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_Normal_Diff.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/random/cDFTest_Normal_Diff.tasty b/target/scala-3.6.4/classes/scalation/random/cDFTest_Normal_Diff.tasty deleted file mode 100644 index 118e80efd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_Normal_Diff.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_StudentT.class b/target/scala-3.6.4/classes/scalation/random/cDFTest_StudentT.class deleted file mode 100644 index 69a272708..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_StudentT.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_StudentT.tasty b/target/scala-3.6.4/classes/scalation/random/cDFTest_StudentT.tasty deleted file mode 100644 index 6622b8687..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_StudentT.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_Uniform.class b/target/scala-3.6.4/classes/scalation/random/cDFTest_Uniform.class deleted file mode 100644 index ca958e49d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_Uniform.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_Uniform.tasty b/target/scala-3.6.4/classes/scalation/random/cDFTest_Uniform.tasty deleted file mode 100644 index 26d5f6bc1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_Uniform.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_Weibull.class b/target/scala-3.6.4/classes/scalation/random/cDFTest_Weibull.class deleted file mode 100644 index 0ecd2f400..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_Weibull.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cDFTest_Weibull.tasty b/target/scala-3.6.4/classes/scalation/random/cDFTest_Weibull.tasty deleted file mode 100644 index 
cd64b4198..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cDFTest_Weibull.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cLTTest.class b/target/scala-3.6.4/classes/scalation/random/cLTTest.class deleted file mode 100644 index 92d3cb0c3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cLTTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/cLTTest.tasty b/target/scala-3.6.4/classes/scalation/random/cLTTest.tasty deleted file mode 100644 index 2fdc07d04..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/cLTTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/diceTest.class b/target/scala-3.6.4/classes/scalation/random/diceTest.class deleted file mode 100644 index d32230165..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/diceTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/diceTest.tasty b/target/scala-3.6.4/classes/scalation/random/diceTest.tasty deleted file mode 100644 index 6154f96a0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/diceTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/index.html b/target/scala-3.6.4/classes/scalation/random/index.html deleted file mode 100644 index d3b59340b..000000000 --- a/target/scala-3.6.4/classes/scalation/random/index.html +++ /dev/null @@ -1,25 +0,0 @@ - - -

    Source files in random Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/random/poissonProcessTest.class b/target/scala-3.6.4/classes/scalation/random/poissonProcessTest.class deleted file mode 100644 index e92cae521..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/poissonProcessTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/poissonProcessTest.tasty b/target/scala-3.6.4/classes/scalation/random/poissonProcessTest.tasty deleted file mode 100644 index 39429ef28..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/poissonProcessTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/quantileTest_ChiSquare.class b/target/scala-3.6.4/classes/scalation/random/quantileTest_ChiSquare.class deleted file mode 100644 index 2b9c754ca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/quantileTest_ChiSquare.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/quantileTest_ChiSquare.tasty b/target/scala-3.6.4/classes/scalation/random/quantileTest_ChiSquare.tasty deleted file mode 100644 index eaac94e9f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/quantileTest_ChiSquare.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/quantileTest_Empirical.class b/target/scala-3.6.4/classes/scalation/random/quantileTest_Empirical.class deleted file mode 100644 index d5a5cc20e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/quantileTest_Empirical.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/quantileTest_Empirical.tasty b/target/scala-3.6.4/classes/scalation/random/quantileTest_Empirical.tasty deleted file mode 100644 index 6d783f114..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/quantileTest_Empirical.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/random/quantileTest_Exponential.class b/target/scala-3.6.4/classes/scalation/random/quantileTest_Exponential.class deleted file mode 100644 index be781fdfb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/quantileTest_Exponential.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/quantileTest_Exponential.tasty b/target/scala-3.6.4/classes/scalation/random/quantileTest_Exponential.tasty deleted file mode 100644 index 6b0bc8fd7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/quantileTest_Exponential.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/quantileTest_Fisher.class b/target/scala-3.6.4/classes/scalation/random/quantileTest_Fisher.class deleted file mode 100644 index 25ebffb8f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/quantileTest_Fisher.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/quantileTest_Fisher.tasty b/target/scala-3.6.4/classes/scalation/random/quantileTest_Fisher.tasty deleted file mode 100644 index d660666bc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/quantileTest_Fisher.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/quantileTest_Normal.class b/target/scala-3.6.4/classes/scalation/random/quantileTest_Normal.class deleted file mode 100644 index a0f1fd8ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/quantileTest_Normal.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/quantileTest_Normal.tasty b/target/scala-3.6.4/classes/scalation/random/quantileTest_Normal.tasty deleted file mode 100644 index 1cfeadca5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/quantileTest_Normal.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/quantileTest_StudentT.class 
b/target/scala-3.6.4/classes/scalation/random/quantileTest_StudentT.class deleted file mode 100644 index d877ae3f2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/quantileTest_StudentT.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/quantileTest_StudentT.tasty b/target/scala-3.6.4/classes/scalation/random/quantileTest_StudentT.tasty deleted file mode 100644 index 04899f639..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/quantileTest_StudentT.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/quantileTest_Uniform.class b/target/scala-3.6.4/classes/scalation/random/quantileTest_Uniform.class deleted file mode 100644 index 14542346f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/quantileTest_Uniform.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/quantileTest_Uniform.tasty b/target/scala-3.6.4/classes/scalation/random/quantileTest_Uniform.tasty deleted file mode 100644 index 5d958c21f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/quantileTest_Uniform.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/rNGTest.class b/target/scala-3.6.4/classes/scalation/random/rNGTest.class deleted file mode 100644 index eb4bd04b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/rNGTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/rNGTest.tasty b/target/scala-3.6.4/classes/scalation/random/rNGTest.tasty deleted file mode 100644 index 9669cd745..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/rNGTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/randomStrTest.class b/target/scala-3.6.4/classes/scalation/random/randomStrTest.class deleted file mode 100644 index 66e3e51c2..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/random/randomStrTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/randomStrTest.tasty b/target/scala-3.6.4/classes/scalation/random/randomStrTest.tasty deleted file mode 100644 index 96470637e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/randomStrTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/randomWordTest.class b/target/scala-3.6.4/classes/scalation/random/randomWordTest.class deleted file mode 100644 index 7bfa92238..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/randomWordTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/randomWordTest.tasty b/target/scala-3.6.4/classes/scalation/random/randomWordTest.tasty deleted file mode 100644 index d2666d278..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/randomWordTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/streamMaker3.class b/target/scala-3.6.4/classes/scalation/random/streamMaker3.class deleted file mode 100644 index a0c7a5d34..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/streamMaker3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/streamMaker3.tasty b/target/scala-3.6.4/classes/scalation/random/streamMaker3.tasty deleted file mode 100644 index 0c22a20b3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/streamMaker3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/streamMakerGen.class b/target/scala-3.6.4/classes/scalation/random/streamMakerGen.class deleted file mode 100644 index e6276c7b2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/streamMakerGen.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/streamMakerGen.tasty 
b/target/scala-3.6.4/classes/scalation/random/streamMakerGen.tasty deleted file mode 100644 index 2c8e95f2e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/streamMakerGen.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/variateMatTest.class b/target/scala-3.6.4/classes/scalation/random/variateMatTest.class deleted file mode 100644 index cadb371d1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/variateMatTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/variateMatTest.tasty b/target/scala-3.6.4/classes/scalation/random/variateMatTest.tasty deleted file mode 100644 index 990d0d5b4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/variateMatTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/variateSetTest.class b/target/scala-3.6.4/classes/scalation/random/variateSetTest.class deleted file mode 100644 index 497c52a80..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/variateSetTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/variateSetTest.tasty b/target/scala-3.6.4/classes/scalation/random/variateSetTest.tasty deleted file mode 100644 index 0554ee0d7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/variateSetTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/variateSetTest2.class b/target/scala-3.6.4/classes/scalation/random/variateSetTest2.class deleted file mode 100644 index 142cd937f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/variateSetTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/variateSetTest2.tasty b/target/scala-3.6.4/classes/scalation/random/variateSetTest2.tasty deleted file mode 100644 index b51bd1166..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/variateSetTest2.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/variateSetTest3.class b/target/scala-3.6.4/classes/scalation/random/variateSetTest3.class deleted file mode 100644 index 76e9f3e14..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/variateSetTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/variateSetTest3.tasty b/target/scala-3.6.4/classes/scalation/random/variateSetTest3.tasty deleted file mode 100644 index b1b046a91..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/variateSetTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/variateTenTest.class b/target/scala-3.6.4/classes/scalation/random/variateTenTest.class deleted file mode 100644 index e9a79f448..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/variateTenTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/variateTenTest.tasty b/target/scala-3.6.4/classes/scalation/random/variateTenTest.tasty deleted file mode 100644 index 99b928d5f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/variateTenTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/variateTest.class b/target/scala-3.6.4/classes/scalation/random/variateTest.class deleted file mode 100644 index 82603de7f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/variateTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/variateTest.tasty b/target/scala-3.6.4/classes/scalation/random/variateTest.tasty deleted file mode 100644 index 1f94a1f91..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/variateTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/variateVecTest.class b/target/scala-3.6.4/classes/scalation/random/variateVecTest.class deleted file mode 100644 index 391ae5a71..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/random/variateVecTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/variateVecTest.tasty b/target/scala-3.6.4/classes/scalation/random/variateVecTest.tasty deleted file mode 100644 index 1ee59a335..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/variateVecTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/variateVecTest2.class b/target/scala-3.6.4/classes/scalation/random/variateVecTest2.class deleted file mode 100644 index f2c5af11f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/variateVecTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/random/variateVecTest2.tasty b/target/scala-3.6.4/classes/scalation/random/variateVecTest2.tasty deleted file mode 100644 index dc1af1263..000000000 Binary files a/target/scala-3.6.4/classes/scalation/random/variateVecTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/readFileTest.class b/target/scala-3.6.4/classes/scalation/readFileTest.class deleted file mode 100644 index af837e8c6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/readFileTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/readFileTest.tasty b/target/scala-3.6.4/classes/scalation/readFileTest.tasty deleted file mode 100644 index 08d8a7524..000000000 Binary files a/target/scala-3.6.4/classes/scalation/readFileTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/readFileTest2.class b/target/scala-3.6.4/classes/scalation/readFileTest2.class deleted file mode 100644 index bbf756353..000000000 Binary files a/target/scala-3.6.4/classes/scalation/readFileTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/readFileTest2.tasty b/target/scala-3.6.4/classes/scalation/readFileTest2.tasty deleted file mode 100644 index b7af834d2..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/readFileTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/redirectOutTest.class b/target/scala-3.6.4/classes/scalation/redirectOutTest.class deleted file mode 100644 index cee531e87..000000000 Binary files a/target/scala-3.6.4/classes/scalation/redirectOutTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/redirectOutTest.tasty b/target/scala-3.6.4/classes/scalation/redirectOutTest.tasty deleted file mode 100644 index f8644c33e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/redirectOutTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/ringTest.class b/target/scala-3.6.4/classes/scalation/ringTest.class deleted file mode 100644 index 6294942a9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/ringTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/ringTest.tasty b/target/scala-3.6.4/classes/scalation/ringTest.tasty deleted file mode 100644 index c4bfb97c4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/ringTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/runCalc.class b/target/scala-3.6.4/classes/scalation/runCalc.class deleted file mode 100644 index 9396a6136..000000000 Binary files a/target/scala-3.6.4/classes/scalation/runCalc.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/runCalc.tasty b/target/scala-3.6.4/classes/scalation/runCalc.tasty deleted file mode 100644 index d99dab296..000000000 Binary files a/target/scala-3.6.4/classes/scalation/runCalc.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/runCalcHelp.class b/target/scala-3.6.4/classes/scalation/runCalcHelp.class deleted file mode 100644 index dfb74b955..000000000 Binary files a/target/scala-3.6.4/classes/scalation/runCalcHelp.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/runCalcHelp.tasty b/target/scala-3.6.4/classes/scalation/runCalcHelp.tasty deleted file mode 100644 index 4123d5364..000000000 Binary files a/target/scala-3.6.4/classes/scalation/runCalcHelp.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Arc$.class b/target/scala-3.6.4/classes/scalation/scala2d/Arc$.class deleted file mode 100644 index 47b008f6b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Arc$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Arc.class b/target/scala-3.6.4/classes/scalation/scala2d/Arc.class deleted file mode 100644 index 555174e1d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Arc.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Arc.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Arc.tasty deleted file mode 100644 index 9ac740426..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Arc.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Arrow$.class b/target/scala-3.6.4/classes/scalation/scala2d/Arrow$.class deleted file mode 100644 index 49857b26b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Arrow$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Arrow$package$.class b/target/scala-3.6.4/classes/scalation/scala2d/Arrow$package$.class deleted file mode 100644 index 48ce41795..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Arrow$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Arrow$package$Canvas$1.class b/target/scala-3.6.4/classes/scalation/scala2d/Arrow$package$Canvas$1.class deleted file mode 100644 index 5fe3672cf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Arrow$package$Canvas$1.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/scala2d/Arrow$package.class b/target/scala-3.6.4/classes/scalation/scala2d/Arrow$package.class deleted file mode 100644 index 51dd602ec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Arrow$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Arrow$package.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Arrow$package.tasty deleted file mode 100644 index bdd484194..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Arrow$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Arrow.class b/target/scala-3.6.4/classes/scalation/scala2d/Arrow.class deleted file mode 100644 index efac5a818..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Arrow.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Arrow.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Arrow.tasty deleted file mode 100644 index c75b36f8c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Arrow.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Base$package$.class b/target/scala-3.6.4/classes/scalation/scala2d/Base$package$.class deleted file mode 100644 index 45d653703..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Base$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Base$package.class b/target/scala-3.6.4/classes/scalation/scala2d/Base$package.class deleted file mode 100644 index 1f140d414..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Base$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Base$package.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Base$package.tasty deleted file mode 100644 index 20de097b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Base$package.tasty 
and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/BorderLayout$.class b/target/scala-3.6.4/classes/scalation/scala2d/BorderLayout$.class deleted file mode 100644 index 72eff8485..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/BorderLayout$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/BorderLayout.class b/target/scala-3.6.4/classes/scalation/scala2d/BorderLayout.class deleted file mode 100644 index 7c681a92b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/BorderLayout.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/BorderLayout.tasty b/target/scala-3.6.4/classes/scalation/scala2d/BorderLayout.tasty deleted file mode 100644 index b5302fabb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/BorderLayout.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Colors$.class b/target/scala-3.6.4/classes/scalation/scala2d/Colors$.class deleted file mode 100644 index 3a58bbc8b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Colors$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Colors$Randi$.class b/target/scala-3.6.4/classes/scalation/scala2d/Colors$Randi$.class deleted file mode 100644 index e07b9af5f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Colors$Randi$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Colors$Randi.class b/target/scala-3.6.4/classes/scalation/scala2d/Colors$Randi.class deleted file mode 100644 index b95900854..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Colors$Randi.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Colors$package$.class b/target/scala-3.6.4/classes/scalation/scala2d/Colors$package$.class deleted file mode 100644 index 7a4b3a035..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/scala2d/Colors$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Colors$package.class b/target/scala-3.6.4/classes/scalation/scala2d/Colors$package.class deleted file mode 100644 index f24756b0c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Colors$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Colors$package.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Colors$package.tasty deleted file mode 100644 index bee3a10cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Colors$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Colors.class b/target/scala-3.6.4/classes/scalation/scala2d/Colors.class deleted file mode 100644 index 83f4921cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Colors.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Colors.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Colors.tasty deleted file mode 100644 index 452137a1d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Colors.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/CurvilinearShape.class b/target/scala-3.6.4/classes/scalation/scala2d/CurvilinearShape.class deleted file mode 100644 index e007cde8d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/CurvilinearShape.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/CurvilinearShape.tasty b/target/scala-3.6.4/classes/scalation/scala2d/CurvilinearShape.tasty deleted file mode 100644 index 956fa21f7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/CurvilinearShape.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Ellipse$.class b/target/scala-3.6.4/classes/scalation/scala2d/Ellipse$.class 
deleted file mode 100644 index f5ed78755..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Ellipse$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Ellipse.class b/target/scala-3.6.4/classes/scalation/scala2d/Ellipse.class deleted file mode 100644 index 8cb6e2164..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Ellipse.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Ellipse.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Ellipse.tasty deleted file mode 100644 index 0f0b32fe8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Ellipse.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Frame.class b/target/scala-3.6.4/classes/scalation/scala2d/Frame.class deleted file mode 100644 index 6ad7d1534..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Frame.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Frame.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Frame.tasty deleted file mode 100644 index 665e107d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Frame.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Hexagon$.class b/target/scala-3.6.4/classes/scalation/scala2d/Hexagon$.class deleted file mode 100644 index c6560e8cf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Hexagon$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Hexagon.class b/target/scala-3.6.4/classes/scalation/scala2d/Hexagon.class deleted file mode 100644 index fdff8956b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Hexagon.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Hexagon.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Hexagon.tasty deleted file mode 100644 index 
d63a4d1a5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Hexagon.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/ImageWriter$package$.class b/target/scala-3.6.4/classes/scalation/scala2d/ImageWriter$package$.class deleted file mode 100644 index 89e5513ca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/ImageWriter$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/ImageWriter$package.class b/target/scala-3.6.4/classes/scalation/scala2d/ImageWriter$package.class deleted file mode 100644 index e530b0846..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/ImageWriter$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/ImageWriter$package.tasty b/target/scala-3.6.4/classes/scalation/scala2d/ImageWriter$package.tasty deleted file mode 100644 index 166dc83d3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/ImageWriter$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Line$.class b/target/scala-3.6.4/classes/scalation/scala2d/Line$.class deleted file mode 100644 index ef29379d6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Line$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Line.class b/target/scala-3.6.4/classes/scalation/scala2d/Line.class deleted file mode 100644 index aaba8e411..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Line.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Line.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Line.tasty deleted file mode 100644 index d993698ce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Line.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Octagon$.class 
b/target/scala-3.6.4/classes/scalation/scala2d/Octagon$.class deleted file mode 100644 index 6fca1a336..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Octagon$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Octagon.class b/target/scala-3.6.4/classes/scalation/scala2d/Octagon.class deleted file mode 100644 index d30f22e4a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Octagon.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Octagon.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Octagon.tasty deleted file mode 100644 index 0fddbe463..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Octagon.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Path$.class b/target/scala-3.6.4/classes/scalation/scala2d/Path$.class deleted file mode 100644 index 857b23a2a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Path$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Path.class b/target/scala-3.6.4/classes/scalation/scala2d/Path.class deleted file mode 100644 index 56e29162e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Path.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Path.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Path.tasty deleted file mode 100644 index 1a7c83f87..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Path.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Pentagon$.class b/target/scala-3.6.4/classes/scalation/scala2d/Pentagon$.class deleted file mode 100644 index 03279ab38..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Pentagon$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Pentagon.class 
b/target/scala-3.6.4/classes/scalation/scala2d/Pentagon.class deleted file mode 100644 index f0f8202c3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Pentagon.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Pentagon.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Pentagon.tasty deleted file mode 100644 index b92074e13..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Pentagon.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Polygon$package$.class b/target/scala-3.6.4/classes/scalation/scala2d/Polygon$package$.class deleted file mode 100644 index 9f1391109..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Polygon$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Polygon$package$Canvas$1.class b/target/scala-3.6.4/classes/scalation/scala2d/Polygon$package$Canvas$1.class deleted file mode 100644 index dbc5249fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Polygon$package$Canvas$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Polygon$package$Canvas$2.class b/target/scala-3.6.4/classes/scalation/scala2d/Polygon$package$Canvas$2.class deleted file mode 100644 index 85f6c3553..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Polygon$package$Canvas$2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Polygon$package.class b/target/scala-3.6.4/classes/scalation/scala2d/Polygon$package.class deleted file mode 100644 index e45db21e8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Polygon$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Polygon$package.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Polygon$package.tasty deleted file mode 100644 index 3cc511871..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/scala2d/Polygon$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Polygon.class b/target/scala-3.6.4/classes/scalation/scala2d/Polygon.class deleted file mode 100644 index 7ea2b968d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Polygon.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Polygon.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Polygon.tasty deleted file mode 100644 index 46e9f3c95..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Polygon.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/QArrow$.class b/target/scala-3.6.4/classes/scalation/scala2d/QArrow$.class deleted file mode 100644 index 5cab85443..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/QArrow$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/QArrow$package$.class b/target/scala-3.6.4/classes/scalation/scala2d/QArrow$package$.class deleted file mode 100644 index 3a34f7eb0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/QArrow$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/QArrow$package$Canvas$1.class b/target/scala-3.6.4/classes/scalation/scala2d/QArrow$package$Canvas$1.class deleted file mode 100644 index d3634c397..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/QArrow$package$Canvas$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/QArrow$package.class b/target/scala-3.6.4/classes/scalation/scala2d/QArrow$package.class deleted file mode 100644 index 249ac7b7e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/QArrow$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/QArrow$package.tasty 
b/target/scala-3.6.4/classes/scalation/scala2d/QArrow$package.tasty deleted file mode 100644 index 4b38146cf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/QArrow$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/QArrow.class b/target/scala-3.6.4/classes/scalation/scala2d/QArrow.class deleted file mode 100644 index ca4032708..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/QArrow.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/QArrow.tasty b/target/scala-3.6.4/classes/scalation/scala2d/QArrow.tasty deleted file mode 100644 index a521c0110..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/QArrow.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/QCurve$.class b/target/scala-3.6.4/classes/scalation/scala2d/QCurve$.class deleted file mode 100644 index 81b7d28e6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/QCurve$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package$.class b/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package$.class deleted file mode 100644 index 4b740fcbd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package$Canvas$1.class b/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package$Canvas$1.class deleted file mode 100644 index 21ba9123c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package$Canvas$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package$QCurveAnimator$1$Canvas.class b/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package$QCurveAnimator$1$Canvas.class deleted file mode 100644 index c70b4753e..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package$QCurveAnimator$1$Canvas.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package$QCurveAnimator$1.class b/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package$QCurveAnimator$1.class deleted file mode 100644 index bd7a317e2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package$QCurveAnimator$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package.class b/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package.class deleted file mode 100644 index d12e16974..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package.tasty b/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package.tasty deleted file mode 100644 index 10ae4220a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/QCurve$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/QCurve.class b/target/scala-3.6.4/classes/scalation/scala2d/QCurve.class deleted file mode 100644 index 6ad21838b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/QCurve.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/QCurve.tasty b/target/scala-3.6.4/classes/scalation/scala2d/QCurve.tasty deleted file mode 100644 index 2bd73a42c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/QCurve.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Quad$.class b/target/scala-3.6.4/classes/scalation/scala2d/Quad$.class deleted file mode 100644 index fa181f9d7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Quad$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Quad.class 
b/target/scala-3.6.4/classes/scalation/scala2d/Quad.class deleted file mode 100644 index 1335562a9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Quad.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Quad.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Quad.tasty deleted file mode 100644 index 73d9f20a8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Quad.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Rectangle$.class b/target/scala-3.6.4/classes/scalation/scala2d/Rectangle$.class deleted file mode 100644 index 5304ec858..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Rectangle$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Rectangle.class b/target/scala-3.6.4/classes/scalation/scala2d/Rectangle.class deleted file mode 100644 index 176f53ea4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Rectangle.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Rectangle.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Rectangle.tasty deleted file mode 100644 index 2f4df9201..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Rectangle.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/RoundRectangle$.class b/target/scala-3.6.4/classes/scalation/scala2d/RoundRectangle$.class deleted file mode 100644 index 54a148145..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/RoundRectangle$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/RoundRectangle.class b/target/scala-3.6.4/classes/scalation/scala2d/RoundRectangle.class deleted file mode 100644 index 8808ae272..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/RoundRectangle.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/scala2d/RoundRectangle.tasty b/target/scala-3.6.4/classes/scalation/scala2d/RoundRectangle.tasty deleted file mode 100644 index 1d2fd8b0f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/RoundRectangle.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Shapes$package$.class b/target/scala-3.6.4/classes/scalation/scala2d/Shapes$package$.class deleted file mode 100644 index 618ac5733..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Shapes$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Shapes$package$Canvas$1.class b/target/scala-3.6.4/classes/scalation/scala2d/Shapes$package$Canvas$1.class deleted file mode 100644 index 731ef8fab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Shapes$package$Canvas$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Shapes$package.class b/target/scala-3.6.4/classes/scalation/scala2d/Shapes$package.class deleted file mode 100644 index 3e9295943..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Shapes$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Shapes$package.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Shapes$package.tasty deleted file mode 100644 index 27764c5d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Shapes$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Transform.class b/target/scala-3.6.4/classes/scalation/scala2d/Transform.class deleted file mode 100644 index 215d13c9e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Transform.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Transform.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Transform.tasty deleted file mode 100644 index 396ec1ae3..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Transform.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Triangle$.class b/target/scala-3.6.4/classes/scalation/scala2d/Triangle$.class deleted file mode 100644 index 3f81a645d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Triangle$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Triangle.class b/target/scala-3.6.4/classes/scalation/scala2d/Triangle.class deleted file mode 100644 index c395f62f0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Triangle.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/Triangle.tasty b/target/scala-3.6.4/classes/scalation/scala2d/Triangle.tasty deleted file mode 100644 index dcefbe4d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/Triangle.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/TrigConstants$.class b/target/scala-3.6.4/classes/scalation/scala2d/TrigConstants$.class deleted file mode 100644 index db431f618..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/TrigConstants$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/TrigConstants.class b/target/scala-3.6.4/classes/scalation/scala2d/TrigConstants.class deleted file mode 100644 index 902561292..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/TrigConstants.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/TrigConstants.tasty b/target/scala-3.6.4/classes/scalation/scala2d/TrigConstants.tasty deleted file mode 100644 index 25e69d1ae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/TrigConstants.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/VizFrame$.class b/target/scala-3.6.4/classes/scalation/scala2d/VizFrame$.class deleted file 
mode 100644 index a234163dc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/VizFrame$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/VizFrame.class b/target/scala-3.6.4/classes/scalation/scala2d/VizFrame.class deleted file mode 100644 index 52006a063..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/VizFrame.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/VizFrame.tasty b/target/scala-3.6.4/classes/scalation/scala2d/VizFrame.tasty deleted file mode 100644 index add312de3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/VizFrame.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/ZoomablePanel.class b/target/scala-3.6.4/classes/scalation/scala2d/ZoomablePanel.class deleted file mode 100644 index 948b0bbd2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/ZoomablePanel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/ZoomablePanel.tasty b/target/scala-3.6.4/classes/scalation/scala2d/ZoomablePanel.tasty deleted file mode 100644 index b496ea4be..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/ZoomablePanel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/arrowTest.class b/target/scala-3.6.4/classes/scalation/scala2d/arrowTest.class deleted file mode 100644 index ae85d8438..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/arrowTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/arrowTest.tasty b/target/scala-3.6.4/classes/scalation/scala2d/arrowTest.tasty deleted file mode 100644 index 9ff8117fa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/arrowTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/colorsTest.class 
b/target/scala-3.6.4/classes/scalation/scala2d/colorsTest.class deleted file mode 100644 index 23cd65784..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/colorsTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/colorsTest.tasty b/target/scala-3.6.4/classes/scalation/scala2d/colorsTest.tasty deleted file mode 100644 index d6c3f3f92..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/colorsTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/index.html b/target/scala-3.6.4/classes/scalation/scala2d/index.html deleted file mode 100644 index 1e5c550c6..000000000 --- a/target/scala-3.6.4/classes/scalation/scala2d/index.html +++ /dev/null @@ -1,19 +0,0 @@ - - -

    Source files in scala2d Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/scala2d/lineTest.class b/target/scala-3.6.4/classes/scalation/scala2d/lineTest.class deleted file mode 100644 index 817b668e6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/lineTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/lineTest.tasty b/target/scala-3.6.4/classes/scalation/scala2d/lineTest.tasty deleted file mode 100644 index f30ad283b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/lineTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/old/Polygon.scala.bak b/target/scala-3.6.4/classes/scalation/scala2d/old/Polygon.scala.bak deleted file mode 100644 index 42c9ac50a..000000000 --- a/target/scala-3.6.4/classes/scalation/scala2d/old/Polygon.scala.bak +++ /dev/null @@ -1,352 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Thu Oct 22 15:05:06 EDT 2009 - * @see LICENSE (MIT style license file). - */ - -package scalation -package scala2d - -import scala.math.{cos, Pi, sin} - -import Colors._ -import Constants._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Polygon` class enhances the `Path2D.Double` class (from the `java.awt.geom` - * package) by adding a constructor for building a polygon given its vertices. 
- * @param vertex the n >= 3 corner points of the polygon - */ -case class Polygon (vertex: Array [R2]) - extends java.awt.geom.Path2D.Double: - - private val flaw = flawf ("Polygon") - - { - val n = vertex.length - if n < 3 then flaw ("constructor", "need at least 3 vertices to make a polygon") - moveTo (vertex(0).x, vertex(0).y) - for i <- 1 until n do lineTo (vertex(i).x, vertex(i).y) - lineTo (vertex(0).x, vertex(0).y) - } // primary constructor - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the top-left coordinates of the polygons bounding box. - */ - def getTopLeft: R2 = - val bounds = getBounds2D () - new R2 (bounds.getX (), bounds.getY ()) - end getTopLeft - -end Polygon - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The Triangle class enhances the Path2D.Double class (from the `java.awt.geom` - * package) by adding a constructor for building a triangle given its vertices. - * @param vertex the three corner points of the triangle - */ -case class Triangle (vertex: Array [R2]) - extends java.awt.geom.Path2D.Double: - - private val flaw = flawf ("Triangle") - - { - val n = vertex.length - if (n != 3) flaw ("constructor", "need exactly 3 vertices to make a triangle") - moveTo (vertex(0).x, vertex(0).y) - for (i <- 1 until n) lineTo (vertex(i).x, vertex(i).y) - lineTo (vertex(0).x, vertex(0).y) - } // primary constructor - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a Right Isosceles Triangle. - * @param topLeft the top left point for the triangle (min x and y coordinates) - * @param side the length of the two sides emanating from top-left - */ - def this (topLeft: R2, side: Double) = - this (Array [R2] (topLeft, - new R2 (topLeft.x + side, topLeft.y), - new R2 (topLeft.x, topLeft.y + side))) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a Right Triangle. 
- * @param topLeft the top left point for the triangle (min x and y coordinates) - * @param side1 the width of the triangle (change in x) - * @param side2 the height of the triangle (change in y) - */ - def this (topLeft: R2, side1: Double, side2: Double) = - this (Array [R2] (topLeft, - new R2 (topLeft.x + side1, topLeft.y), - new R2 (topLeft.x, topLeft.y + side2))) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the top-left coordinates of the polygons bounding box. - */ - def getTopLeft: R2 = - val bounds = getBounds2D () - new R2 (bounds.getX (), bounds.getY ()) - end getTopLeft - -end Triangle - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Quad` class enhances the `Path2D.Double` class (from the `java.awt.geom` - * package) by adding a constructor for building a quadrilateral given its vertices. - * @param vertex the four corner points of the quadrilateral - */ -case class Quad (vertex: Array [R2]) - extends java.awt.geom.Path2D.Double: - - private val flaw = flawf ("Quad") - - { - val n = vertex.length - if (n != 4) flaw ("constructor", "need exactly 4 vertices to make a quad") - moveTo (vertex(0).x, vertex(0).y) - for (i <- 1 until n) lineTo (vertex(i).x, vertex(i).y) - lineTo (vertex(0).x, vertex(0).y) - } // primary constructor - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a square. - * @param topLeft the top left point for the square (min x and y coordinates) - * @param side the length of each side in the square - */ - def this (topLeft: R2, side: Double) = - this (Array [R2] (topLeft, - new R2 (topLeft.x + side, topLeft.y), - new R2 (topLeft.x + side, topLeft.y + side), - new R2 (topLeft.x, topLeft.y + side))) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a parallelogram (rectangle if shift is 0). 
- * @param topLeft the top left point for the parallelogram (min x and y coordinates). - * @param side1 the width of the parallelogram (change in x) - * @param side2 the height of the parallelogram (change in y) - * @param shift the x-shift between top and bottom sides - */ - def this (topLeft: R2, side1: Double, side2: Double, shift: Double = 0.0) = - this (Array [R2] (topLeft, - new R2 (topLeft.x + side1, topLeft.y), - new R2 (topLeft.x + side1 + shift, topLeft.y +side2), - new R2 (topLeft.x + shift, topLeft.y + side2))) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the top-left coordinates of the polygons bounding box. - */ - def getTopLeft: R2 = - val bounds = getBounds2D () - new R2 (bounds.getX (), bounds.getY ()) - end getTopLeft - -end Quad - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Pentagon` class enhances the `Path2D.Double` class (from the `java.awt.geom` - * package) by adding a constructor for building a pentagon given its vertices. - * @param vertex the five corner points of the pentagon - */ -case class Pentagon (vertex: Array [R2]) - extends java.awt.geom.Path2D.Double: - - private val flaw = flawf ("Pentagon") - - { - val n = vertex.length - if (n != 5) flaw ("constructor", "need exactly 5 vertices to make a pentagon") - moveTo (vertex(0).x, vertex(0).y) - for (i <- 1 until n) lineTo (vertex(i).x, vertex(i).y) - lineTo (vertex(0).x, vertex(0).y) - } // primary constructor - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a equilateral `Pentagon`. 
- * @param topLeft the top left point for the pentagon (min x and y coordinates) - * @param side the length of each side in the pentagon - */ - def this (topLeft: R2, side: Double) = - this (Array [R2] (new R2 (topLeft.x + cos72 * side, topLeft.y), - new R2 (topLeft.x + (1.0 + cos72) * side, topLeft.y), - new R2 (topLeft.x + (1.0 + 2.0 * cos72) * side, topLeft.y + sin72 * side), - new R2 (topLeft.x + (0.5 + cos72) * side, topLeft.y + (1.0 + 2.0 * cos72) * side), - new R2 (topLeft.x, topLeft.y + sin72 * side))) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the top-left coordinates of the polygons bounding box. - */ - def getTopLeft: R2 = - val bounds = getBounds2D () - new R2 (bounds.getX (), bounds.getY ()) - end getTopLeft - -end Pentagon - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Hexagon` class enhances the `Path2D.Double` class (from the `java.awt.geom` - * package) by adding a constructor for building a hexagon given its vertices. - * @param vertex the six corner points of the hexagon - */ -case class Hexagon (vertex: Array [R2]) - extends java.awt.geom.Path2D.Double: - - private val flaw = flawf ("Hexagon") - - { - val n = vertex.length - if (n != 6) flaw ("constructor", "need exactly 6 vertices to make a hexagon") - moveTo (vertex(0).x, vertex(0).y) - for (i <- 1 until n) lineTo (vertex(i).x, vertex(i).y) - lineTo (vertex(0).x, vertex(0).y) - } // primary constructor - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a equilateral `Hexagon`. 
- * @param topLeft the top left point for the hexagon (min x and y coordinates) - * @param side the length of each side in the hexagon - */ - def this (topLeft: R2, side: Double) = - this (Array [R2] (topLeft, - new R2 (topLeft.x + side, topLeft.y), - new R2 (topLeft.x + (1.0 + cos60) * side, topLeft.y + sin60 * side), - new R2 (topLeft.x + side, topLeft.y + 2.0 * sin60 * side), - new R2 (topLeft.x, topLeft.y + 2.0 * sin60 * side), - new R2 (topLeft.x - cos60 * side, topLeft.y + sin60 * side))) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the top-left coordinates of the polygons bounding box. - */ - def getTopLeft: R2 = - val bounds = getBounds2D () - new R2 (bounds.getX (), bounds.getY ()) - end getTopLeft - -end Hexagon - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Octagon` class enhances the `Path2D.Double` class (from the `java.awt.geom` - * package) by adding a constructor for building an octagon given its vertices. - * @param vertex the eight corner points of the octagon - */ -case class Octagon (vertex: Array [R2]) - extends java.awt.geom.Path2D.Double: - - private val flaw = flawf ("Octagon") - - { - val n = vertex.length - if (n != 8) flaw ("constructor", "need exactly 8 vertices to make an octagon") - moveTo (vertex(0).x, vertex(0).y) - for (i <- 1 until n) lineTo (vertex(i).x, vertex(i).y) - lineTo (vertex(0).x, vertex(0).y) - } // primary constructor - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a equi-lateral `Octagon`. 
- * @param topLeft the top left point for the octagon (min x and y coordinates) - * @param side the length of each side in the octagon - */ - def this (topLeft: R2, side: Double) = - this (Array [R2] (new R2 (topLeft.x + cos45 * side, topLeft.y), - new R2 (topLeft.x + (1.0 + cos45) * side, topLeft.y), - new R2 (topLeft.x + (1.0 + 2.0 * cos45) * side, topLeft.y + cos45 * side), - new R2 (topLeft.x + (1.0 + 2.0 * cos45) * side, topLeft.y + (1.0 + cos45) * side), - new R2 (topLeft.x + (1.0 + cos45) * side, topLeft.y + (1.0 + 2.0 * cos45) * side), - new R2 (topLeft.x + cos45 * side, topLeft.y + (1.0 + 2.0 * cos45) * side), - new R2 (topLeft.x, topLeft.y + (1.0 + cos45) * side), - new R2 (topLeft.x, topLeft.y + cos45 * side))) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the top-left coordinates of the polygons bounding box. - */ - def getTopLeft: R2 = - val bounds = getBounds2D () - new R2 (bounds.getX (), bounds.getY ()) - end getTopLeft - -end Octagon - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Useful constants. - */ -object Constants: - - val cos45 = cos (Pi / 4.0) // same value for sin45 - val cos60 = cos (Pi / 3.0) - val sin60 = sin (Pi / 3.0) - val cos72 = cos (2.0 * Pi / 5.0) - val sin72 = sin (2.0 * Pi / 5.0) - -end Constants - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `PolygonTest` object tests the `Polygon`, `Triangle`, `Quad`, `Hexagon` - * and Octagon classes. 
- * > runMain scalation.scala2d.PolygonTest - */ -object PolygonTest extends App: - - banner ("Running PolygonTest") - - private val dot = Ellipse () - private val triangle = new Triangle (new R2 (100, 100), 100, 150) - private val triangleXY = triangle.getTopLeft - private val square = new Quad (new R2 (400, 100), 150) - private val squareXY = square.getTopLeft - private val parogram = new Quad (new R2 (100, 350), 150, 100, 25) - private val parogramXY = parogram.getTopLeft - private val pentagon = new Pentagon (new R2 (400, 350), 90) - private val pentagonXY = pentagon.getTopLeft - private val hexagon = new Hexagon (new R2 (100, 600), 80) - private val hexagonXY = hexagon.getTopLeft - private val octagon = new Octagon (new R2 (400, 600), 70) - private val octagonXY = octagon.getTopLeft - - class Canvas extends Panel: - - setBackground (white) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Paint the components into the canvas (drawing panel). - * @param gr low-resolution graphics environment - */ - override def paintComponent (gr: Graphics): Unit = - super.paintComponent (gr) - val g2d = gr.asInstanceOf [Graphics2D] // use hi-resolution - g2d.setPaint (red); g2d.fill (triangle) - g2d.setPaint (black); dot.setFrame (triangleXY.x, triangleXY.y, 5, 5); g2d.fill (dot) - g2d.setPaint (green); g2d.fill (square) - g2d.setPaint (black); dot.setFrame (squareXY.x, squareXY.y, 5, 5); g2d.fill (dot) - g2d.setPaint (blue); g2d.fill (parogram) - g2d.setPaint (black); dot.setFrame (parogramXY.x, parogramXY.y, 5, 5); g2d.fill (dot) - g2d.setPaint (yellow); g2d.fill (pentagon) - g2d.setPaint (black); dot.setFrame (pentagonXY.x, pentagonXY.y, 5, 5); g2d.fill (dot) - g2d.setPaint (cyan); g2d.fill (hexagon) - g2d.setPaint (black); dot.setFrame (hexagonXY.x, hexagonXY.y, 5, 5); g2d.fill (dot) - g2d.setPaint (magenta); g2d.fill (octagon) - g2d.setPaint (black); dot.setFrame (octagonXY.x, octagonXY.y, 5, 5); g2d.fill (dot) - end paintComponent - - end 
Canvas - - // Put the drawing canvas in the visualization frame - - new VizFrame ("PolygonTest", new Canvas (), 700, 900) - -end PolygonTest - diff --git a/target/scala-3.6.4/classes/scalation/scala2d/old/Polygon.scala.bak2 b/target/scala-3.6.4/classes/scalation/scala2d/old/Polygon.scala.bak2 deleted file mode 100644 index dd927a235..000000000 --- a/target/scala-3.6.4/classes/scalation/scala2d/old/Polygon.scala.bak2 +++ /dev/null @@ -1,357 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Thu Oct 22 15:05:06 EDT 2009 - * @see LICENSE (MIT style license file). - */ - -package scalation -package scala2d - -import scala.math.{cos, Pi, sin} - -import Colors._ -import Constants._ - -// FIX - add setFrame method - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Polygon` class enhances the `Path2D.Double` class (from the `java.awt.geom` - * package) by adding a constructor for building a polygon given its vertices. - * @param vertex the n >= 3 corner points of the polygon - */ -case class Polygon (vertex: Array [R2]) - extends java.awt.geom.Path2D.Double: - - private val flaw = flawf ("Polygon") - - { - val n = vertex.length - if n < 3 then flaw ("constructor", "need at least 3 vertices to make a polygon") - moveTo (vertex(0).x, vertex(0).y) - for i <- 1 until n do lineTo (vertex(i).x, vertex(i).y) - lineTo (vertex(0).x, vertex(0).y) - } // primary constructor - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the top-left coordinates of the polygons bounding box. 
- */ - def getTopLeft: R2 = - val bounds = getBounds2D () - new R2 (bounds.getX (), bounds.getY ()) - end getTopLeft - -end Polygon - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The Triangle class enhances the Path2D.Double class (from the `java.awt.geom` - * package) by adding a constructor for building a triangle given its vertices. - * @param vertex the three corner points of the triangle - */ -case class Triangle (vertex: Array [R2]) - extends java.awt.geom.Path2D.Double: - - private val flaw = flawf ("Triangle") - - private val n = vertex.length - if n != 3 then flaw ("constructor", "need exactly 3 vertices to make a triangle") - moveTo (vertex(0).x, vertex(0).y) - for i <- 1 until n do lineTo (vertex(i).x, vertex(i).y) - lineTo (vertex(0).x, vertex(0).y) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a Right Isosceles Triangle. - * @param topLeft the top left point for the triangle (min x and y coordinates) - * @param side the length of the two sides emanating from top-left - */ - def this (topLeft: R2, side: Double) = - this (Array [R2] (topLeft, - new R2 (topLeft.x + side, topLeft.y), - new R2 (topLeft.x, topLeft.y + side))) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a Right Triangle. - * @param topLeft the top left point for the triangle (min x and y coordinates) - * @param side1 the width of the triangle (change in x) - * @param side2 the height of the triangle (change in y) - */ - def this (topLeft: R2, side1: Double, side2: Double) = - this (Array [R2] (topLeft, - new R2 (topLeft.x + side1, topLeft.y), - new R2 (topLeft.x, topLeft.y + side2))) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the top-left coordinates of the polygons bounding box. 
- */ - def getTopLeft: R2 = - val bounds = getBounds2D () - new R2 (bounds.getX (), bounds.getY ()) - end getTopLeft - -end Triangle - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Quad` class enhances the `Path2D.Double` class (from the `java.awt.geom` - * package) by adding a constructor for building a quadrilateral given its vertices. - * @param vertex the four corner points of the quadrilateral - */ -case class Quad (vertex: Array [R2]) - extends java.awt.geom.Path2D.Double: - - private val flaw = flawf ("Quad") - - private val n = vertex.length - if n != 4 then flaw ("constructor", "need exactly 4 vertices to make a quad") - moveTo (vertex(0).x, vertex(0).y) - for i <- 1 until n do lineTo (vertex(i).x, vertex(i).y) - lineTo (vertex(0).x, vertex(0).y) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a square. - * @param topLeft the top left point for the square (min x and y coordinates) - * @param side the length of each side in the square - */ - def this (topLeft: R2, side: Double) = - this (Array [R2] (topLeft, - new R2 (topLeft.x + side, topLeft.y), - new R2 (topLeft.x + side, topLeft.y + side), - new R2 (topLeft.x, topLeft.y + side))) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a parallelogram (rectangle if shift is 0). - * @param topLeft the top left point for the parallelogram (min x and y coordinates). 
- * @param side1 the width of the parallelogram (change in x) - * @param side2 the height of the parallelogram (change in y) - * @param shift the x-shift between top and bottom sides - */ - def this (topLeft: R2, side1: Double, side2: Double, shift: Double = 0.0) = - this (Array [R2] (topLeft, - new R2 (topLeft.x + side1, topLeft.y), - new R2 (topLeft.x + side1 + shift, topLeft.y +side2), - new R2 (topLeft.x + shift, topLeft.y + side2))) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the top-left coordinates of the polygons bounding box. - */ - def getTopLeft: R2 = - val bounds = getBounds2D () - new R2 (bounds.getX (), bounds.getY ()) - end getTopLeft - -end Quad - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Pentagon` class enhances the `Path2D.Double` class (from the `java.awt.geom` - * package) by adding a constructor for building a pentagon given its vertices. - * @param vertex the five corner points of the pentagon - */ -case class Pentagon (vertex: Array [R2]) - extends java.awt.geom.Path2D.Double: - - private val flaw = flawf ("Pentagon") - - private val n = vertex.length - if n != 5 then flaw ("constructor", "need exactly 5 vertices to make a pentagon") - moveTo (vertex(0).x, vertex(0).y) - for i <- 1 until n do lineTo (vertex(i).x, vertex(i).y) - lineTo (vertex(0).x, vertex(0).y) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a equilateral `Pentagon`. 
- * @param topLeft the top left point for the pentagon (min x and y coordinates) - * @param side the length of each side in the pentagon - */ - def this (topLeft: R2, side: Double) = - this (Array [R2] (new R2 (topLeft.x + cos72 * side, topLeft.y), - new R2 (topLeft.x + (1.0 + cos72) * side, topLeft.y), - new R2 (topLeft.x + (1.0 + 2.0 * cos72) * side, topLeft.y + sin72 * side), - new R2 (topLeft.x + (0.5 + cos72) * side, topLeft.y + (1.0 + 2.0 * cos72) * side), - new R2 (topLeft.x, topLeft.y + sin72 * side))) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the top-left coordinates of the polygons bounding box. - */ - def getTopLeft: R2 = - val bounds = getBounds2D () - new R2 (bounds.getX (), bounds.getY ()) - end getTopLeft - -end Pentagon - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Hexagon` class enhances the `Path2D.Double` class (from the `java.awt.geom` - * package) by adding a constructor for building a hexagon given its vertices. - * @param vertex the six corner points of the hexagon - */ -case class Hexagon (vertex: Array [R2]) - extends java.awt.geom.Path2D.Double: - - private val flaw = flawf ("Hexagon") - - private val n = vertex.length - if n != 6 then flaw ("constructor", "need exactly 6 vertices to make a hexagon") - moveTo (vertex(0).x, vertex(0).y) - for i <- 1 until n do lineTo (vertex(i).x, vertex(i).y) - lineTo (vertex(0).x, vertex(0).y) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a equilateral `Hexagon`. 
- * @param topLeft the top left point for the hexagon (min x and y coordinates) - * @param side the length of each side in the hexagon - */ - def this (topLeft: R2, side: Double) = - this (Array [R2] (topLeft, - new R2 (topLeft.x + side, topLeft.y), - new R2 (topLeft.x + (1.0 + cos60) * side, topLeft.y + sin60 * side), - new R2 (topLeft.x + side, topLeft.y + 2.0 * sin60 * side), - new R2 (topLeft.x, topLeft.y + 2.0 * sin60 * side), - new R2 (topLeft.x - cos60 * side, topLeft.y + sin60 * side))) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the top-left coordinates of the polygons bounding box. - */ - def getTopLeft: R2 = - val bounds = getBounds2D () - new R2 (bounds.getX (), bounds.getY ()) - end getTopLeft - -end Hexagon - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Octagon` class enhances the `Path2D.Double` class (from the `java.awt.geom` - * package) by adding a constructor for building an octagon given its vertices. - * @param vertex the eight corner points of the octagon - */ -case class Octagon (vertex: Array [R2]) - extends java.awt.geom.Path2D.Double: - - private val flaw = flawf ("Octagon") - - private val n = vertex.length - if n != 8 then flaw ("constructor", "need exactly 8 vertices to make an octagon") - moveTo (vertex(0).x, vertex(0).y) - for i <- 1 until n do lineTo (vertex(i).x, vertex(i).y) - lineTo (vertex(0).x, vertex(0).y) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Construct a equi-lateral `Octagon`. 
- * @param topLeft the top left point for the octagon (min x and y coordinates) - * @param side the length of each side in the octagon - */ - def this (topLeft: R2, side: Double) = - this (Array [R2] (new R2 (topLeft.x + cos45 * side, topLeft.y), - new R2 (topLeft.x + (1.0 + cos45) * side, topLeft.y), - new R2 (topLeft.x + (1.0 + 2.0 * cos45) * side, topLeft.y + cos45 * side), - new R2 (topLeft.x + (1.0 + 2.0 * cos45) * side, topLeft.y + (1.0 + cos45) * side), - - new R2 (topLeft.x + (1.0 + cos45) * side, topLeft.y + (1.0 + 2.0 * cos45) * side), - new R2 (topLeft.x + cos45 * side, topLeft.y + (1.0 + 2.0 * cos45) * side), - new R2 (topLeft.x, topLeft.y + (1.0 + cos45) * side), - new R2 (topLeft.x, topLeft.y + cos45 * side))) - end this - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the top-left coordinates of the polygons bounding box. - */ - def getTopLeft: R2 = - val bounds = getBounds2D () - new R2 (bounds.getX (), bounds.getY ()) - end getTopLeft - -end Octagon - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Useful constants. - */ -object Constants: - - val cos45 = cos (Pi / 4.0) // same value for sin45 - val cos60 = cos (Pi / 3.0) - val sin60 = sin (Pi / 3.0) - val cos72 = cos (2.0 * Pi / 5.0) - val sin72 = sin (2.0 * Pi / 5.0) - -end Constants - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `PolygonTest` object tests the `Polygon`, `Triangle`, `Quad`, `Hexagon` - * and Octagon classes. 
- * > runMain scalation.scala2d.PolygonTest - */ -object PolygonTest extends App: - - banner ("Running PolygonTest") - - private val dot = Ellipse () - private val triangle = new Triangle (new R2 (100, 100), 100, 150) - private val triangleXY = triangle.getTopLeft - private val square = new Quad (new R2 (400, 100), 150) - private val squareXY = square.getTopLeft - private val parogram = new Quad (new R2 (100, 350), 150, 100, 25) - private val parogramXY = parogram.getTopLeft - private val pentagon = new Pentagon (new R2 (400, 350), 90) - private val pentagonXY = pentagon.getTopLeft - private val hexagon = new Hexagon (new R2 (100, 600), 80) - private val hexagonXY = hexagon.getTopLeft - private val octagon = new Octagon (new R2 (400, 600), 70) - private val octagonXY = octagon.getTopLeft -/* - private val stopSign = new OctagonHF (); stopSign.setFrame (100, 850, 70, 0.5) - private val stopSignXY = stopSign.getTopLeft - private val stopSign2 = new OctagonVF (); stopSign2.setFrame (400, 850, 70, 0.5) - private val stopSign2XY = stopSign2.getTopLeft -*/ - - class Canvas extends Panel: - - setBackground (white) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Paint the components into the canvas (drawing panel). 
- * @param gr low-resolution graphics environment - */ - override def paintComponent (gr: Graphics): Unit = - super.paintComponent (gr) - val g2d = gr.asInstanceOf [Graphics2D] // use hi-resolution - g2d.setPaint (orange); g2d.fill (triangle) - g2d.setPaint (black); dot.setFrame (triangleXY.x, triangleXY.y, 5, 5); g2d.fill (dot) - g2d.setPaint (green); g2d.fill (square) - g2d.setPaint (black); dot.setFrame (squareXY.x, squareXY.y, 5, 5); g2d.fill (dot) - g2d.setPaint (blue); g2d.fill (parogram) - g2d.setPaint (black); dot.setFrame (parogramXY.x, parogramXY.y, 5, 5); g2d.fill (dot) - g2d.setPaint (yellow); g2d.fill (pentagon) - g2d.setPaint (black); dot.setFrame (pentagonXY.x, pentagonXY.y, 5, 5); g2d.fill (dot) - g2d.setPaint (cyan); g2d.fill (hexagon) - g2d.setPaint (black); dot.setFrame (hexagonXY.x, hexagonXY.y, 5, 5); g2d.fill (dot) - g2d.setPaint (magenta); g2d.fill (octagon) - g2d.setPaint (black); dot.setFrame (octagonXY.x, octagonXY.y, 5, 5); g2d.fill (dot) -/* - g2d.setPaint (red); g2d.fill (stopSign) - g2d.setPaint (black); dot.setFrame (stopSignXY.x, stopSignXY.y, 5, 5); g2d.fill (dot) - g2d.setPaint (red); g2d.fill (stopSign2) - g2d.setPaint (black); dot.setFrame (stopSign2XY.x, stopSign2XY.y, 5, 5); g2d.fill (dot) -*/ - end paintComponent - - end Canvas - - // Put the drawing canvas in the visualization frame - - new VizFrame ("PolygonTest", new Canvas (), 700, 1100) - -end PolygonTest - diff --git a/target/scala-3.6.4/classes/scalation/scala2d/old/Polygon.scala.bak3 b/target/scala-3.6.4/classes/scalation/scala2d/old/Polygon.scala.bak3 deleted file mode 100644 index e5fd70114..000000000 --- a/target/scala-3.6.4/classes/scalation/scala2d/old/Polygon.scala.bak3 +++ /dev/null @@ -1,229 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Thu Oct 22 15:05:06 EDT 2009 - * @see LICENSE (MIT style license file). 
- */ - -package scalation -package scala2d - -import scala.math.{cos, Pi, sin} - -import scalation.mathstat.VectorD - -import Colors._ -import Constants._ - -// FIX - add setFrame method - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Triangle` class extends the `Polygon` class (from the `java.awt.geom`). - */ -class Triangle () - extends Polygon (): - - private val flaw = flawf ("Triangle") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the points/vetices for the triangle. - * @param x the x-coordinates - * @param y the y-coordinates - */ - def addPoints (x: VectorD, y: VectorD): Unit = - if x.dim != 3 || y.dim != 3 then flaw ("addPoints", "need exactly 3 vertices to make a triangle") - for i <- x.indices do addPoint (x(i).toInt, y(i).toInt) - end addPoints - -end Triangle - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Quad` class extends the `Polygon` class (from the `java.awt.geom`). - */ -class Quad () - extends Polygon (): - - private val flaw = flawf ("Quad") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the points/vetices for the quad. - * @param x the x-coordinates - * @param y the y-coordinates - */ - def addPoints (x: VectorD, y: VectorD): Unit = - if x.dim != 4 || y.dim != 4 then flaw ("addPoints", "need exactly 4 vertices to make a quad") - for i <- x.indices do addPoint (x(i).toInt, y(i).toInt) - end addPoints - -end Quad - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Pentagon` class extends the `Polygon` class (from the `java.awt.geom`). - */ -class Pentagon () - extends Polygon (): - - private val flaw = flawf ("Pentagon") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the points/vetices for the quad. 
- * @param x the x-coordinates - * @param y the y-coordinates - */ - def addPoints (x: VectorD, y: VectorD): Unit = - if x.dim != 5 || y.dim != 5 then flaw ("addPoints", "need exactly 5 vertices to make a pentagon") - for i <- x.indices do addPoint (x(i).toInt, y(i).toInt) - end addPoints - -end Pentagon - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Hexagon` class extends the `Polygon` class (from the `java.awt.geom`). - */ -class Hexagon () - extends Polygon (): - - private val flaw = flawf ("Hexagon") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the points/vetices for the quad. - * @param x the x-coordinates - * @param y the y-coordinates - */ - def addPoints (x: VectorD, y: VectorD): Unit = - if x.dim != 6 || y.dim != 6 then flaw ("addPoints", "need exactly 6 vertices to make a hexagon") - for i <- x.indices do addPoint (x(i).toInt, y(i).toInt) - end addPoints - -end Hexagon - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Octagon` class extends the `Polygon` class (from the `java.awt.geom`). - */ -class Octagon () - extends Polygon (): - - private val flaw = flawf ("Octagon") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the points/vetices for the quad. - * @param x the x-coordinates - * @param y the y-coordinates - */ - def addPoints (x: VectorD, y: VectorD): Unit = - if x.dim != 8 || y.dim != 8 then flaw ("addPoints", "need exactly 8 vertices to make a hexagon") - for i <- x.indices do addPoint (x(i).toInt, y(i).toInt) - end addPoints - -end Octagon - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `OctagonHV` class extends the `Polygon` class (from the `java.awt.geom`). 
- */ -class OctagonHV () - extends Polygon (): - - private val flaw = flawf ("OctagonHV") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Set the points/vetices for the horizontally flattened octagon. - * @param x the x-coordinates - * @param y the y-coordinates - */ - def addPoints (x: VectorD, y: VectorD): Unit = - if x.dim != 8 || y.dim != 8 then flaw ("addPoints", "need exactly 8 vertices to make a hexagon") - for i <- x.indices do addPoint (x(i).toInt, y(i).toInt) - end addPoints - -end OctagonHV - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** Useful constants. - */ -object Constants: - - val cos45 = cos (Pi / 4.0) // same value for sin45 - val cos60 = cos (Pi / 3.0) - val sin60 = sin (Pi / 3.0) - val cos72 = cos (2.0 * Pi / 5.0) - val sin72 = sin (2.0 * Pi / 5.0) - -end Constants - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `PolygonTest` object tests the `Polygon`, `Triangle`, `Quad`, `Hexagon` - * and Octagon classes. 
- * > runMain scalation.scala2d.PolygonTest - */ -object PolygonTest extends App: - - banner ("Running PolygonTest") - - private val dot = Ellipse () - private val triangle = new Triangle () - triangle.addPionts (VectorD (100, 100, 150), VectorD (100, 150, 150)) - private val triangleXY = triangle.getTopLeft -/* - private val square = new Quad (new R2 (400, 100), 150) - private val squareXY = square.getTopLeft - private val parogram = new Quad (new R2 (100, 350), 150, 100, 25) - private val parogramXY = parogram.getTopLeft - private val pentagon = new Pentagon (new R2 (400, 350), 90) - private val pentagonXY = pentagon.getTopLeft - private val hexagon = new Hexagon (new R2 (100, 600), 80) - private val hexagonXY = hexagon.getTopLeft - private val octagon = new Octagon (new R2 (400, 600), 70) - private val octagonXY = octagon.getTopLeft - private val stopSign = new OctagonHF (); stopSign.setFrame (100, 850, 70, 0.5) - private val stopSignXY = stopSign.getTopLeft - private val stopSign2 = new OctagonVF (); stopSign2.setFrame (400, 850, 70, 0.5) - private val stopSign2XY = stopSign2.getTopLeft -*/ - - class Canvas extends Panel: - - setBackground (white) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Paint the components into the canvas (drawing panel). 
- * @param gr low-resolution graphics environment - */ - override def paintComponent (gr: Graphics): Unit = - super.paintComponent (gr) - val g2d = gr.asInstanceOf [Graphics2D] // use hi-resolution - g2d.setPaint (orange); g2d.fill (triangle) - g2d.setPaint (black); dot.setFrame (triangleXY.x, triangleXY.y, 5, 5); g2d.fill (dot) -/* - g2d.setPaint (green); g2d.fill (square) - g2d.setPaint (black); dot.setFrame (squareXY.x, squareXY.y, 5, 5); g2d.fill (dot) - g2d.setPaint (blue); g2d.fill (parogram) - g2d.setPaint (black); dot.setFrame (parogramXY.x, parogramXY.y, 5, 5); g2d.fill (dot) - g2d.setPaint (yellow); g2d.fill (pentagon) - g2d.setPaint (black); dot.setFrame (pentagonXY.x, pentagonXY.y, 5, 5); g2d.fill (dot) - g2d.setPaint (cyan); g2d.fill (hexagon) - g2d.setPaint (black); dot.setFrame (hexagonXY.x, hexagonXY.y, 5, 5); g2d.fill (dot) - g2d.setPaint (magenta); g2d.fill (octagon) - g2d.setPaint (black); dot.setFrame (octagonXY.x, octagonXY.y, 5, 5); g2d.fill (dot) - g2d.setPaint (red); g2d.fill (stopSign) - g2d.setPaint (black); dot.setFrame (stopSignXY.x, stopSignXY.y, 5, 5); g2d.fill (dot) - g2d.setPaint (red); g2d.fill (stopSign2) - g2d.setPaint (black); dot.setFrame (stopSign2XY.x, stopSign2XY.y, 5, 5); g2d.fill (dot) -*/ - end paintComponent - - end Canvas - - // Put the drawing canvas in the visualization frame - - new VizFrame ("PolygonTest", new Canvas (), 700, 1100) - -end PolygonTest - diff --git a/target/scala-3.6.4/classes/scalation/scala2d/polygonTest.class b/target/scala-3.6.4/classes/scalation/scala2d/polygonTest.class deleted file mode 100644 index 171e0f0c8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/polygonTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/polygonTest.tasty b/target/scala-3.6.4/classes/scalation/scala2d/polygonTest.tasty deleted file mode 100644 index dd3b87fc6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/polygonTest.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/polygonTest2.class b/target/scala-3.6.4/classes/scalation/scala2d/polygonTest2.class deleted file mode 100644 index c0d795c0d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/polygonTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/polygonTest2.tasty b/target/scala-3.6.4/classes/scalation/scala2d/polygonTest2.tasty deleted file mode 100644 index d774f3084..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/polygonTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/qArrowTest.class b/target/scala-3.6.4/classes/scalation/scala2d/qArrowTest.class deleted file mode 100644 index 383c68c52..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/qArrowTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/qArrowTest.tasty b/target/scala-3.6.4/classes/scalation/scala2d/qArrowTest.tasty deleted file mode 100644 index 2669f4b77..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/qArrowTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/qCurveTest.class b/target/scala-3.6.4/classes/scalation/scala2d/qCurveTest.class deleted file mode 100644 index b17e685ba..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/qCurveTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/qCurveTest.tasty b/target/scala-3.6.4/classes/scalation/scala2d/qCurveTest.tasty deleted file mode 100644 index bd06477f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/qCurveTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/qCurveTest2.class b/target/scala-3.6.4/classes/scalation/scala2d/qCurveTest2.class deleted file mode 100644 index c542ac4cf..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/scala2d/qCurveTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/qCurveTest2.tasty b/target/scala-3.6.4/classes/scalation/scala2d/qCurveTest2.tasty deleted file mode 100644 index bd14ad169..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/qCurveTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/writeImageTest.class b/target/scala-3.6.4/classes/scalation/scala2d/writeImageTest.class deleted file mode 100644 index b8ad8feb9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/writeImageTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala2d/writeImageTest.tasty b/target/scala-3.6.4/classes/scalation/scala2d/writeImageTest.tasty deleted file mode 100644 index b0815d0a0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala2d/writeImageTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Clock.class b/target/scala-3.6.4/classes/scalation/scala3d/Clock.class deleted file mode 100644 index c7cf37444..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Clock.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Clock.tasty b/target/scala-3.6.4/classes/scalation/scala3d/Clock.tasty deleted file mode 100644 index 7a1ff478d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Clock.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Gfx3DExample.scalaa b/target/scala-3.6.4/classes/scalation/scala3d/Gfx3DExample.scalaa deleted file mode 100644 index b7c57db44..000000000 --- a/target/scala-3.6.4/classes/scalation/scala3d/Gfx3DExample.scalaa +++ /dev/null @@ -1,77 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Thu Aug 5 14:50:12 EDT 2021 - * @see 
LICENSE (MIT style license file). - * - * @title Translation of Jenkov Tutorial from Java to Scala - JavaFX 3D Example - * - * @see tutorials.jenkov.com/javafx/3d.html - * @see alvinalexander.com/source-code/scala-javafx-application-launch-scene-stylesheets/ - * @see openjfx.io/javadoc/16/ - */ - -// FIX - fails to launch Application - could be a build.sbt problem - -package scalation -package scala3d - -import javafx.application.{Application, ConditionalFeature, Platform} -import javafx.scene.{Group, PerspectiveCamera, Scene} -import javafx.scene.shape.{Box, CullFace} -import javafx.scene.transform.Rotate -import javafx.stage.Stage - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Gfx3DExample` companion obejct is use to invoke the `Gfx3DExample` class. - * > runMain scalation.scala3d.Gfx3DExample - */ -object Gfx3DExample: - - def main (args: Array [String]): Unit = - Application.launch (classOf [Gfx3DExample], args :_*) - end main - -end Gfx3DExample - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Gfx3DExample` class displays a 3D `Box`. - */ -class Gfx3DExample extends Application: - - def start (primaryStage: Stage): Unit = - - val is3DSupported = Platform.isSupported (ConditionalFeature.SCENE3D) - if ! 
is3DSupported then - println ("Sorry, 3D is not supported in JavaFX on this platform.") - return - end if - - val box = new Box (100, 100, 100) - box.setCullFace (CullFace.NONE) - box.setTranslateX (250) - box.setTranslateY (100) - box.setTranslateZ (400) - - val fixedEyeAtCameraZero = false - val camera = new PerspectiveCamera (fixedEyeAtCameraZero) - camera.setTranslateX (150) - camera.setTranslateY (-100) - camera.setTranslateZ (250) - - val root = new Group (box) - root.setRotationAxis (Rotate.X_AXIS) - root.setRotate (30) - - val scene = new Scene (root, 500, 300, true) - scene.setCamera (camera) - primaryStage.setScene (scene) - primaryStage.setTitle ("3D Example") - - primaryStage.show () - end start - -end Gfx3DExample - diff --git a/target/scala-3.6.4/classes/scalation/scala3d/README.txt b/target/scala-3.6.4/classes/scalation/scala3d/README.txt deleted file mode 100644 index dd962a8c3..000000000 --- a/target/scala-3.6.4/classes/scalation/scala3d/README.txt +++ /dev/null @@ -1,4 +0,0 @@ - -This package is under development and is exploring the use of JavaFx/ScalaFx -as a lightweight 3D solution versus say libGDX or LWJGL. 
- diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$10.class b/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$10.class deleted file mode 100644 index e925538ec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$10.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$2.class b/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$2.class deleted file mode 100644 index 734e050bb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$3.class b/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$3.class deleted file mode 100644 index 8ffd2ad29..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$4.class b/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$4.class deleted file mode 100644 index 632e5c6e8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$5.class b/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$5.class deleted file mode 100644 index c5c2131eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$6.class b/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$6.class deleted file mode 100644 index 02ce53a25..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$6.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$7.class b/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$7.class deleted file mode 100644 index 
feb8ff3eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$7.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$8.class b/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$8.class deleted file mode 100644 index bf6f4a506..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$8.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$9.class b/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$9.class deleted file mode 100644 index 842ce2a54..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$$anon$9.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$.class b/target/scala-3.6.4/classes/scalation/scala3d/Road3d$.class deleted file mode 100644 index b65722c90..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$Direction$$anon$1.class b/target/scala-3.6.4/classes/scalation/scala3d/Road3d$Direction$$anon$1.class deleted file mode 100644 index 6de889802..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$Direction$$anon$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$Direction$.class b/target/scala-3.6.4/classes/scalation/scala3d/Road3d$Direction$.class deleted file mode 100644 index 33e026fc4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$Direction$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$Direction.class b/target/scala-3.6.4/classes/scalation/scala3d/Road3d$Direction.class deleted file mode 100644 index d722eafdc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Road3d$Direction.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/scala3d/Road3d.class b/target/scala-3.6.4/classes/scalation/scala3d/Road3d.class deleted file mode 100644 index 224e2ea98..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Road3d.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Road3d.tasty b/target/scala-3.6.4/classes/scalation/scala3d/Road3d.tasty deleted file mode 100644 index 4f3a9e046..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Road3d.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Sink$.class b/target/scala-3.6.4/classes/scalation/scala3d/Sink$.class deleted file mode 100644 index b5b4d8543..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Sink$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Sink.class b/target/scala-3.6.4/classes/scalation/scala3d/Sink.class deleted file mode 100644 index 9ea1c5758..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Sink.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Sink.tasty b/target/scala-3.6.4/classes/scalation/scala3d/Sink.tasty deleted file mode 100644 index 76ad68a84..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Sink.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Source$.class b/target/scala-3.6.4/classes/scalation/scala3d/Source$.class deleted file mode 100644 index 1d989251b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Source$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Source.class b/target/scala-3.6.4/classes/scalation/scala3d/Source.class deleted file mode 100644 index 4a42d5e69..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Source.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Source.tasty 
b/target/scala-3.6.4/classes/scalation/scala3d/Source.tasty deleted file mode 100644 index 33e778a80..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Source.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Vehicle$.class b/target/scala-3.6.4/classes/scalation/scala3d/Vehicle$.class deleted file mode 100644 index 1b41f1ac2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Vehicle$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Vehicle.class b/target/scala-3.6.4/classes/scalation/scala3d/Vehicle.class deleted file mode 100644 index c24bdf8c0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Vehicle.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/Vehicle.tasty b/target/scala-3.6.4/classes/scalation/scala3d/Vehicle.tasty deleted file mode 100644 index f7ce71ee0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/scala3d/Vehicle.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/scala3d/index.html b/target/scala-3.6.4/classes/scalation/scala3d/index.html deleted file mode 100644 index 3e950e7a7..000000000 --- a/target/scala-3.6.4/classes/scalation/scala3d/index.html +++ /dev/null @@ -1,14 +0,0 @@ - - -

    Source files in scala3d Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/setExtTest.class b/target/scala-3.6.4/classes/scalation/setExtTest.class deleted file mode 100644 index e11fd238b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/setExtTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/setExtTest.tasty b/target/scala-3.6.4/classes/scalation/setExtTest.tasty deleted file mode 100644 index 0876ca364..000000000 Binary files a/target/scala-3.6.4/classes/scalation/setExtTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Completion.class b/target/scala-3.6.4/classes/scalation/simulation/Completion.class deleted file mode 100644 index 3c3b0adca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Completion.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Completion.tasty b/target/scala-3.6.4/classes/scalation/simulation/Completion.tasty deleted file mode 100644 index af2c24b65..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Completion.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Coroutine$.class b/target/scala-3.6.4/classes/scalation/simulation/Coroutine$.class deleted file mode 100644 index 20eb16517..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Coroutine$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Coroutine.class b/target/scala-3.6.4/classes/scalation/simulation/Coroutine.class deleted file mode 100644 index 0bbc33101..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Coroutine.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Coroutine.tasty b/target/scala-3.6.4/classes/scalation/simulation/Coroutine.tasty deleted file mode 100644 index f3fb6f7c9..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/Coroutine.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/CoroutineTest$.class b/target/scala-3.6.4/classes/scalation/simulation/CoroutineTest$.class deleted file mode 100644 index 1ac702c15..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/CoroutineTest$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/CoroutineTest$Cor1.class b/target/scala-3.6.4/classes/scalation/simulation/CoroutineTest$Cor1.class deleted file mode 100644 index ba024582b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/CoroutineTest$Cor1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/CoroutineTest$Cor2.class b/target/scala-3.6.4/classes/scalation/simulation/CoroutineTest$Cor2.class deleted file mode 100644 index 9c6cc8ef2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/CoroutineTest$Cor2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/CoroutineTest.class b/target/scala-3.6.4/classes/scalation/simulation/CoroutineTest.class deleted file mode 100644 index 9a878aedc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/CoroutineTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/CoroutineTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/CoroutineTest.tasty deleted file mode 100644 index 1ad342a44..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/CoroutineTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Identifiable$.class b/target/scala-3.6.4/classes/scalation/simulation/Identifiable$.class deleted file mode 100644 index 59619b9dd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Identifiable$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/Identifiable.class b/target/scala-3.6.4/classes/scalation/simulation/Identifiable.class deleted file mode 100644 index 585416222..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Identifiable.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Identifiable.tasty b/target/scala-3.6.4/classes/scalation/simulation/Identifiable.tasty deleted file mode 100644 index a931fde71..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Identifiable.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Locatable.class b/target/scala-3.6.4/classes/scalation/simulation/Locatable.class deleted file mode 100644 index f3459e2aa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Locatable.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Locatable.tasty b/target/scala-3.6.4/classes/scalation/simulation/Locatable.tasty deleted file mode 100644 index edd37a827..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Locatable.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Locatable2.class b/target/scala-3.6.4/classes/scalation/simulation/Locatable2.class deleted file mode 100644 index 05121bb8a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Locatable2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Locatable2.tasty b/target/scala-3.6.4/classes/scalation/simulation/Locatable2.tasty deleted file mode 100644 index d6823480f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Locatable2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Modelable.class b/target/scala-3.6.4/classes/scalation/simulation/Modelable.class deleted file mode 100644 index 4ffbb7b8b..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/Modelable.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Modelable.tasty b/target/scala-3.6.4/classes/scalation/simulation/Modelable.tasty deleted file mode 100644 index e570c4f35..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Modelable.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Monitor$.class b/target/scala-3.6.4/classes/scalation/simulation/Monitor$.class deleted file mode 100644 index 6b0f0239e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Monitor$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Monitor$package$.class b/target/scala-3.6.4/classes/scalation/simulation/Monitor$package$.class deleted file mode 100644 index 5310faf4e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Monitor$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Monitor$package$Mon$2$.class b/target/scala-3.6.4/classes/scalation/simulation/Monitor$package$Mon$2$.class deleted file mode 100644 index 6212c526f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Monitor$package$Mon$2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Monitor$package.class b/target/scala-3.6.4/classes/scalation/simulation/Monitor$package.class deleted file mode 100644 index 856baa630..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Monitor$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Monitor$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/Monitor$package.tasty deleted file mode 100644 index f23578436..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Monitor$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/Monitor.class b/target/scala-3.6.4/classes/scalation/simulation/Monitor.class deleted file mode 100644 index d28d87707..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Monitor.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Monitor.tasty b/target/scala-3.6.4/classes/scalation/simulation/Monitor.tasty deleted file mode 100644 index 603a3aca4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Monitor.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess$.class b/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess$.class deleted file mode 100644 index ae0358e4d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess$package$.class b/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess$package$.class deleted file mode 100644 index 40f548697..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess$package.class b/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess$package.class deleted file mode 100644 index d92e35cae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess$package.tasty deleted file mode 100644 index 693b06984..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess.class b/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess.class deleted file mode 100644 index 04a3a3be7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess.tasty b/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess.tasty deleted file mode 100644 index 3106e2cd9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/NH_PoissonProcess.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess$.class b/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess$.class deleted file mode 100644 index 0efc3efda..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess$package$.class b/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess$package$.class deleted file mode 100644 index ea64dcda7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess$package.class b/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess$package.class deleted file mode 100644 index 3adfea92f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess$package.tasty deleted file mode 100644 index b4dcd8d50..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess.class b/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess.class deleted file mode 100644 index f073e360d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess.tasty b/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess.tasty deleted file mode 100644 index 278b39051..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/PoissonProcess.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Temporal.class b/target/scala-3.6.4/classes/scalation/simulation/Temporal.class deleted file mode 100644 index 315dc8c34..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Temporal.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/Temporal.tasty b/target/scala-3.6.4/classes/scalation/simulation/Temporal.tasty deleted file mode 100644 index 6aacd75c4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/Temporal.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/ArcD$.class b/target/scala-3.6.4/classes/scalation/simulation/activity/ArcD$.class deleted file mode 100644 index 0c9b02711..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/ArcD$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/ArcD.class b/target/scala-3.6.4/classes/scalation/simulation/activity/ArcD.class deleted file mode 100644 index 79f8ed3d0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/ArcD.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/ArcD.tasty b/target/scala-3.6.4/classes/scalation/simulation/activity/ArcD.tasty deleted file mode 100644 index 
1726a9713..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/ArcD.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/ArcI$.class b/target/scala-3.6.4/classes/scalation/simulation/activity/ArcI$.class deleted file mode 100644 index fe322c267..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/ArcI$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/ArcI.class b/target/scala-3.6.4/classes/scalation/simulation/activity/ArcI.class deleted file mode 100644 index dfbcf45f3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/ArcI.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/ArcI.tasty b/target/scala-3.6.4/classes/scalation/simulation/activity/ArcI.tasty deleted file mode 100644 index 24f7c78ae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/ArcI.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/Counter$.class b/target/scala-3.6.4/classes/scalation/simulation/activity/Counter$.class deleted file mode 100644 index 55e502ac2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/Counter$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/Counter.class b/target/scala-3.6.4/classes/scalation/simulation/activity/Counter.class deleted file mode 100644 index afd775130..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/Counter.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/Counter.tasty b/target/scala-3.6.4/classes/scalation/simulation/activity/Counter.tasty deleted file mode 100644 index 5015f82e1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/Counter.tasty and /dev/null differ 
diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNet$package$.class b/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNet$package$.class deleted file mode 100644 index 9228bc349..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNet$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNet$package.class b/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNet$package.class deleted file mode 100644 index 7a44c43b2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNet$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNet$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNet$package.tasty deleted file mode 100644 index bf9c9031e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNet$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNet.class b/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNet.class deleted file mode 100644 index 4c82c850e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNet.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNet.tasty b/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNet.tasty deleted file mode 100644 index 19f6ddb34..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNet.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNetRules.class b/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNetRules.class deleted file mode 100644 index 7412758f2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNetRules.class and /dev/null 
differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNetRules.tasty b/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNetRules.tasty deleted file mode 100644 index 55b9ddacb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNetRules.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNetRulesTest$.class b/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNetRulesTest$.class deleted file mode 100644 index 1d8d71dcb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNetRulesTest$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNetRulesTest.class b/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNetRulesTest.class deleted file mode 100644 index f2e39f1c1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNetRulesTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNetRulesTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNetRulesTest.tasty deleted file mode 100644 index 5fb561060..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/PetriNetRulesTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/PlaceD.class b/target/scala-3.6.4/classes/scalation/simulation/activity/PlaceD.class deleted file mode 100644 index 1fa8ae73b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/PlaceD.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/PlaceD.tasty b/target/scala-3.6.4/classes/scalation/simulation/activity/PlaceD.tasty deleted file mode 100644 index 1861de580..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/PlaceD.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/PlaceI.class b/target/scala-3.6.4/classes/scalation/simulation/activity/PlaceI.class deleted file mode 100644 index 2b6ea60f1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/PlaceI.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/PlaceI.tasty b/target/scala-3.6.4/classes/scalation/simulation/activity/PlaceI.tasty deleted file mode 100644 index 0c09cacd9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/PlaceI.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/Transition.class b/target/scala-3.6.4/classes/scalation/simulation/activity/Transition.class deleted file mode 100644 index 08031aceb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/Transition.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/Transition.tasty b/target/scala-3.6.4/classes/scalation/simulation/activity/Transition.tasty deleted file mode 100644 index 62803e62d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/Transition.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/index.html b/target/scala-3.6.4/classes/scalation/simulation/activity/index.html deleted file mode 100644 index 462ce9149..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/activity/index.html +++ /dev/null @@ -1,9 +0,0 @@ - - -

    Source files in activity Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/petriNetTest.class b/target/scala-3.6.4/classes/scalation/simulation/activity/petriNetTest.class deleted file mode 100644 index 5883e2f83..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/petriNetTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/activity/petriNetTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/activity/petriNetTest.tasty deleted file mode 100644 index e2ccf20cc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/activity/petriNetTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Bus.scalaa b/target/scala-3.6.4/classes/scalation/simulation/agent/Bus.scalaa deleted file mode 100644 index ed737aac0..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/agent/Bus.scalaa +++ /dev/null @@ -1,66 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Wed Oct 20 14:54:48 EDT 2021 - * @see LICENSE (MIT style license file). - * - * @note A Bus Is Used to Transport Several SimAgents Together - */ - -package scalation -package simulation.agent - -import scala.util.control.Breaks.{break, breakable} - -import scala.math.min - -import scalation.mathstat.VectorD -import scalation.random.Variate - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Bus` class is used to collect multiple agents together for transporting. - * The act method must be specified in each subclass where the bus route is specified. 
- * @param name the name of this bus - * @param director the director controlling the model - * @param lTime the loading/unloading time - * @param cap the capacity of this bus - * @param pos the position (Euclidean coordinates) of this bus - */ -abstract class Bus (name: String, director: Model, lTime: Variate, cap: Int, pos: VectorD = null) - extends SimAgent ("b", director.clock, director, pos): - - private val rider = Array.ofDim [SimAgent] (cap) // seats on this bus - private var nRiders = 0 // current number of riders - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Load agents/riders waiting for this bus in queue q. - * @param que the wait queue where agents are waiting for the bus - */ - def load (que: WaitQueue): Unit = - breakable { - for i <- 0 to cap if rider(i) == null do // find next open seat - if que.isEmpty then break () // break when queue empties - rider(i) = que.dequeue () // rider from queue takes seat i - nRiders += 1 // increment the number of riders - end for - } // breakable - end load - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Unload agents/riders from this bus onto transport t. 
- * @param tran the transport for agents departing this bus - */ - def unload (tran: Transport): Unit = - for i <- 0 until nRiders do - val r_i = rider(i) // consider the i-th rider - if r_i.nextTransport == tran then // rider i wants to exit - director.schedule (r_i, i) // FIX - use longer delay - rider(i) = null // open seat i - nRiders -= 1 // decrement the number of riders - end if - end for - end unload - -end Bus - diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/EdgeAgents.class b/target/scala-3.6.4/classes/scalation/simulation/agent/EdgeAgents.class deleted file mode 100644 index 1e14f4fed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/EdgeAgents.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/EdgeAgents.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/EdgeAgents.tasty deleted file mode 100644 index 641c9775a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/EdgeAgents.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Gate$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Gate$.class deleted file mode 100644 index 5bcbcddda..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Gate$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Gate.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Gate.class deleted file mode 100644 index b6bc3652d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Gate.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Gate.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/Gate.tasty deleted file mode 100644 index 555d7000a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Gate.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/agent/Junction$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Junction$.class deleted file mode 100644 index 8bc0a0fd4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Junction$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Junction.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Junction.class deleted file mode 100644 index 882151e8a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Junction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Junction.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/Junction.tasty deleted file mode 100644 index 642837fbe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Junction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Link$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Link$.class deleted file mode 100644 index 97644e35d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Link$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Link.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Link.class deleted file mode 100644 index 984a5a386..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Link.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Link.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/Link.tasty deleted file mode 100644 index 79bb9eaec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Link.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Model$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Model$.class deleted file mode 100644 index 
8c904ff19..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Model$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Model$Reporter$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Model$Reporter$.class deleted file mode 100644 index fc2cdb22a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Model$Reporter$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Model$Reporter.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Model$Reporter.class deleted file mode 100644 index e4c74201c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Model$Reporter.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Model.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Model.class deleted file mode 100644 index 13e4e773e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Model.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Model.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/Model.tasty deleted file mode 100644 index 5e2f321ea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Model.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor$.class deleted file mode 100644 index dc7cf2b8d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor$package$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor$package$.class deleted file mode 100644 index 532625622..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor$package$Mon$2$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor$package$Mon$2$.class deleted file mode 100644 index c261ca712..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor$package$Mon$2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor$package.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor$package.class deleted file mode 100644 index 0d29c4cc2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor$package.tasty deleted file mode 100644 index addc8c000..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor.class deleted file mode 100644 index 05b78e7b0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor.tasty deleted file mode 100644 index d70c90221..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Monitor.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/NOTE.txt b/target/scala-3.6.4/classes/scalation/simulation/agent/NOTE.txt deleted file mode 100644 index 910da5ab5..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/agent/NOTE.txt +++ 
/dev/null @@ -1,3 +0,0 @@ - -FIX: INCOMPLETE IMPLEMENTATION - diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/QueueOps.class b/target/scala-3.6.4/classes/scalation/simulation/agent/QueueOps.class deleted file mode 100644 index 0daaba43c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/QueueOps.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/QueueOps.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/QueueOps.tasty deleted file mode 100644 index 51228c551..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/QueueOps.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Resource$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Resource$.class deleted file mode 100644 index 953e7f5f7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Resource$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Resource.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Resource.class deleted file mode 100644 index 499e1fb61..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Resource.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Resource.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/Resource.tasty deleted file mode 100644 index 6cee93ad8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Resource.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Route$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Route$.class deleted file mode 100644 index 6879ecf50..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Route$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Route.class 
b/target/scala-3.6.4/classes/scalation/simulation/agent/Route.class deleted file mode 100644 index 34dfcc79e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Route.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Route.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/Route.tasty deleted file mode 100644 index 93b38740c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Route.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$.class deleted file mode 100644 index 09a8c8b23..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$package$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$package$.class deleted file mode 100644 index 354ee9340..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$package$TestAgent$1.class b/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$package$TestAgent$1.class deleted file mode 100644 index eb4991481..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$package$TestAgent$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$package$TestAgent$3$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$package$TestAgent$3$.class deleted file mode 100644 index d9dacd175..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$package$TestAgent$3$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$package.class b/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$package.class deleted file mode 100644 index b832a14b5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$package.tasty deleted file mode 100644 index 257516868..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent.class b/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent.class deleted file mode 100644 index 8eba064d4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent.tasty deleted file mode 100644 index 1b375ae51..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/SimAgent.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Sink$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Sink$.class deleted file mode 100644 index bfd8bd6df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Sink$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Sink.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Sink.class deleted file mode 100644 index 2325ebbff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Sink.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Sink.tasty 
b/target/scala-3.6.4/classes/scalation/simulation/agent/Sink.tasty deleted file mode 100644 index 575a7fbac..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Sink.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Source$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Source$.class deleted file mode 100644 index 8e2ad023b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Source$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Source.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Source.class deleted file mode 100644 index 8015bbeb0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Source.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Source.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/Source.tasty deleted file mode 100644 index 810a47e36..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Source.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Statistical.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Statistical.class deleted file mode 100644 index 8909b86d2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Statistical.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Statistical.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/Statistical.tasty deleted file mode 100644 index 53f7ca5d0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Statistical.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Transport$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Transport$.class deleted file mode 100644 index 76fbfd776..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/agent/Transport$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Transport.class b/target/scala-3.6.4/classes/scalation/simulation/agent/Transport.class deleted file mode 100644 index 57c8963f7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Transport.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/Transport.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/Transport.tasty deleted file mode 100644 index 329e725af..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/Transport.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue$.class deleted file mode 100644 index 7db49b0ec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue.class b/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue.class deleted file mode 100644 index c44e3b064..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue.tasty deleted file mode 100644 index 4ba91247f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue_LCFS$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue_LCFS$.class deleted file mode 100644 index 3bb46ae8f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue_LCFS$.class and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue_LCFS.class b/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue_LCFS.class deleted file mode 100644 index 528cf1986..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue_LCFS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue_LCFS.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue_LCFS.tasty deleted file mode 100644 index b7488051d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/WaitQueue_LCFS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Bank$package$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Bank$package$.class deleted file mode 100644 index 928ec7346..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Bank$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Bank$package.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Bank$package.class deleted file mode 100644 index 4376561e8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Bank$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Bank$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Bank$package.tasty deleted file mode 100644 index 983e27158..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Bank$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/BankModel$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/BankModel$.class deleted file mode 100644 index 97868fd4c..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/BankModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/BankModel$Customer$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/BankModel$Customer$.class deleted file mode 100644 index 121eb7ebd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/BankModel$Customer$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/BankModel$Customer.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/BankModel$Customer.class deleted file mode 100644 index 4028d56e7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/BankModel$Customer.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/BankModel.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/BankModel.class deleted file mode 100644 index 06e4b5a7d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/BankModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/BankModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/BankModel.tasty deleted file mode 100644 index 8b7c146cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/BankModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenter$package$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenter$package$.class deleted file mode 100644 index f17c27c83..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenter$package$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenter$package.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenter$package.class deleted file mode 100644 index 71bb3b60b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenter$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenter$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenter$package.tasty deleted file mode 100644 index 9b32f2600..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenter$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenterModel$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenterModel$.class deleted file mode 100644 index 8a2216cda..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenterModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenterModel$Call$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenterModel$Call$.class deleted file mode 100644 index a1eff1220..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenterModel$Call$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenterModel$Call.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenterModel$Call.class deleted file mode 100644 index f21833e75..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenterModel$Call.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenterModel.class 
b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenterModel.class deleted file mode 100644 index 9a231b169..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenterModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenterModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenterModel.tasty deleted file mode 100644 index 69a046b57..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/CallCenterModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2L$package$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2L$package$.class deleted file mode 100644 index f9d1a1150..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2L$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2L$package.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2L$package.class deleted file mode 100644 index 24eccf27f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2L$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2L$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2L$package.tasty deleted file mode 100644 index 9a4514a37..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2L$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2LModel$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2LModel$.class deleted file mode 100644 index 9ed1a4f90..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2LModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2LModel$Car$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2LModel$Car$.class deleted file mode 100644 index 47608fbaa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2LModel$Car$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2LModel$Car.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2LModel$Car.class deleted file mode 100644 index 51710447e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2LModel$Car.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2LModel.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2LModel.class deleted file mode 100644 index 7d77407e2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2LModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2LModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2LModel.tasty deleted file mode 100644 index 1719f2ea5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic2LModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4L$package$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4L$package$.class deleted file mode 100644 index 5b5af9e02..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4L$package$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4L$package.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4L$package.class deleted file mode 100644 index ee78ecd37..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4L$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4L$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4L$package.tasty deleted file mode 100644 index 86d47a2cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4L$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4LModel$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4LModel$.class deleted file mode 100644 index c2ac07318..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4LModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4LModel$Car$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4LModel$Car$.class deleted file mode 100644 index 173156e61..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4LModel$Car$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4LModel$Car.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4LModel$Car.class deleted file mode 100644 index a70d81be4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4LModel$Car.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4LModel.class 
b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4LModel.class deleted file mode 100644 index feb84eb14..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4LModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4LModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4LModel.tasty deleted file mode 100644 index 4a9a7e503..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/Traffic4LModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutes$package$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutes$package$.class deleted file mode 100644 index b5a346945..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutes$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutes$package.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutes$package.class deleted file mode 100644 index bc0b06fbc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutes$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutes$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutes$package.tasty deleted file mode 100644 index c5608276d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutes$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutesModel$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutesModel$.class deleted file mode 100644 index dcc1fbfec..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutesModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutesModel$Bus$.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutesModel$Bus$.class deleted file mode 100644 index 6b680e014..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutesModel$Bus$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutesModel$Bus.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutesModel$Bus.class deleted file mode 100644 index 1d0942441..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutesModel$Bus.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutesModel.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutesModel.class deleted file mode 100644 index 76b8b3ad1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutesModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutesModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutesModel.tasty deleted file mode 100644 index fc2452080..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/UGABusRoutesModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/groups.txt b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/groups.txt deleted file mode 100644 index 5608eb26c..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/groups.txt +++ /dev/null @@ -1,162 +0,0 @@ - -Gate: - * @param name the name of the gate - * 
@param director the model/container for this gate - * @param time the activation time for this gate - * @param line the queue holding entities waiting for this gate to open - * @param onTimeRV distribution of time that gate will be open - * @param offTimeRV distribution of time that gate will be closed - * @param shut0 whether the gate is initially closed (true) or open (false) - * @param cap the maximum number of entities that will be released when the gate is opened - * @param prop the properties of this gate - * @param pos the position (Euclidean coordinates) of this gate - */ -class Gate (name: String, director: Model, time: Double, line: WaitQueue, - onTimeRV: Variate, offTimeRV: Variate, shut0: Boolean = false, cap: Int = 15, - prop: Property = null, pos: VectorD = null) - -Junction: - * @param name the name of this junction - * @param director the `Model` directing the simulation - * @param jTimeRV the jump-time through the junction - * @param prop the properties of this junction - * @param pos the Euclidean coordinates of this junction - */ -class Junction (name: String, director: Model, jTimeRV: Variate, - prop: Property = null, pos: VectorD = null) - -Resource: - * @param name the name of this server - * @param director the `Model` directing the simulation - * @param serviceRV the service time random variate - * @param units the number of service units (e.g., bank tellers) - * @param prop the properties of this server - * @param pos the Euclidean coordinates of this server - */ -class Resource (name: String, director: Model, serviceRV: Variate = null, private var units: Int, - prop: Property = null, pos: VectorD = null) - -Sink: - * @param name the name of this sink - * @param director the director controlling the model - * @param prop the properties of this sink - * @param pos the position (Euclidean coordinate) of this sink - */ -class Sink (name: String, director: Model, - prop: Property = null, pos: VectorD = null) - -Source: - * @param name the name 
of this source - * @param director the director controlling the model - * @param time the activation time for this source - * @param iArrivalRV the inter-arrival time distribution - * @param makeEntity the function to make entities of a specified type - * @param units the number of entities to make - * @param subtype the subtype can be used for behavior specialization - * @param prop the properties of this source - * @param pos the position (Euclidean coordinates) of this source - */ -class Source (name: String, director: Model, time: Double, iArrivalRV: Variate, - makeEntity: () => SimAgent, units: Int, subtype: Int = 0, - prop: Property = null, pos: VectorD = null) - - * @param name the name of this wait-queue - * @param director the `Model` directing the simulation - * @param cap the capacity of the queue (defaults to unbounded) - * @param prop the properties of this wait-queue - * @param pos the Euclidean coordinates for this wait-queue - */ -class WaitQueue (name: String, director: Model, cap: Int = Int.MaxValue, - prop: Property = null, pos: VectorD = null) - - * @param name the name of this wait-queue - * @param director the `Model` directing the simulation - * @param cap the capacity of the queue (defaults to unbounded) - * @param prop the properties of this wait-queue - * @param pos the Euclidean coordinates for this wait-queue - */ -class WaitQueue_LCFS (name: String, director: Model, cap: Int = Int.MaxValue, - prop: Property = null, pos: VectorD = null) - -============================================================================= - -Gate: - * @param director the director controlling the model - * @param time the activation time for these gates - * @param onTimeRV distribution of time that gates will be open - * @param offTimeRV distribution of time that gates will be closed - * @param cap the maximum number of entities that will be released when the gate is opened - * @param prop the properties of these gates - * @param xy the (x, y) coordinates for 
the top-left corner of the reference gate. - * @param gte repeated gate specific info: - */ - def group (director: Model, time: Double, onTimeRV: Variate, offTimeRV: Variate, cap: Int = 15, - prop: Property = null, xy: (Double, Double), - gte: (String, WaitQueue, (Double, Double))*): VEC [Gate] = - -Junction: - * @param director the director controlling the model - * @param jTimeRV the jump-time through the junctions - * @param prop the properties of these junctions - * @param xy the (x, y) coordinates for the top-left corner of the reference junction. - * @param jnt repeated junction specific info: - */ - def group (director: Model, jTimeRV: Variate, - prop: Property = null, xy: (Double, Double), - jnt: (String, (Double, Double))*): VEC [Junction] = - -Resource: - * @param director the `Model` directing the simulation - * @param serviceRV the service time distribution - * @param prop the properties of these servers - * @param xy the (x, y) coordinates for the top-left corner of the reference resource. - * @param rsc repeated resource specific info: - */ - def group (director: Model, serviceRV: Variate, - prop: Property = null, xy: (Double, Double), - rsc: (String, Int, (Double, Double))*): VEC [Resource] = - -Sink: - * @param director the director controlling the model - * @param prop the properties of these sinks - * @param xy the (x, y) coordinates for the top-left corner of the reference sink. - * @param snk repeated sink specific info: - */ - def group (director: Model, - prop: Property = null, xy: (Double, Double), - snk: (String, (Double, Double))*): VEC [Sink] = - -Source: - * @param director the director controlling the model - * @param time the activation time for these sources - * @param makeEntity the function to make entities of a specified type - * @param units the number of entities to make - * @param prop the properties of these sources - * @param xy the (x, y) coordinates for the top-left corner of the reference source. 
- * @param src repeated source specific info: - */ - def group (director: Model,time: Double, makeEntity: () => SimAgent, units: Int, - prop: Property = null, xy: (Double, Double), - src: (String, Variate, Int, (Double, Double))*): VEC [Source] = - -WaitQueue: - * @param director the `Model` directing the simulation - * @param cap the capacity of these queues (defaults to unbounded) - * @param prop the properties of these queues - * @param xy the (x, y) coordinates for the top-left corner of the reference queue. - * @param que repeated queue specific info: - */ - def group (director: Model, cap: Int = Int.MaxValue, - prop: Property = null, xy: (Double, Double), - que: (String, (Double, Double))*): VEC [WaitQueue] = - - * @param director the `Model` directing the simulation - * @param cap the capacity of these queues (defaults to unbounded) - * @param prop the properties of these queues - * @param xy the (x, y) coordinates for the top-left corner of the reference queue. - * @param que repeated queue specific info: - */ - def group (director: Model, cap: Int = 15, - prop: Property = null, xy: (Double, Double), - que: (String, (Double, Double))*): VEC [WaitQueue_LCFS] = - diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/index.html b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/index.html deleted file mode 100644 index 49d5ee0e0..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/index.html +++ /dev/null @@ -1,13 +0,0 @@ - - -

    Source files in example_1 Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/old/Traffic2L.scala.bak b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/old/Traffic2L.scala.bak deleted file mode 100644 index 3b94dbc26..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/old/Traffic2L.scala.bak +++ /dev/null @@ -1,139 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Sep 20 15:47:16 EDT 2021 - * @see LICENSE (MIT style license file). - * - * @title Example Model: Traffic2L (Two-Lane) for Agent-Based Simulation - */ - -package scalation -package simulation.agent -package example_1 - -import scala.collection.mutable.{ArrayBuffer => VEC} - -import scalation.random.{Bernoulli, Sharp, Uniform} -import scalation.random.RandomSeeds.N_STREAMS - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `runTraffic2L` function is used to launch the `Traffic2LModel` class in - * structure testing mode, where each `Source` will create just one agent. - * > runMain scalation.simulation.agent.example_1.runTraffic2L1 - */ -@main def runTraffic2L1 (): Unit = new Traffic2LModel (nStop = 1) - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `runTraffic2L` function is used to launch the `Traffic2LModel` class. - * > runMain scalation.simulation.agent.example_1.runTraffic2L - */ -@main def runTraffic2L (): Unit = new Traffic2LModel () - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Traffic2LModel` class simulates an intersection with four traffic lights - * `Gates` and four roads. Each road consists of two `Transport`s with one in each - * direction. 
- * @param name the name of the simulation model - * @param reps the number of independent replications to run - * @param startSim the start time of the simulation - * @param animating whether to animate the model - * @param aniRatio the ratio of simulation speed vs. animation speed - * @param nStop the number arrivals before stopping - * @param stream the base random number stream (0 to 999) - */ -class Traffic2LModel (name: String = "Traffic2L", reps: Int = 1, startSim: Double = 0.0, - animating: Boolean = true, aniRatio: Double = 8.0, - nStop: Int = 20, stream: Int = 0) - extends Model (name, reps, startSim, animating, aniRatio): - - //-------------------------------------------------- - // Initialize Model Constants - - val iaTime = (4000.0, 6000.0) // (lower, upper) on inter-arrival time - val onTime = 8000.0 // on (green-light) time for North-South traffic - val offTime = 6000.0 // off (red-light) time for North-South traffic - val mvTime = (2900.0, 3100.0) // (lower, upper) on move time - val jpTime = 100.0 // jump time - - //-------------------------------------------------- - // Create Random Variates (RVs) - - val iArrivalRV = Uniform (iaTime, stream) - val onTimeRV = Sharp (onTime, (stream + 1) % N_STREAMS) - val offTimeRV = Sharp (offTime, (stream + 2) % N_STREAMS) - val moveRV = Uniform (mvTime, (stream + 3) % N_STREAMS) - val jumpRV = Sharp (jpTime, (stream + 4) % N_STREAMS) - - //-------------------------------------------------- - // Create the Graph Model: Vertices and Edges - - val base = (800.0, 400.0) - - val source = Source.group (this, 0.0, () => Car (), nStop, null, base, - ("sN", iArrivalRV, 0, (-16, -250)), // from North - ("sE", iArrivalRV, 1, (250, -16)), - ("sS", iArrivalRV, 2, (16, 250)), - ("sW", iArrivalRV, 3, (-250, 16))) - - val queue = WaitQueue.group (this, Int.MaxValue, null, base, - ("qN", (-16, -40)), // before North light - ("qE", (40, -16)), - ("qS", (16, 40)), - ("qW", (-40, 16))) - -/* - val light = Gate.group (this, 0.0, 
onTimeRV, offTimeRV, 15, null, base, - ("lN", queue(0), (-17, 3)), // traffic from North - ("lE", queue(1), (-3, -17)), - ("lS", queue(2), (17, -3)), - ("lW", queue(3), (3, 17))) -*/ - - val light = Gate.group4 (this, 0.0, onTimeRV, offTimeRV, 15, null, base, - ("lN", queue(0)), // traffic from North - ("lE", queue(1)), - ("lS", queue(2)), - ("lW", queue(3))) - - val sink = Sink.group (this, null, base, - ("kS", (-16, 250)), // end for North traffic - ("kW", (-250, -16)), - ("kN", (16, -250)), - ("kE", (250, 16))) - - val road1 = VEC [Transport] () - val link = VEC [Link] () - val road2 = VEC [Transport] () - for i <- source.indices do - road1 += Transport (s"ra$i", this, source(i).vert, queue(i), moveRV) - link += Link ("", this, queue(i), light(i).vert, jumpRV) - road2 += Transport (s"rb$i", this, light(i).vert, sink(i), moveRV) - end for - - //-------------------------------------------------- - // Specify Scripts for each Type of Simulation Agent - - case class Car () extends SimAgent ("c", director.clock, this): - - def act (): Unit = - banner (s"Car $me started") - val i = subtype // from North (0), East (1), South (2), West (3) - road1(i).move (this) // move along road segment 1 - if light(i).open (this) then queue(i).noWait (this) // continue thru for green light - else queue(i).waitIn (this) // stop and wait for red light - link(i).jump (this) // jump thru gate/intersection - road2(i).move (this) // move along road segment 2 - sink(i).leave (this) // end at this sink - end act - - end Car - - simulate () - waitFinished () - Model.shutdown () - -end Traffic2LModel - diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runBank.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runBank.class deleted file mode 100644 index 5a6d60d68..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runBank.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runBank.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runBank.tasty deleted file mode 100644 index 4eafba8e8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runBank.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runCallCenter.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runCallCenter.class deleted file mode 100644 index 39969c343..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runCallCenter.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runCallCenter.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runCallCenter.tasty deleted file mode 100644 index 9a4cc4d6f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runCallCenter.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic2L.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic2L.class deleted file mode 100644 index ecf70b8d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic2L.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic2L.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic2L.tasty deleted file mode 100644 index 3ff47c875..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic2L.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic2L1.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic2L1.class deleted file mode 100644 index 8a3f3230c..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic2L1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic2L1.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic2L1.tasty deleted file mode 100644 index bafd7c942..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic2L1.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic4L.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic4L.class deleted file mode 100644 index 11a09a52e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic4L.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic4L.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic4L.tasty deleted file mode 100644 index 61660873a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runTraffic4L.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runUGABusRoutes.class b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runUGABusRoutes.class deleted file mode 100644 index 5a1338a26..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runUGABusRoutes.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runUGABusRoutes.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runUGABusRoutes.tasty deleted file mode 100644 index 3fdc14405..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runUGABusRoutes.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runUGABusRoutes1.class 
b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runUGABusRoutes1.class deleted file mode 100644 index c114f0e6b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runUGABusRoutes1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runUGABusRoutes1.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runUGABusRoutes1.tasty deleted file mode 100644 index b9247bfa0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/example_1/runUGABusRoutes1.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/index.html b/target/scala-3.6.4/classes/scalation/simulation/agent/index.html deleted file mode 100644 index 0cb8b9991..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/agent/index.html +++ /dev/null @@ -1,26 +0,0 @@ - - -

    Source files in agent Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/monitorTest.class b/target/scala-3.6.4/classes/scalation/simulation/agent/monitorTest.class deleted file mode 100644 index 523df1157..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/monitorTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/monitorTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/monitorTest.tasty deleted file mode 100644 index e551e36b3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/monitorTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/old/Model.scala.bak b/target/scala-3.6.4/classes/scalation/simulation/agent/old/Model.scala.bak deleted file mode 100644 index 7ab76e6af..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/agent/old/Model.scala.bak +++ /dev/null @@ -1,249 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Sep 7 15:05:06 EDT 2009 - * @see LICENSE (MIT style license file). 
- * - * @title Base Model Class for Agent-Based Simulation - */ - -package scalation -package simulation.agent - -import scala.collection.mutable.{ArrayBuffer => VEC, PriorityQueue} - -import scalation.animation.{AnimateCommand, CommandType} -import scalation.animation.CommandType.MoveToken -import scalation.database.Identifiable -import scalation.database.graph.{EdgeType, PGraph, VertexType} -import scalation.mathstat.Statistic -import scalation.simulation.{Completion, Coroutine} -import scalation.scala2d.Colors._ -import scalation.scala2d.Shape - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Model` class maintains a property graph making up the model and - * controls the flow of entities (`SimAgent`s) through the model, following the - * agent-based simulation world-view. It maintains a time-ordered priority queue - * to activate/re-activate each of the entities. Each entity (`SimAgent`) is - * implemented as a `Coroutine` and may be thought of as running in its own thread. - * @param _name the name of this simulation model - * @param reps the number of independent replications - * @param startSim the start time of this simulation - * @param animating whether to animate the model - * @param aniRatio the ratio of simulation speed vs. 
animation speed - */ -class Model (_name: String, reps: Int = 1, startSim: Double = 0.0, - animating: Boolean = true, aniRatio: Double = 10.0) - extends Coroutine (_name) - with Identifiable (_name) - with Completion: - - protected val graphMod = PGraph (name, Model.vertexTypes, Model.edgeTypes, - animating, aniRatio) // the graph model - - private val debug = debugf ("Model", true) // debug function - private val flaw = flawf ("Model") // flaw function - private val agenda = PriorityQueue.empty [SimAgent] // time-ordered activation list - - private [agent] var clock = startSim // the simulation clock - private [agent] var simulating = false // the simulation clock - private [agent] val log = Monitor ("simulation") // log for model execution - private [agent] var nAgents = 0 // current number of live agents - private [agent] val statList = VEC [Statistical] () // list of variables to keep stats on - - val director = this - - debug ("init", s"name = $name, startSim = $startSim") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Execute the simulation (includes scheduling all Sources) returning summary - * statistics. - */ - def simulate (): Unit = - banner (s"start simulation $name at $startSim") - graphMod.print () - if animating then graphMod.display (100000) // FIX - should be adaptive -// return // end before simulating to only examine initial graph - log.trace (this, "starts", this, clock) - for source <- Source.sources do schedule (source) // put all sources on agenda - start () // start the director thread/agent -> act () - end simulate - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Cleanup the agenda and any stateful components. Any agent left in the - * agenda or a wait queue must be terminated. The model (i.e., the director) - * must be terminated as well. - */ - def cleanup (): Unit = - banner ("Model.cleanup in progress") - - println ("cleanup: agenda") - while ! 
agenda.isEmpty do // cleanup agents left on agenda - val a = agenda.dequeue () - if a != this then - println (s"cleanup: terminate agent $a in agenda") - a.interrupt () // terminate all agents, except director - end if - end while - -/* - println ("cleanup: wait queues") - for p <- parts do - if p.isInstanceOf [WaitQueue] then // cleanup wait queues - val w = p.asInstanceOf [WaitQueue] - while ! w.isEmpty do - val a = w.dequeue () - println (s"cleanup: terminate agent $a in $w") - a.interrupt () // terminate all agents in queue - end while - end if - end for -*/ - end cleanup - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Schedule the agent to act (be activated) at agent.time (optionally delayed). - * @param agent the agent to be scheduled - * @param delay the amount of time to delay the agent's activation time - */ - def schedule (agent: SimAgent, delay: Double = 0.0): Unit = - if delay < 0.0 then - flaw ("schedule", s"agent $agent delay time is negative: $delay") - banner ("WARN") - end if - agent.time = clock + delay - if agent.time < clock then // out of order scheduling => WARN - flaw ("schedule", s"agent $agent activation time < $clock") - banner ("WARN") - end if -// debug ("schedule", s"now = $clock: schedule agent $agent") - log.trace (this, "schedules agent", agent, clock) - agenda += agent - end schedule - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Yield through the director to the next agent found on the agenda, i.e., - * agent1 yields directly to agent2 without resuming the director's 'act' method. - * @param agent1 the current agent that yields execution - * @param quit whether the current agent wishes to terminate (end execution) - */ - def yield2Next (agent1: SimAgent, quit: Boolean = false): Unit = ??? 
- - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** The model itself is an Agent (not an ordinary `SimAgent`) and may be - * thought of as the director. The director iteratively manages the clock - * and the agenda of agents until the the agenda (priority queue) becomes empty - * or the number of live agents is greater than zero (gates are considered live). - */ - def act (): Unit = - simulating = true - debug ("act", s"agenda = $agenda") - - while ! agenda.isEmpty && nAgents > 0 do // scheduling loop - val agent = agenda.dequeue () // next from priority queue - if agent.time < clock then // out of order execution => QUIT - flaw ("act", s"agent $agent activation time < $clock") - banner ("QUIT") - return - end if - clock = agent.time // advance the time -// debug ("act", s"${this.me} resumes ${agent.me} at $clock") - log.trace (this, "resumes", agent, clock) - yyield (agent) // director yields to agent - end while - - log.trace (this, s"ends", null, clock) - report () // report results - cleanup () - println (s"coroutine counts = $counts") - log.trace (this, "terminates model", null, clock) - simulating = false - hasFinished () // signal via semaphore that simulation is finished - yyield (null, true) // yield and terminate the director - end act - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Put a token command (CreateToken, MoveToken or DestroyToken) on the animation queue. 
- * @param agent who is being animated - * @param what what animation command - * @param color the color the token - * @param shape the shape of the token - */ - def animate (agent: SimAgent, what: CommandType, color: Color = null, - shape: Shape = null): Unit = - var eid = agent.id - if agent.isInstanceOf [Gate] then eid += 1 // FIX - Gate's vertex is one more - val label = agent.name - val apos = if what == MoveToken then agent.pos(0 to 2) // agent's position (x, y) - else agent.pos // (x, y, w, h) -// debug ("animate", s">>> $label.$eid, $what, $color, $shape, $apos") - if animating then graphMod.add_aniQ (AnimateCommand (what, eid, shape, label, true, color, - apos.toArray, clock)) - end animate - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compare the order of agents based on their activation times. - * @param agent the first agent in comparison - private def orderedAgent (agent1: SimAgent): Ordered [SimAgent] = - new Ordered [SimAgent] - { def compare (agent2: SimAgent) = agent2.time compare agent1.time } - end orderedAgent - */ - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the statistical results of the simulation (statistics for each vertex). - * Includes both sample and time-persistent statistics. - */ - def getStatistics: VEC [Statistic] = - val stats = VEC [Statistic] () -// for v <- graphMod.vt(0).verts do v.addStats (stats) // FIX - stats - end getStatistics - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Report on the statistical results of the simulation. 
- */ - private def report (): Unit = - println (Statistic.line) - println (Statistic.labels) - println (Statistic.line) - for stat <- getStatistics do println (stat) - println (Statistic.line) - end report - -end Model - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Model` companion object provides a shutdown method and methods to add - * vertex/edge types to the model. - */ -object Model: - - private val vertexTypes = VEC [VertexType] () // collection of vertex types - private val edgeTypes = VEC [EdgeType] () // collection of edge types - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add vertex type vt to the collection of vertex types. - * @param vt the vertex type to add - */ - def add (vt: VertexType): Unit = vertexTypes += vt - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Add edge type et to the collection of edge types. - * @param et the edge type to add - */ - def add (et: EdgeType): Unit = edgeTypes += et - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Shutdown the Model execution infrastructure (WARNING: this method should - * only be called right before program termination). Make sure all threads - * have finished (e.g., call `waitFinished`), not just the main thread. - * If `shutdown` is not called, the application may hang. 
- */ - def shutdown (): Unit = Coroutine.shutdown () - -end Model - diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/old/Segment.scala.bak b/target/scala-3.6.4/classes/scalation/simulation/agent/old/Segment.scala.bak deleted file mode 100644 index 7583c7b09..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/agent/old/Segment.scala.bak +++ /dev/null @@ -1,44 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Sep 20 15:47:16 EDT 2021 - * @see LICENSE (MIT style license file). - * - * @title Segment of an Edge - */ - -package scalation -package simulation.agent_based - -import scalation.database.{Identifiable, Spatial} -import scalation.database.graph.Edge -import scalation.mathstat.VectorD - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Segment` class represents part of an edge. - * @param name the name of segment - * @param pos the position of the beginning of the segment - * @param link the the edge this segment is part of - */ -class Segment (name: String, pos: VectorD, link: Edge) - extends Identifiable (name) - with Spatial (pos): - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the segment object to a string. - */ - override def toString: String = s"Segment ($name, $pos, ${link.id})" - -end Segment - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Segment` object contains an index that maps a `Segment` to a set of `SimAgent`s. 
- */ -object Segment: - - val index = Map [Segment, Set [SimAgent]] () // the index map - -end Segment - diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/old/Topological.scala.bak b/target/scala-3.6.4/classes/scalation/simulation/agent/old/Topological.scala.bak deleted file mode 100644 index 755c9c632..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/agent/old/Topological.scala.bak +++ /dev/null @@ -1,38 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Mon Sep 27 15:03:10 EDT 2021 - * @see LICENSE (MIT style license file). - * - * @title Topological Objects Positioned in a Graph - */ - -package scalation -package simulation.agent_based - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Topological` trait provides topological coordinates that are topologically - * partially ordered. - * @param s the segment of the directed edge it is on - * @param d its distance along the segment - */ -trait Topological (private var s: Segment, private var d: Double) - extends PartiallyOrdered [Topological]: - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Compare two spatial objects based on their space coordinates. - * @param other the other item to compare with this item - */ - def tryCompareTo [B >: Topological: AsPartiallyOrdered] (other: B): Option [Int] = - val oth = other.asInstanceOf [Topological] - if s == oth.s then Option (d compare oth.d) - else oth.s tryCompareTo oth.s - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Convert the spatial object to a string. 
- */ - override def toString: String = s"Topological ($s, $d)" - -end Topological - diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/simAgentTest.class b/target/scala-3.6.4/classes/scalation/simulation/agent/simAgentTest.class deleted file mode 100644 index 5c266d79a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/simAgentTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/agent/simAgentTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/agent/simAgentTest.tasty deleted file mode 100644 index 004bb239f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/agent/simAgentTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/CausalLink$.class b/target/scala-3.6.4/classes/scalation/simulation/event/CausalLink$.class deleted file mode 100644 index d6d23d768..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/CausalLink$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/CausalLink.class b/target/scala-3.6.4/classes/scalation/simulation/event/CausalLink.class deleted file mode 100644 index 56b533d87..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/CausalLink.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/CausalLink.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/CausalLink.tasty deleted file mode 100644 index 5152447d4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/CausalLink.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/Entity$.class b/target/scala-3.6.4/classes/scalation/simulation/event/Entity$.class deleted file mode 100644 index f0090f501..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/Entity$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/event/Entity.class b/target/scala-3.6.4/classes/scalation/simulation/event/Entity.class deleted file mode 100644 index c8deef4f9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/Entity.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/Entity.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/Entity.tasty deleted file mode 100644 index 7e7727a15..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/Entity.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/Event$.class b/target/scala-3.6.4/classes/scalation/simulation/event/Event$.class deleted file mode 100644 index ca23e8a45..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/Event$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/Event.class b/target/scala-3.6.4/classes/scalation/simulation/event/Event.class deleted file mode 100644 index 52c5b5879..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/Event.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/Event.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/Event.tasty deleted file mode 100644 index 797dbf908..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/Event.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/EventNode$.class b/target/scala-3.6.4/classes/scalation/simulation/event/EventNode$.class deleted file mode 100644 index 408f47e8c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/EventNode$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/EventNode.class b/target/scala-3.6.4/classes/scalation/simulation/event/EventNode.class deleted file mode 100644 
index 87cd300e9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/EventNode.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/EventNode.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/EventNode.tasty deleted file mode 100644 index 86143414f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/EventNode.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/Ex_Template$package$.class b/target/scala-3.6.4/classes/scalation/simulation/event/Ex_Template$package$.class deleted file mode 100644 index bf8945fd6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/Ex_Template$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/Ex_Template$package.class b/target/scala-3.6.4/classes/scalation/simulation/event/Ex_Template$package.class deleted file mode 100644 index 527c940a2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/Ex_Template$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/Ex_Template$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/Ex_Template$package.tasty deleted file mode 100644 index 6799dcb97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/Ex_Template$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/Model$.class b/target/scala-3.6.4/classes/scalation/simulation/event/Model$.class deleted file mode 100644 index f549759a3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/Model$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/Model.class b/target/scala-3.6.4/classes/scalation/simulation/event/Model.class deleted file mode 100644 index 52483fb50..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/event/Model.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/Model.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/Model.tasty deleted file mode 100644 index c73e4ae06..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/Model.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/SOMEModel$.class b/target/scala-3.6.4/classes/scalation/simulation/event/SOMEModel$.class deleted file mode 100644 index 61d179798..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/SOMEModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/SOMEModel$Arrival$.class b/target/scala-3.6.4/classes/scalation/simulation/event/SOMEModel$Arrival$.class deleted file mode 100644 index 4dac5c985..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/SOMEModel$Arrival$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/SOMEModel$Arrival.class b/target/scala-3.6.4/classes/scalation/simulation/event/SOMEModel$Arrival.class deleted file mode 100644 index 3cbc0d58b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/SOMEModel$Arrival.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/SOMEModel.class b/target/scala-3.6.4/classes/scalation/simulation/event/SOMEModel.class deleted file mode 100644 index 9bce184a4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/SOMEModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/SOMEModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/SOMEModel.tasty deleted file mode 100644 index 5fe507285..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/SOMEModel.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue$.class b/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue$.class deleted file mode 100644 index c4b7c32b3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue.class b/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue.class deleted file mode 100644 index fe5463bd1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue.tasty deleted file mode 100644 index a31598253..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue_LCFS$.class b/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue_LCFS$.class deleted file mode 100644 index a190d6aff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue_LCFS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue_LCFS.class b/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue_LCFS.class deleted file mode 100644 index 4faab38ad..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue_LCFS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue_LCFS.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue_LCFS.tasty deleted file mode 100644 index ea5a052cc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/WaitQueue_LCFS.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank$package$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank$package$.class deleted file mode 100644 index 21f4bae98..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank$package.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank$package.class deleted file mode 100644 index 2c584d5f7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank$package.tasty deleted file mode 100644 index ebe1cd145..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank2$package$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank2$package$.class deleted file mode 100644 index 5b4e7e9e5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank2$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank2$package.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank2$package.class deleted file mode 100644 index cdfb52c71..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank2$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank2$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank2$package.tasty deleted file mode 100644 index 4b6295cae..000000000 
Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank2$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank3$package$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank3$package$.class deleted file mode 100644 index e98bccbe4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank3$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank3$package.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank3$package.class deleted file mode 100644 index 848752304..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank3$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank3$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank3$package.tasty deleted file mode 100644 index f4b30a10a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Bank3$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel$.class deleted file mode 100644 index 26564a2e8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel$Arrival$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel$Arrival$.class deleted file mode 100644 index 20908de37..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel$Arrival$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel$Arrival.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel$Arrival.class deleted file mode 100644 index 1f647c072..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel$Arrival.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel$Departure$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel$Departure$.class deleted file mode 100644 index c7d3fd0a3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel$Departure$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel$Departure.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel$Departure.class deleted file mode 100644 index b03b9b9e2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel$Departure.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel.class deleted file mode 100644 index dc734c27f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel.tasty deleted file mode 100644 index eb66f8c48..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2$.class deleted file mode 100644 
index 94ee9510f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2$Arrival$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2$Arrival$.class deleted file mode 100644 index 0f91a49d7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2$Arrival$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2$Arrival.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2$Arrival.class deleted file mode 100644 index 2ea0c448c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2$Arrival.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2$Departure$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2$Departure$.class deleted file mode 100644 index 6665062f5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2$Departure$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2$Departure.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2$Departure.class deleted file mode 100644 index 5aed04a6c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2$Departure.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2.class deleted file mode 100644 index e84bba298..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2.tasty deleted file mode 100644 index 9bae32960..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3$.class deleted file mode 100644 index b5cfe3852..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3$Arrival$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3$Arrival$.class deleted file mode 100644 index 8b26b4494..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3$Arrival$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3$Arrival.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3$Arrival.class deleted file mode 100644 index 6fbad2cc8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3$Arrival.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3$Departure$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3$Departure$.class deleted file mode 100644 index b1b2f4941..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3$Departure$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3$Departure.class 
b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3$Departure.class deleted file mode 100644 index 59104937d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3$Departure.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3.class deleted file mode 100644 index 814fc77b8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3.tasty deleted file mode 100644 index 7e6a03992..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/BankModel3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter$package$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter$package$.class deleted file mode 100644 index 84bf06f00..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter$package.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter$package.class deleted file mode 100644 index 24b6df30d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter$package.tasty deleted file mode 100644 index dab9b895b..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter2$package$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter2$package$.class deleted file mode 100644 index 9f1001edc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter2$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter2$package.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter2$package.class deleted file mode 100644 index 1aa3c9793..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter2$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter2$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter2$package.tasty deleted file mode 100644 index 399e3bdd3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenter2$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel$.class deleted file mode 100644 index 0fd2a98d9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel$Arrival$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel$Arrival$.class deleted file mode 100644 index e5bcc50c8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel$Arrival$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel$Arrival.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel$Arrival.class deleted file mode 100644 index 10d1e47f2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel$Arrival.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel$Departure$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel$Departure$.class deleted file mode 100644 index 7c59963bd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel$Departure$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel$Departure.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel$Departure.class deleted file mode 100644 index dc7cb14c2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel$Departure.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel.class deleted file mode 100644 index ed4d2b739..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel.tasty deleted file mode 100644 index 91ce1612a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2$.class 
b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2$.class deleted file mode 100644 index 04f4d65e9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2$Arrival$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2$Arrival$.class deleted file mode 100644 index 48d102426..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2$Arrival$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2$Arrival.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2$Arrival.class deleted file mode 100644 index 399603a43..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2$Arrival.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2$Departure$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2$Departure$.class deleted file mode 100644 index 98dc423d2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2$Departure$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2$Departure.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2$Departure.class deleted file mode 100644 index 15df005b5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2$Departure.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2.class 
b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2.class deleted file mode 100644 index 09ed10e93..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2.tasty deleted file mode 100644 index 143b0473c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/CallCenterModel2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFood$package$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFood$package$.class deleted file mode 100644 index 72a850a7c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFood$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFood$package.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFood$package.class deleted file mode 100644 index c8169371d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFood$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFood$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFood$package.tasty deleted file mode 100644 index 692292456..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFood$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel$.class deleted file mode 100644 index f3de7a5f6..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel$Arrival$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel$Arrival$.class deleted file mode 100644 index 571b0daf0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel$Arrival$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel$Arrival.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel$Arrival.class deleted file mode 100644 index c82d000f7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel$Arrival.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel$Departure$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel$Departure$.class deleted file mode 100644 index 49f0d03ea..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel$Departure$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel$Departure.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel$Departure.class deleted file mode 100644 index 2a2b8ee91..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel$Departure.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel.class deleted file mode 100644 index 10e4f3d35..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel.class and /dev/null differ 
diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel.tasty deleted file mode 100644 index 821e4c340..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/FastFoodModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Machine$package$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Machine$package$.class deleted file mode 100644 index 55ade47fd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Machine$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Machine$package.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Machine$package.class deleted file mode 100644 index 9d734d11f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Machine$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Machine$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Machine$package.tasty deleted file mode 100644 index a13e00377..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Machine$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$.class deleted file mode 100644 index a0373c55b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$Arrival$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$Arrival$.class 
deleted file mode 100644 index 48e8d6d8f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$Arrival$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$Arrival.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$Arrival.class deleted file mode 100644 index a31e997ee..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$Arrival.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$FinishMachine1$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$FinishMachine1$.class deleted file mode 100644 index 880ad5251..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$FinishMachine1$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$FinishMachine1.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$FinishMachine1.class deleted file mode 100644 index 4223bf7a4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$FinishMachine1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$FinishMachine2$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$FinishMachine2$.class deleted file mode 100644 index 92e7d0a80..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$FinishMachine2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$FinishMachine2.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$FinishMachine2.class deleted file mode 100644 index 
c8019be0f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel$FinishMachine2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel.class deleted file mode 100644 index bc8c2aab2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel.tasty deleted file mode 100644 index e5a1218f9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/MachineModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson$package$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson$package$.class deleted file mode 100644 index 5a611a096..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson$package.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson$package.class deleted file mode 100644 index 9e3d4d9af..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson$package.tasty deleted file mode 100644 index dc7b00425..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson2$package$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson2$package$.class deleted file mode 100644 index 1aa3927db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson2$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson2$package.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson2$package.class deleted file mode 100644 index c40a6970f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson2$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson2$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson2$package.tasty deleted file mode 100644 index e2c92febc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/Poisson2$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel$.class deleted file mode 100644 index bf039b82e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel$Arrival$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel$Arrival$.class deleted file mode 100644 index 171958069..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel$Arrival$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel$Arrival.class 
b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel$Arrival.class deleted file mode 100644 index 269e8ab0b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel$Arrival.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel.class deleted file mode 100644 index 299e5fee2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel.tasty deleted file mode 100644 index 815dfaf87..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel2$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel2$.class deleted file mode 100644 index 61a013e7b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel2$Arrival$.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel2$Arrival$.class deleted file mode 100644 index ca45b189b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel2$Arrival$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel2$Arrival.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel2$Arrival.class deleted file mode 100644 index f230e1b18..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel2$Arrival.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel2.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel2.class deleted file mode 100644 index 8d86c9aad..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel2.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel2.tasty deleted file mode 100644 index e7990e2a8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/PoissonModel2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/index.html b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/index.html deleted file mode 100644 index 6325888ad..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/index.html +++ /dev/null @@ -1,16 +0,0 @@ - - -

    Source files in example_1 Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank.class deleted file mode 100644 index c0416fc3c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank.tasty deleted file mode 100644 index a33e3a39b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank2.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank2.class deleted file mode 100644 index 0e4b2a689..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank2.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank2.tasty deleted file mode 100644 index a9d0dedff..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank3.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank3.class deleted file mode 100644 index f0cfca224..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank3.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank3.tasty deleted file mode 100644 index a517f1c44..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runBank3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runCallCenter.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runCallCenter.class deleted file mode 100644 index b380990a4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runCallCenter.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runCallCenter.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runCallCenter.tasty deleted file mode 100644 index b50253676..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runCallCenter.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runCallCenter2.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runCallCenter2.class deleted file mode 100644 index 718f27c4e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runCallCenter2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runCallCenter2.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runCallCenter2.tasty deleted file mode 100644 index ea7536e59..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runCallCenter2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runFastFood.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runFastFood.class deleted file mode 100644 index 1430944df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runFastFood.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runFastFood.tasty 
b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runFastFood.tasty deleted file mode 100644 index 5edb0fb30..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runFastFood.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runMachine.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runMachine.class deleted file mode 100644 index 8b4eadbfd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runMachine.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runMachine.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runMachine.tasty deleted file mode 100644 index b554dc015..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runMachine.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runPoisson.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runPoisson.class deleted file mode 100644 index 462b5870c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runPoisson.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runPoisson.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runPoisson.tasty deleted file mode 100644 index c6a2f4f14..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runPoisson.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runPoisson2.class b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runPoisson2.class deleted file mode 100644 index d3153e9b5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runPoisson2.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runPoisson2.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runPoisson2.tasty deleted file mode 100644 index 60b5eb44b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/example_1/runPoisson2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/index.html b/target/scala-3.6.4/classes/scalation/simulation/event/index.html deleted file mode 100644 index 6ed41b473..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/event/index.html +++ /dev/null @@ -1,16 +0,0 @@ - - -

    Source files in event Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/runSOME.class b/target/scala-3.6.4/classes/scalation/simulation/event/runSOME.class deleted file mode 100644 index fc8d15186..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/runSOME.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/event/runSOME.tasty b/target/scala-3.6.4/classes/scalation/simulation/event/runSOME.tasty deleted file mode 100644 index 6ada71a9e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/event/runSOME.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/index.html b/target/scala-3.6.4/classes/scalation/simulation/index.html deleted file mode 100644 index 4b114b4bd..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/index.html +++ /dev/null @@ -1,25 +0,0 @@ - - -

    Source files in simulation Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/simulation/monitorTest.class b/target/scala-3.6.4/classes/scalation/simulation/monitorTest.class deleted file mode 100644 index f25443557..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monitorTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monitorTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/monitorTest.tasty deleted file mode 100644 index 0b37abb83..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monitorTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards$$anon$1.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards$$anon$1.class deleted file mode 100644 index 183bfc146..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards$$anon$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards$.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards$.class deleted file mode 100644 index 4f0873665..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards$package$.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards$package$.class deleted file mode 100644 index e3c8d97ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards$package.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards$package.class deleted file mode 100644 index d29ed6e41..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards$package.class and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards$package.tasty deleted file mode 100644 index 332b820e9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards.class deleted file mode 100644 index 8d4bcf52b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards.tasty deleted file mode 100644 index f5ffc9ef1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/Cards.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/GrainDropping$package$.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/GrainDropping$package$.class deleted file mode 100644 index 7b1bb01e5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/GrainDropping$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/GrainDropping$package.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/GrainDropping$package.class deleted file mode 100644 index c2de9e85d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/GrainDropping$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/GrainDropping$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/GrainDropping$package.tasty deleted file mode 100644 index ecfcb4123..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/GrainDropping$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/GrainDropping.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/GrainDropping.class deleted file mode 100644 index f5c3194f9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/GrainDropping.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/GrainDropping.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/GrainDropping.tasty deleted file mode 100644 index e4f39ee82..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/GrainDropping.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration$.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration$.class deleted file mode 100644 index 72f8ac313..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration$package$.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration$package$.class deleted file mode 100644 index 4ed55dc16..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration$package.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration$package.class deleted file mode 100644 index 8153c5457..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration$package.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration$package.tasty deleted file mode 100644 index 8ebaa82ae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration.class deleted file mode 100644 index e5f28ee4e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration.tasty deleted file mode 100644 index d07cce46b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MonteCarloIntegration.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MontyHall$package$.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MontyHall$package$.class deleted file mode 100644 index da6242e83..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MontyHall$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MontyHall$package.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MontyHall$package.class deleted file mode 100644 index bb3d2dd2a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MontyHall$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MontyHall$package.tasty 
b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MontyHall$package.tasty deleted file mode 100644 index e91c3d261..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/MontyHall$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice$.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice$.class deleted file mode 100644 index ffa0cd745..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice$package$.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice$package$.class deleted file mode 100644 index a23d5943c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice$package.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice$package.class deleted file mode 100644 index 70df3095e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice$package.tasty deleted file mode 100644 index 81173c5ec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice.class deleted file mode 100644 index ad55396df..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice.tasty deleted file mode 100644 index a033b6665..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/RollDice.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/SphereVolume$package$.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/SphereVolume$package$.class deleted file mode 100644 index 7f5700d97..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/SphereVolume$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/SphereVolume$package.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/SphereVolume$package.class deleted file mode 100644 index 799779a72..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/SphereVolume$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/SphereVolume$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/SphereVolume$package.tasty deleted file mode 100644 index 2acce8257..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/SphereVolume$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest.class deleted file mode 100644 index df63babb8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest.tasty deleted file mode 100644 index 8675a0217..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest2.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest2.class deleted file mode 100644 index 566927463..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest2.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest2.tasty deleted file mode 100644 index c0ce2409c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest3.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest3.class deleted file mode 100644 index 74066ff88..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest3.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest3.tasty deleted file mode 100644 index a27e1eb91..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/cardsTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/grainDroppingTest.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/grainDroppingTest.class deleted file mode 100644 index c569b30ec..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/grainDroppingTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/grainDroppingTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/grainDroppingTest.tasty deleted file mode 100644 
index e9453f7f9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/grainDroppingTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/index.html b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/index.html deleted file mode 100644 index c3f173a71..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/index.html +++ /dev/null @@ -1,13 +0,0 @@ - - -

    Source files in monte_carlo Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/monteCarloIntegrationTest.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/monteCarloIntegrationTest.class deleted file mode 100644 index 94666e458..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/monteCarloIntegrationTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/monteCarloIntegrationTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/monteCarloIntegrationTest.tasty deleted file mode 100644 index da0e69987..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/monteCarloIntegrationTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/montyHall.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/montyHall.class deleted file mode 100644 index cb4cae2c2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/montyHall.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/montyHall.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/montyHall.tasty deleted file mode 100644 index dd2580ae5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/montyHall.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/old/Poker.scala.bak b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/old/Poker.scala.bak deleted file mode 100644 index 1ec86dc74..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/old/Poker.scala.bak +++ /dev/null @@ -1,77 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Fri Jan 14 21:14:29 EST 2022 - * @see LICENSE (MIT style 
license file). - * - * @title Poker Hands - */ - -package scalation -package simulation -package monte_carlo - -import scala.runtime.ScalaRunTime.stringOf - -import Cards._ - -object Poker: - - def checkPair (deck: Cards, hand: IndexedSeq [Int]): Boolean = - for i <- hand.indices do - println (s"card $i = ${value (hand(i))}") - for j <- i+1 until hand.length do - if value (hand(i))._1 == value (hand(j))._1 then - println (s"pair: ${value (hand(i))}, ${value (hand(j))}") - return true - end if - end for - end for - false - end checkPair - - def checkTrip (deck: Cards, hand: IndexedSeq [Int]): Boolean = - for i <- hand.indices do - println (s"card $i = ${value (hand(i))}") - for j <- i+1 until hand.length do - if value (hand(i))._1 == value (hand(j))._1 then - for k <- j+1 until hand.length do - if value (hand(j))._1 == value (hand(k))._1 then - println (s"Tripple: ${value (hand(j))}, ${value (hand(k))}") - return true - end if - end for - end if - end for - end for - false - end checkTrip - -end Poker - -// runMain scalation.simulation.monte_carlo.runPoker - -@main def runPoker (): Unit = - - val deck = new Cards () - println ("\nOrdered deck of cards:") - println (deck) - deck.shuffle () - println ("\nShuffled deck of cards:") - println (deck) - - for h <- 1 to 200 do - banner (s"Hand $h") - val hand = for i <- 1 to 5 yield deck.draw () - println ("\n5 card hand = " + hand) - - println ("pair = " + Poker.checkPair (deck, hand)) - val trip = Poker.checkTrip (deck, hand) - println ("trip = " + trip) - if trip then return - deck.shuffle () - end for - -end runPoker - diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/old/Sph.scala.bak b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/old/Sph.scala.bak deleted file mode 100644 index ce2937968..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/old/Sph.scala.bak +++ /dev/null @@ -1,27 +0,0 @@ - -package scalation.simulation.montecarlo - -// runMain 
scalation.simulation.montecarlo.monteCarloSphere - -import scala.util.Random - -def Count(iter: Int): Int = { - val randX = new Random - val randY = new Random - val randZ = new Random - var counter = 0 - for (i <- 0 until iter) { - // since we are in the quarter of the sphere we get coordinates from 0 to 1 - val x = randX.nextDouble // in [0,1] - val y = randY.nextDouble // in [0,1] - val z = randZ.nextDouble // in [0,1] - if (x*x + y*y + z*z <= 1) counter= counter + 1 - } - counter -} - -//@main def monteCarloSphere(iter: Int): Double = 4.0 * Count(iter) / iter -@main def monteCarloSphere(): Unit = - - println (4.0 * Count(1000) / 1000.0) - diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/old/Sph3.scala.bak b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/old/Sph3.scala.bak deleted file mode 100644 index ec6143c57..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/old/Sph3.scala.bak +++ /dev/null @@ -1,35 +0,0 @@ - -package scalation.simulation.montecarlo - -// runMain scalation.simulation.montecarlo.calculateVolume - -import scala.math.pow -import scala.math.sqrt - -val pi = 3.14 -val n = 10000 -val height = 1 / n.asInstanceOf[Double] -var volume_disc = 0.0 -var volume_total = 0.0 -var r = 0.0 - -/** unit_sphere: 1 = x^2 + y^2 + z^2 - * radius_circle = sqrt(x^2+y^2) - * Solve for sqrt(x^2+y^2) using equation for unit sphere: 1 = x^2 + y^2 + z^2 - * sqrt(x^2+y^2) = sqrt(1 - z^2) = radius_circle as a function of z, the height of sphere - * Slice sphere into thin discs with height = 1/n, where n > 0 - * Focus on top half of sphere and let n = num of discs = i = num of repetitions - * volume_sphere = [sum over i of (volume_disc)] * 2 - * volume_disc = pi * radius_circle^2 * height - * radius_circle = sqrt(1 - z^2) = sqrt(1 - (height * i)^2) - */ -@main def calculateVolume(): Unit = - for i <- 1 to n do // volume of one half of sphere - r = sqrt(1 - pow(height*i,2)) - volume_disc = pi * pow(r,2) * height 
- volume_total = volume_total + volume_disc - end for - volume_total = volume_total * 2 // add the other half of sphere - println("Volume = " + volume_total) -end calculateVolume - diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest.class deleted file mode 100644 index 6dd109d49..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest.tasty deleted file mode 100644 index 19809923e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest2.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest2.class deleted file mode 100644 index 50395d021..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest2.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest2.tasty deleted file mode 100644 index 84b8837e6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest3.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest3.class deleted file mode 100644 index 499c06fe7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest3.tasty 
b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest3.tasty deleted file mode 100644 index 1c0bf2b4b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/rollDiceTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/sphereVolumeTest.class b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/sphereVolumeTest.class deleted file mode 100644 index 62a7443a6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/sphereVolumeTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/sphereVolumeTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/sphereVolumeTest.tasty deleted file mode 100644 index 5950b7ac8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/monte_carlo/sphereVolumeTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/nH_PoissonProcessTest.class b/target/scala-3.6.4/classes/scalation/simulation/nH_PoissonProcessTest.class deleted file mode 100644 index 57e17a94a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/nH_PoissonProcessTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/nH_PoissonProcessTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/nH_PoissonProcessTest.tasty deleted file mode 100644 index 1a9b0cb7a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/nH_PoissonProcessTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/nH_PoissonProcessTest2.class b/target/scala-3.6.4/classes/scalation/simulation/nH_PoissonProcessTest2.class deleted file mode 100644 index 86f9db054..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/nH_PoissonProcessTest2.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/nH_PoissonProcessTest2.tasty b/target/scala-3.6.4/classes/scalation/simulation/nH_PoissonProcessTest2.tasty deleted file mode 100644 index 1f07be725..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/nH_PoissonProcessTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/old/Coroutine.scala.bak b/target/scala-3.6.4/classes/scalation/simulation/old/Coroutine.scala.bak deleted file mode 100644 index fc4e39c45..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/old/Coroutine.scala.bak +++ /dev/null @@ -1,217 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Michael Cotterell - * @version 2.0 - * @date Sat Mar 21 20:34:23 EDT 2015 - * @see LICENSE (MIT style license file). - * - * Coroutine implementation options: (1) Java Threads, - * Prototypes for (2) Scala Actors, (3) Akka Actors, (4) Scala Continuations - * This one uses Java Threads and a Cached Thread Pool - */ - -package scalation -package simulation - -import java.util.concurrent.{Executors, ExecutorService, Future, Semaphore, ThreadPoolExecutor} - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Coroutine` class supports (one-at-a-time) quasi-concurrent programming. - * A coroutine runs/acts until it yields control from 'this' to 'that' coroutine. - * When resumed, a coroutines continues its execution where it left off. - * @param label the label for the class of coroutines to be created. - */ -abstract class Coroutine (label: String = "cor") - extends Runnable: - - import Coroutine._ - - private val debug = debugf ("Coroutine", false) // debug function - private val _sema = new Semaphore (0) // waiting semaphore - private var started = false // whether this coroutine has started - - nCreated += 1 - private val id = label + "." 
+ nCreated - debug ("init", s"$id waits to be STARTed") - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the Coroutine counts. - */ - def counts: (Int, Int, Int) = (nCreated, nStarted, nTerminated) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Thread's 'run' method delegates to the 'act' method. Upon interruption - * the 'act' method is run again from the beginning. - */ - def run (): Unit = - nStarted += 1 - try - act () - catch case ex: InterruptedException => - debug ("run", s"INTERRUPTED coroutine $id") - end try - nTerminated +=1 - debug ("run", s"TERMINATE coroutine $id") - end run - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Actor model features the 'act' method, even though threads are used. - * This abstract method must be implemented in application models. - */ - def act (): Unit - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Yield control from 'this' to 'that' coroutine. - * @param that the other coroutine to yield control to - * @param quit whether 'this' coroutine is to terminate (true) - * or wait to be resumed (false) - */ - def yyield (that: Coroutine, quit: Boolean = false): Unit = - if that != null then - if that.started then - debug ("yyield", s"$id RESUMEs that coroutine ${that.id}") - that.resume () - else - debug ("yyield", s"$id STARTs that new coroutine ${that.id}") - that.start () - end if - end if - - if quit then - debug ("yyield", s"$id TERMINATEs") - return - else - _sema.acquire () // wait until resumed - end if - end yyield - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Start this coroutine, i.e., invoke its 'run' -> 'act' method. This - * function returns a future. 
- */ - def start (): Future [_] = - started = true - if pool == null then - flaw ("start", "the coroutine system must be started using Coroutine.startup; expect undefined behavior.") - end if - pool.submit (this) - end start - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Interrupt this waiting coroutine. - */ - def interrupt (): Unit = - Thread.currentThread ().interrupt () - end interrupt - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Resume this coroutine. - */ - private def resume (): Unit = _sema.release () - -end Coroutine - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Coroutine` companion object provides functions to start-up and shutdown - * the coroutine system as well as counters for the `Coroutine` class. - */ -object Coroutine: - - private val debug = debugf ("Coroutine", false) // debug function - private val flaw = flawf ("Coroutine") // flaw function - - private val CORE_THREADS = 0 // number of core threads - private val SHUTDOWN_TIMEOUT = 60 // shutdown timeout, in seconds - private var pool: ExecutorService = null // thread pool - - private var nCreated = 0 // number of Coroutines created - private var nStarted = 0 // number of Coroutines started - private var nTerminated = 0 // number of Coroutines terminated - - startup () // automatic startup at program start - -// sys.addShutdownHook ({ // automatic shutdown at program end -// pool.shutdown () -// pool.shutdownNow () -// }) - - private def threadPoolExecutor = pool.asInstanceOf [ThreadPoolExecutor] - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Start-up the coroutine system. This function can also set the core - * number of threads for the internal cached thread pool. 
- * @param nCoreThreads the new core size - */ - private def startup (nCoreThreads: Int = CORE_THREADS): Unit = - if pool == null then - pool = Executors.newCachedThreadPool () - if nCoreThreads != CORE_THREADS then threadPoolExecutor.setCorePoolSize (nCoreThreads) - else - flaw ("startup", "coroutine system is already started") - end if - end startup - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Shutdown the coroutine thread pool and return the largest number of threads - * that have ever simultaneously been in the pool. Must be called at program - * end or application will hang. - */ - def shutdown (): Int = - var lps = 0 // largest pool size - debug ("Coroutine", "shutdown") - if pool != null then - pool.shutdown () // prevent new submissions to pool - pool.shutdownNow () // interrupt all threads remaining in pool - lps = threadPoolExecutor.getLargestPoolSize - pool = null - else flaw ("shutdown", "coroutine system is already shutdown") - lps - end shutdown - -end Coroutine - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `CoroutineTest` object is used to test the `Coroutine` class. 
- * Should print: - * `Cor1`: phase 1 - * `Cor2`: phase 1 - * `Cor1`: phase 2 - * `Cor2`: phase 2 - * > runMain scalation.simulation.CoroutineTest - */ -object CoroutineTest extends App: // object since it needs forward reference - - class Cor1 extends Coroutine: - - override def act (): Unit = - println ("Cor1: phase 1") - yyield (cor2) - println ("Cor1: phase 2") - yyield (cor2, true) - end act - - end Cor1 - - class Cor2 extends Coroutine: - - override def act (): Unit = - println ("Cor2: phase 1") - yyield (cor1) - println ("Cor2: phase 2") - yyield (null, true) - end act - - end Cor2 - -// Coroutine.startup () - - val cor1 = new Cor1 () - val cor2 = new Cor2 () - - println ("start coroutines") - cor1.start () - -end CoroutineTest - diff --git a/target/scala-3.6.4/classes/scalation/simulation/old/NH_PoissonProcess.scala.bak b/target/scala-3.6.4/classes/scalation/simulation/old/NH_PoissonProcess.scala.bak deleted file mode 100644 index d7e23294e..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/old/NH_PoissonProcess.scala.bak +++ /dev/null @@ -1,109 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Wed Aug 25 15:38:28 EDT 2021 - * @see LICENSE (MIT style license file). - * - * @title Non-Homogeneous Process Process (NHPP) - */ - -package scalation -package simulation - -import scala.collection.mutable.ArrayBuffer - -import scalation.mathstat._ -import scalation.random.{Exponential, VariateVec} - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `NH_PoissonProcess` class generates data following a Non-Homogeneous Poisson - * Process. 
- * @param t the terminal time - * @param lambdaf the arrival rate function, lambda(t) - * @param stream the random number stream to use - */ -case class NH_PoissonProcess (t: Double, lambdaf: FunctionS2S, stream: Int = 0) - extends VariateVec (stream): - - private val t_ia = Exponential (1.0, stream) // interarrival time distribution (mu = 1) - private var t_a = VectorD.nullv // arrival time vector - - def mean: VectorD = VectorD.fill (1)(lambdaf (t) * t) // mean of N(t) - FIX - - def pf (z: VectorD): Double = ??? - - def igen: VectorI = gen.toInt - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Generate the all arrival times in the time interval [0, t], returning them - * as a vector. - */ - def gen: VectorD = - val atime = ArrayBuffer [Double] () - var now = 0.0 - while now <= t do - val lamb = lambdaf (now) - now += t_ia.gen / lamb - atime += now - end while - t_a = VectorD (atime) - t_a - end gen - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the number of arrivals by time tt. - * @param tt the inquiry time (how many arrivals by time tt) - */ - def num (tt: Double): Int = - if t_a == null then gen - for i <- t_a.indices if t_a(i) > tt do return i - t_a.dim - end num - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the arrivals/events occurring during each time interval of length t_span. 
- * @param t_span the time span for an interval (e.g., 5 minute time span) - */ - def flow (t_span: Double): VectorI = - if t_a == null then gen - val intervals = (t / t_span).toInt - val flow = new VectorI (intervals+1) - for i <- 1 to intervals do - val start = num (i * t_span) - val end = if (i < flow.dim) num ((i+1) * t_span) else num (t) - flow(i) = end - start - end for - flow - end flow - -end NH_PoissonProcess - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `NH_PoissonProcessTest` object is used to test the `NH_PoissonProcess` class. - * Example of car arrivals and determination of traffic flow (car per 5-minutes - * passing by a sensor). - * > runMain scalation.simulation.NH_PoissonProcessTest - */ -object NH_PoissonProcessTest extends App: - - val t_end = 50.0 // simulate for 50 minutes - val tl = VectorD.range (0, 101) / 2.0 - def lambdaf (t: Double): Double = 1.5 - 0.001 * (t - 25.0)~^2 - new Plot (tl, func2vector (lambdaf, (0, t_end)), null, "Arrival Rate Function: lambdaf", lines = true) - - val pp = NH_PoissonProcess (t_end, lambdaf) - println (s"pp.gen = ${pp.gen}") - println (s"pp.num (5) = ${pp.num (5)}") - - val t = VectorD.range (0, 501) / 10.0 - val nt = VectorI (for tt <- t yield pp.num (tt)) - new Plot (t, nt.toDouble, null, "NH_PoissonProcess total cars", lines = true) - - val flw = pp.flow (5.0) - val tflw = VectorD.range (0, 11) * 5.0 - new Plot (tflw, flw.toDouble, null, "NH_PoissonProcess cars per 5 min.", lines = true) - -end NH_PoissonProcessTest - diff --git a/target/scala-3.6.4/classes/scalation/simulation/poissonProcessTest.class b/target/scala-3.6.4/classes/scalation/simulation/poissonProcessTest.class deleted file mode 100644 index c6ab79134..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/poissonProcessTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/poissonProcessTest.tasty 
b/target/scala-3.6.4/classes/scalation/simulation/poissonProcessTest.tasty deleted file mode 100644 index 4353ec992..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/poissonProcessTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Bus.class b/target/scala-3.6.4/classes/scalation/simulation/process/Bus.class deleted file mode 100644 index 3737d3482..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Bus.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Bus.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/Bus.tasty deleted file mode 100644 index 574e8a0f7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Bus.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Component.class b/target/scala-3.6.4/classes/scalation/simulation/process/Component.class deleted file mode 100644 index 79d472ee4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Component.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Component.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/Component.tasty deleted file mode 100644 index 046394b24..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Component.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Dynamics.class b/target/scala-3.6.4/classes/scalation/simulation/process/Dynamics.class deleted file mode 100644 index 09ac5d786..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Dynamics.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Dynamics.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/Dynamics.tasty deleted file mode 100644 index 
c2f371651..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Dynamics.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Ex_Template.scsla b/target/scala-3.6.4/classes/scalation/simulation/process/Ex_Template.scsla deleted file mode 100644 index 710ab99b1..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/process/Ex_Template.scsla +++ /dev/null @@ -1,86 +0,0 @@ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sat Sep 11 15:17:56 EDT 2021 - * @see LICENSE (MIT style license file). - * - * @note Example Model: SOME for Process-Interaction Simulation (A Template) - */ - -package scalation -package simulation -package process - -import scalation.random.Exponential - -/******************************************************************************* -See Example Models in sub-directories: - example_1: One-Shot Simulation (OSS) - defaults: 1 rep, animation on, move method, Model (no batching) - example_MIR: Method of Independent Replications (MIR) - defaults: 10 rep, animation off, jump method, Model (no batching) - example_MBM: Method of Batch Means (MBM) - defaults: 1 rep, animation off, jump method, Model_MBM (batching) -*******************************************************************************/ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `runSOME` function is used to launch the `SOMEModel` class. - * The code severs as a template for writing useful simulation models. - * > runMain scalation.simulation.process.runSOME - */ -@main def runSOME (): Unit = new SOMEModel () - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `SOMEModel` class defines a template for process-interaction models. - * Caveat: must add 'from' and 'to' components before transport!! 
- * @param name the name of the simulation model - * @param reps the number of independent replications to run - * @param animating whether to animate the model - * @param aniRatio the ratio of simulation speed vs. animation speed - * @param nStop the number arrivals before stopping - * @param stream the base random number stream (0 to 999) - */ -class SOMEModel (name: String = "SOME", reps: Int = 1, animating: Boolean = true, - aniRatio: Double = 8.0, nStop: Int = 100, stream: Int = 0) - extends Model (name, reps, animating, aniRatio): - - //-------------------------------------------------- - // Initialize Model Constants - - val lambda = 6.0 // customer arrival rate (per hour) - - //-------------------------------------------------- - // Create Random Variables (RVs) - - val iArrivalRV = Exponential (HOUR / lambda, stream) - - //-------------------------------------------------- - // Create Model Components - - val entry = Source ("entry", this, () => SOMEActor (), 0, nStop, iArrivalRV, (100, 290)) - val exit = Sink ("exit", (600, 290)) - - addComponent (entry, exit) - - //-------------------------------------------------- - // Specify Scripts for each Type of Simulation Actor - - case class SOMEActor () extends SimActor ("s", this): - - override def act (): Unit = - println ("SOMEActor: please write the script for this actor") - exit.leave () - end act - - end SOMEActor - - simulate () - waitFinished () - Model.shutdown () - -end SOMEModel - diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Gate$.class b/target/scala-3.6.4/classes/scalation/simulation/process/Gate$.class deleted file mode 100644 index 01c7c530b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Gate$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Gate.class b/target/scala-3.6.4/classes/scalation/simulation/process/Gate.class deleted file mode 100644 index 69fc28d72..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/process/Gate.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Gate.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/Gate.tasty deleted file mode 100644 index aeed1f7e6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Gate.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/GippsDynamics$.class b/target/scala-3.6.4/classes/scalation/simulation/process/GippsDynamics$.class deleted file mode 100644 index 1462a3edc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/GippsDynamics$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/GippsDynamics.class b/target/scala-3.6.4/classes/scalation/simulation/process/GippsDynamics.class deleted file mode 100644 index 81456a380..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/GippsDynamics.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/GippsDynamics.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/GippsDynamics.tasty deleted file mode 100644 index b2da2cd95..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/GippsDynamics.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/IDMDynamics$.class b/target/scala-3.6.4/classes/scalation/simulation/process/IDMDynamics$.class deleted file mode 100644 index 463825042..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/IDMDynamics$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/IDMDynamics.class b/target/scala-3.6.4/classes/scalation/simulation/process/IDMDynamics.class deleted file mode 100644 index 50ebfb19d..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/process/IDMDynamics.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/IDMDynamics.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/IDMDynamics.tasty deleted file mode 100644 index 227a5484f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/IDMDynamics.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Junction$.class b/target/scala-3.6.4/classes/scalation/simulation/process/Junction$.class deleted file mode 100644 index d50740e93..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Junction$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Junction.class b/target/scala-3.6.4/classes/scalation/simulation/process/Junction.class deleted file mode 100644 index 04bc62b7d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Junction.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Junction.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/Junction.tasty deleted file mode 100644 index c6e3c3ff7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Junction.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Model$$anon$1.class b/target/scala-3.6.4/classes/scalation/simulation/process/Model$$anon$1.class deleted file mode 100644 index 4bd241739..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Model$$anon$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Model$.class b/target/scala-3.6.4/classes/scalation/simulation/process/Model$.class deleted file mode 100644 index 1753a3e95..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Model$.class and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Model.class b/target/scala-3.6.4/classes/scalation/simulation/process/Model.class deleted file mode 100644 index 03268ed93..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Model.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Model.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/Model.tasty deleted file mode 100644 index 269d62cc4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Model.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Model_MBM$.class b/target/scala-3.6.4/classes/scalation/simulation/process/Model_MBM$.class deleted file mode 100644 index 79b595aaf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Model_MBM$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Model_MBM.class b/target/scala-3.6.4/classes/scalation/simulation/process/Model_MBM.class deleted file mode 100644 index f2f0b525d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Model_MBM.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Model_MBM.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/Model_MBM.tasty deleted file mode 100644 index 1e9517b41..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Model_MBM.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Motion.scala.bak b/target/scala-3.6.4/classes/scalation/simulation/process/Motion.scala.bak deleted file mode 100644 index 986b9ab1d..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/process/Motion.scala.bak +++ /dev/null @@ -1,125 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: 
-/** @author Casey Bowman - * @version 2.0 - * @date Fri Feb 19 08:58:42 EST 2021 - * @see LICENSE (MIT style license file). - * - * @note Supports Motion Models, e.g., Car-Following Models - */ - -package scalation -package simulation -package process - -import scala.math.{min, sqrt} - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Motion` object provides several options for Car-Following models for - * vehicles to use to move along roads. - */ -object Motion: - - private val FREERANGE = 50.0 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the velocity of the vehicle based on Gipps' model. - * @param an the max acceleration of driver n - * @param bn the max deceleration of driver n (negative #) - * @param sp the size of the predecessor's vehicle - * @param Vn the desired velocity of driver n - * @param xn the current position of driver n - * @param vn the current velocity of driver n - * @param xp the current position of the predecessor - * @param vp the current velocity of the predecessor - * @param rt the reaction time of driver n - */ - def gipps (an: Double, bn: Double, sp: Double, Vn: Double, xn: Double, - vn: Double, xp: Double, vp: Double, rt: Double): Double = - val free = vn * 2.5 * an * rt * (1.0 - vn / Vn) * sqrt (0.025 + vn / Vn) - val cong = bn * rt + sqrt (bn * bn * rt * rt - bn * (2 * (xp - sp - xn) - vn * rt - vp * vp / bn)) - min (free, cong) - end gipps - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the velocity of the vehicle based on Gipps' model for a vehicle and its predecessor. 
- * @param cn the current vehicle - * @param cp the predecessor of the current vehicle - */ - def gipps (cn: Vehicle, cp: Vehicle): Double = - if cp == null then - gipps (cn.amax, cn.bmax, cn.len, cn.vmax, cn.t_disp, cn.velocity, cn.t_disp + 1000, cn.vmax, cn.rt) - else - gipps (cn.amax, cn.bmax, cp.len, cn.vmax, cn.t_disp, cn.velocity, cp.t_disp, cp.velocity, cn.rt) - end gipps - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the acceleration of the vehicle based on the Intelligent Driver Model. - * @param an ... - */ - def iDM (an: Double, bn: Double, sp: Double, Vn: Double, xn: Double, vn: Double, - xp: Double, vp: Double, T: Double, s0: Double, δ: Double): Double = - val Δx = xp - xn - sp - val Δv = vn - vp - val ss = s0 + vn * T + (vn * Δv) / (2.0 * sqrt (an * bn)) - an * (1.0 - (vn / Vn) ~^ δ - (ss / Δx) ~^ 2.0) - end iDM - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the acceleration of the vehicle based on the Intelligent Driver Model - * when there is no predecessor. - */ - def iDMFree (an: Double, vn: Double, Vn: Double, δ: Double = 4.0): Double = - an * (1.0 - (vn / Vn) ~^ δ) - end iDMFree - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the acceleration of the vehicle based on the Intelligent Driver Model - * for a vehicle and its predecessor. - * @param cn the current vehicle - * @param cp the predecessor of the current vehicle - */ - def iDM (cn: Vehicle, cp: Vehicle, δ: Double = 4.0): Double = - if cp == null then - iDMFree (cn.amax, cn.velocity, cn.vmax, δ) - else if cp.t_disp - cn.t_disp > FREERANGE then - iDMFree (cn.amax, cn.velocity, cn.vmax, δ) - else - iDM (cn.amax, -cn.bmax, cp.len, cn.vmax, cn.t_disp, cn.velocity, cp.t_disp, cp.velocity, cn.T, cn.s, δ) - end iDM - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the displacement difference. 
- * @param cn the current vehicle - * @param cp the predecessor of the current vehicle - */ - def basic (cn: Vehicle, cp: Vehicle): Double = - if cp == null then - 1.0 - (cn.velocity / cn.vmax) - else - val dx = cp.t_disp - cn.t_disp -// val dv = cp.velocity - cn.velocity - if dx > FREERANGE then 1.0 - (cn.velocity / cn.vmax) else 0.0 - end basic - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Butcher's Method (fifth order) for numerically solving an ordinary differential equation. - * @param Ft the "original" function value at time t - * @param ft the "derivative" function value at time t - * @param ft_rt the "derivative" function value at time t - rt - * @param rt the time difference (reaction time) - * - * FIX - integrate into Dynamics package - */ - def butcher (Ft: Double, ft: Double, ft_rt: Double, rt: Double): Double = - val _1_by_9 = 1.0 / 9.0 - val k1 = ft_rt - val k3 = ft_rt + 0.25 * (ft - ft_rt) - val k4 = ft_rt + 0.50 * (ft - ft_rt) - val k5 = ft_rt + 0.75 * (ft - ft_rt) - val k6 = ft - Ft + _1_by_9 * (7 * k1 + 32 * k3 + 12 * k4 + 32 * k5 + 7 * k6) * rt - end butcher - -end Motion - diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Path$.class b/target/scala-3.6.4/classes/scalation/simulation/process/Path$.class deleted file mode 100644 index 854f97f67..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Path$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Path$package$.class b/target/scala-3.6.4/classes/scalation/simulation/process/Path$package$.class deleted file mode 100644 index 7cfa4f5ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Path$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Path$package$PathModel$1$Car$.class b/target/scala-3.6.4/classes/scalation/simulation/process/Path$package$PathModel$1$Car$.class deleted 
file mode 100644 index 8c1ada403..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Path$package$PathModel$1$Car$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Path$package$PathModel$1$Car.class b/target/scala-3.6.4/classes/scalation/simulation/process/Path$package$PathModel$1$Car.class deleted file mode 100644 index 72bf9b4f7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Path$package$PathModel$1$Car.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Path$package$PathModel$1.class b/target/scala-3.6.4/classes/scalation/simulation/process/Path$package$PathModel$1.class deleted file mode 100644 index 23f51f856..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Path$package$PathModel$1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Path$package.class b/target/scala-3.6.4/classes/scalation/simulation/process/Path$package.class deleted file mode 100644 index 00e5e06d9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Path$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Path$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/Path$package.tasty deleted file mode 100644 index c12fe98da..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Path$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Path.class b/target/scala-3.6.4/classes/scalation/simulation/process/Path.class deleted file mode 100644 index 1f5203163..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Path.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Path.tasty 
b/target/scala-3.6.4/classes/scalation/simulation/process/Path.tasty deleted file mode 100644 index 0b934aeaf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Path.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/README.txt b/target/scala-3.6.4/classes/scalation/simulation/process/README.txt deleted file mode 100644 index 81d6e7bec..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/process/README.txt +++ /dev/null @@ -1,43 +0,0 @@ - -Actor based Process-Interaction Models --------------------------------------- - -Top Consepts: - -Component.scala -- Base Trait for Nodes and Edges -Model.scala -- Engine to Drive the Simulation Model -Model_MBM.scala -- extended for the Method-of-Batched Means (MBM) -Recorder.scala -- Records the Flow of Actors/Vehicles (Counts and Speed) - -Nodes: - -Gate.scala -- Controls the flow of actors (e.g., a traffic light) -Junction.scala -- Simple connector between pathways -Resource.scala -- Component that provides services to actors -Sink.scala -- Terminal for completing actors -Source.scala -- Generator for making new actors of type SimActor -VSource.scala -- Generator for making new actors of type Vehicle -WaitQueue.scala -- FCFS wait queue -WaitQueue_LCFS.scala -- LCFS wait queue (stack) - -Edges/Pathways: - -Transport.scala -- Connects From-Node to To-Node - motion governed by Random Variate -VTransport.scala -- same, except motion governed by equations of motion, e.g., Gipps Dynamics -Path.scala -- Multi-lane pathway made up of Transports or VTransports -Route.scala -- Multi-stage, multi-lane pathway made of Paths and Junctions - -Actors/Tokens: - -SimActor.scala -- General-purpose actor (active entity) -Vehicle.scala -- Specialized actor with enhanced motion support -Bus.scala -- Aggregate actor that moves a collection of actors - -Equations of Motion: - -Dynamics.scala -- Physics of motion, e.g., Car-Following models - -Template for 
Application Models: - -Ex_Template.scala - diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Recorder$.class b/target/scala-3.6.4/classes/scalation/simulation/process/Recorder$.class deleted file mode 100644 index 308b04c7c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Recorder$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Recorder.class b/target/scala-3.6.4/classes/scalation/simulation/process/Recorder.class deleted file mode 100644 index 35ef99d29..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Recorder.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Recorder.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/Recorder.tasty deleted file mode 100644 index b1e24bccb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Recorder.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Resource$.class b/target/scala-3.6.4/classes/scalation/simulation/process/Resource$.class deleted file mode 100644 index fb94eef93..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Resource$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Resource.class b/target/scala-3.6.4/classes/scalation/simulation/process/Resource.class deleted file mode 100644 index c38d72b16..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Resource.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Resource.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/Resource.tasty deleted file mode 100644 index e103db6a0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Resource.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/process/Route.scalaa b/target/scala-3.6.4/classes/scalation/simulation/process/Route.scalaa deleted file mode 100644 index 033fa8229..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/process/Route.scalaa +++ /dev/null @@ -1,148 +0,0 @@ - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sat Jan 25 19:44:16 EST 2014 - * @see LICENSE (MIT style license file). - * - * @note Route for Modeling Multi-Stage, Multi-Lane Pathway - */ - -package scalation -package simulation -package process - -// U N D E R D E V E L O P M E N T - -import scalation.animation.CommandType._ -import scalation.mathstat.VectorD -import scalation.random.Variate - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `Route` class provides a multi-stage, multi-lane pathway between two other - * components where the `Path`s are stitched together with `Junction`s. 
- * @param name the name of the route - * @param j the number of paths in the route - * @param k the number of lanes/transports in the route - * @param from the starting component - * @param to the ending component - * @param motion the variate or dynamics model for the speed/trip-time for motion down the `Route` - * @param isSpeed whether speed or trip-time is used for motion - * @param bend the bend or curvature of the `Route` (0 => line) - */ -class Route (name: String, j: Int, k: Int, val from: Component, val to: Component, - motion: Variate | Dynamics, isSpeed: Boolean = false, bend: Double = 0.0) - extends Component: - - initComponent (name, Array ()) - - private val debug = debugf ("Route", true) // debug function - - val path = Array.ofDim [Path] (j) // j paths/road segments (PUBLIC access required) - val junc = Array.ofDim [Junction] (j-1) // j-1 junctions (PUBLIC access required) - - private val disp = calcDisp // displacement - - debug ("init", s"name = $name, from = ${from.name}, to = ${to.name} with j = $j paths, k = $k lanes") - - for i <- path.indices do - val jPos = VectorD (from.at(0), from.at(1)) - if i < path.size - 1 then - junc(i) = new Junction (s"${name}_j$i", xy = (jPos(0), jPos(1))) - path(i) = if i == 0 then - new Path (s"${name}_$i", k, from, junc(0), motion, isSpeed, bend) - else if i == junc.size then - new Path (s"${name}_$i", k, junc.last, to, motion, isSpeed, bend) - else - new Path (s"${name}_$i", k, junc(i-1), junc(i), motion, isSpeed, bend) - subpart += path(i) - if i < junc.size then subpart += junc(i) - jPos += disp - end for - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Get the first vehicle in lane i of `Route` (the first element in vtree). - * @param i the i-th lane - */ - def getFirst (i: Int): Vehicle = path(0).lane(i).asInstanceOf [VTransport].vtree.getFirst - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the number of segments/paths. 
- */ - def segments: Int = j - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Return the number of lanes. - */ - def lanes: Int = k - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Give the location of the curve to be its starting point. - */ - override def at: Array [Double] = path(0).lane(0).at - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Calculate the amount of diplacement in the x and y directions between the - * junctions assuming they are evenly spaced. - */ - private def calcDisp: VectorD = - val xdist = from.at(0) - to.at(0) - val ydist = from.at(1) - to.at(1) - VectorD (xdist / k.toDouble, ydist / k.toDouble) - end calcDisp - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Tell the animation engine to display this `Route`. - */ - def display (): Unit = - for p <- path do p.display () - end display - -end Route - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `routeTest` main function is used to test the `Route` class, which is a - * composite class. It simulates a two-segment, two-lane road in one direction. 
- * > runMain scalation.simulation.process.routeTest - */ -@main def routeTest (): Unit = - - import scalation.random.{Bernoulli, Uniform} - - class RouteModel (name: String, nArrivals: Int, iArrivalRV: Variate, moveRV: Variate) - extends Model (name): - - val rng = Bernoulli () - val onRamp = new Source ("onRamp", this, () => Car (), 0, nArrivals, iArrivalRV, (200.0, 200.0)) - val offRamp = new Sink ("offRamp", (600.0, 200.0)) - val road = new Route ("lane", 2, 2, onRamp, offRamp, moveRV, false, 0.25) - - addComponent (onRamp, offRamp, road) - - case class Car () extends SimActor ("c", this): - - override def act (): Unit = - val i = rng.igen // choose a lane for the route - val carAhead = road.getFirst (i) // find the car-ahead in lane i (the one to follow) - SimActor.addToAlist (this, carAhead) // add this car after the car-ahead in alist - for j <- 0 until road.segments do - road.path(j).lane(i).move () // move along the j-th path in the i-th lane - if j < road.segments - 1 then - road.junc(j).jump () // enter the j-th junction - SimActor.removeFromAlist (this) // remove this car from alist - offRamp.leave () // exit - end act - - end Car - - end RouteModel - - val rm = new RouteModel ("road", 10, Uniform (4000, 6000), Uniform (2900, 3100)) - rm.simulate () - rm.waitFinished () - Model.shutdown () - -end routeTest - diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/SimActor$.class b/target/scala-3.6.4/classes/scalation/simulation/process/SimActor$.class deleted file mode 100644 index 6f99341fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/SimActor$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/SimActor.class b/target/scala-3.6.4/classes/scalation/simulation/process/SimActor.class deleted file mode 100644 index 9d06b158f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/SimActor.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/process/SimActor.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/SimActor.tasty deleted file mode 100644 index 8aba40da1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/SimActor.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Sink$.class b/target/scala-3.6.4/classes/scalation/simulation/process/Sink$.class deleted file mode 100644 index 977bbcb8d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Sink$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Sink.class b/target/scala-3.6.4/classes/scalation/simulation/process/Sink.class deleted file mode 100644 index 623a6a06a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Sink.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Sink.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/Sink.tasty deleted file mode 100644 index 243dbeee9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Sink.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Source$.class b/target/scala-3.6.4/classes/scalation/simulation/process/Source$.class deleted file mode 100644 index 441228829..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Source$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Source$package$.class b/target/scala-3.6.4/classes/scalation/simulation/process/Source$package$.class deleted file mode 100644 index 4bb0a14e7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Source$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Source$package$CarModel$2$.class 
b/target/scala-3.6.4/classes/scalation/simulation/process/Source$package$CarModel$2$.class deleted file mode 100644 index 99b84cb29..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Source$package$CarModel$2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Source$package$CarModel$2$Car$.class b/target/scala-3.6.4/classes/scalation/simulation/process/Source$package$CarModel$2$Car$.class deleted file mode 100644 index 66d3047c3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Source$package$CarModel$2$Car$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Source$package$CarModel$2$Car.class b/target/scala-3.6.4/classes/scalation/simulation/process/Source$package$CarModel$2$Car.class deleted file mode 100644 index 52837a41b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Source$package$CarModel$2$Car.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Source$package.class b/target/scala-3.6.4/classes/scalation/simulation/process/Source$package.class deleted file mode 100644 index 1438537a4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Source$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Source$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/Source$package.tasty deleted file mode 100644 index cf3d892b1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Source$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Source.class b/target/scala-3.6.4/classes/scalation/simulation/process/Source.class deleted file mode 100644 index ccb5c92f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Source.class and /dev/null 
differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Source.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/Source.tasty deleted file mode 100644 index 4b1d1c383..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Source.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Transport$.class b/target/scala-3.6.4/classes/scalation/simulation/process/Transport$.class deleted file mode 100644 index 760a5effe..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Transport$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Transport.class b/target/scala-3.6.4/classes/scalation/simulation/process/Transport.class deleted file mode 100644 index 168d2aa90..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Transport.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Transport.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/Transport.tasty deleted file mode 100644 index 9915069fb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Transport.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/VSource$.class b/target/scala-3.6.4/classes/scalation/simulation/process/VSource$.class deleted file mode 100644 index c1f86b1b4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/VSource$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package$.class b/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package$.class deleted file mode 100644 index a76ff0807..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package$CarModel$2$.class b/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package$CarModel$2$.class deleted file mode 100644 index 153d8b6a4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package$CarModel$2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package$CarModel$2$Car$.class b/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package$CarModel$2$Car$.class deleted file mode 100644 index fe6e09c2c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package$CarModel$2$Car$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package$CarModel$2$Car.class b/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package$CarModel$2$Car.class deleted file mode 100644 index 00715558d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package$CarModel$2$Car.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package.class b/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package.class deleted file mode 100644 index 0023659e5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package.tasty deleted file mode 100644 index 251edfb73..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/VSource$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/VSource.class b/target/scala-3.6.4/classes/scalation/simulation/process/VSource.class deleted file mode 100644 index 
c7fd645d2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/VSource.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/VSource.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/VSource.tasty deleted file mode 100644 index 3be4c9caa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/VSource.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/VTransport$.class b/target/scala-3.6.4/classes/scalation/simulation/process/VTransport$.class deleted file mode 100644 index e2658615f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/VTransport$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/VTransport.class b/target/scala-3.6.4/classes/scalation/simulation/process/VTransport.class deleted file mode 100644 index 32f962a5b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/VTransport.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/VTransport.scala.bak b/target/scala-3.6.4/classes/scalation/simulation/process/VTransport.scala.bak deleted file mode 100644 index 91cddce04..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/process/VTransport.scala.bak +++ /dev/null @@ -1,126 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Casey Bowman - * @version 2.0 - * @date Tue Feb 4 14:56:34 EST 2020 - * @see LICENSE (MIT style license file). 
- * - * @note Variable Speed Transport is a Pathway between Components - */ - -package scalation -package simulation -package process - -import scalation.animation.CommandType._ -import scalation.mathstat._ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `VTransport` class provides a variable-speed pathway between two other components. - * The components in a `Model` conceptually form a 'graph' in which the edges - * are `VTransport`s and the nodes are other `Component`s. - * @see `animation.Dgraph.move2Boundary` that aligns edge with node boundaries. - * @param name the name of the variable-speed transport - * @param from the starting component - * @param to the ending component - * @param motion the dynamics model for the speed/trip-time for motion down the `VTransport` - * @param isSpeed whether speed or trip-time is used for motion - * @param bend the bend or curvature of the `VTransport` (0 => line) - * @param shift1 the x-y shift for the transport's first end-point (from-side) - * @param shift2 the x-y shift for the transport's second end-point (to-side) - */ -class VTransport (name: String, from_ : Component, to_ : Component, - motion: Dynamics, isSpeed: Boolean = false, bend: Double = 0.0, - shift1: VectorD = VectorD (0, 0), shift2: VectorD = VectorD (0, 0)) - extends Transport (name, from_, to_, null, isSpeed, bend, shift1, shift2): - - private val debug = debugf ("VTransport", true) // debug function - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Move the entity (SimActor) smoothly down this VTransport (e.g., road). - * Repeatedely move it along the VTransport/Edge/QCurve. - * Caveat: tokens coordinates are computed using a shadow QCurve (same coordinates - * as the one that will be created by the animation engine). 
- * @param x the currect displacement for the actor/vehicle (try 0.0) - * @param fraction the fraction of the remaining transform to move along (try 1.0) - */ - def move (x: Double, fraction: Double): Unit = - val actor = director.theActor.asInstanceOf [Vehicle] - actor.disp = x - tally (Vehicle.rt) - - var done = false - while actor.disp < fraction * curve.length && ! done do - director.log.trace (this, "moves for " + Vehicle.rt, actor, director.clock) - motion.update (actor) - debug ("move", s"${actor.name}, x = ${actor.disp}, VTransport = $name") - director.animate (actor, MoveToken, null, null, calcPoint (actor.disp / fraction)) - if actor.disp <= actor.t_disp then - done = true - val p = actor.pred - val s = actor.succ - if p != null && s != null then { p.succ = s; s.pred = p } - else if p != null && s == null then p.succ = null - else if p == null && s != null then s.pred = null - actor.pred = null - actor.succ = null - actor.schedule (Vehicle.rt) - actor.yieldToDirector () - end while - end move - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Calculate the (x, y) point in the simulation space for the vehicle. - * @param s the current displacement along the road of the vehicle. - */ - def calcPoint (s: Double): Array [Double] = - val prop = s / curve.length - val x = p1(0) + (p2(0) - p1(8)) * prop - val y = p1(1) + (p2(1) - p1(1)) * prop - Array (x - RAD, y - RAD) - end calcPoint - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Method to nudge the road over a little bit for use with multi-lane - * highways. If nudge factor is zero then the road will not be nudged. 
- * Uses the slope m; FIX angle might work better - * @param λ the nudge factor - * - def nudgeAt (λ: Double): Unit = - val a1 = p1.getX() - val b1 = p1.getY() - val a2 = p2.getX() - val b2 = p2.getY() - if m == Double.PositiveInfinity || m == Double.NegativeInfinity then - p1.setLocation (a1 + λ, b1) - p2.setLocation (a2 + λ, b2) - else if m == 0.0 then - p1.setLocation (a1, b1 + λ) - p2.setLocation (a2, b2 + λ) - else - val α = λ * Math.sqrt (1.0 / (m * m + 1)) - val c1 = a1 - m * α - val d1 = b1 + α - val c2 = a2 - m * α - val d2 = b2 + α - p1.setLocation (c1, d1) - p2.setLocation (c2, d2) - end nudgeAt - - import java.io._ - - var pw: PrintWriter = null - - def openPrint (path: String): Unit = - pw = new PrintWriter (new FileWriter (new File (path))) } - end openPrint - - def pw_println (s: String): Unit = - if pw != null then { pw.println (s); pw.flush () } - end pw_println - - def closePrint (): Unit = pw.close () - */ - -end VTransport - diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/VTransport.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/VTransport.tasty deleted file mode 100644 index 980745233..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/VTransport.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Vehicle$.class b/target/scala-3.6.4/classes/scalation/simulation/process/Vehicle$.class deleted file mode 100644 index 54c0d8f86..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Vehicle$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Vehicle.class b/target/scala-3.6.4/classes/scalation/simulation/process/Vehicle.class deleted file mode 100644 index 363ee538c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Vehicle.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Vehicle.scala.bak 
b/target/scala-3.6.4/classes/scalation/simulation/process/Vehicle.scala.bak deleted file mode 100644 index a58a48a75..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/process/Vehicle.scala.bak +++ /dev/null @@ -1,116 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller, Casey Bowman - * @version 2.0 - * @date Tue Feb 4 14:56:34 EST 2020 - * @see LICENSE (MIT style license file). - * - * @note Vehicle Is Enhanced SimActor Supporting Changing Velocities - */ - -package scalation -package simulation -package process - -import scala.math.log - -val vehicleProps = Map ("τ" -> 1.0, // reaction time (defaults) - "amax" -> 2.0, // max acceleration - "bmax" -> -1.5, // max deceleration - "v0" -> 0.0, // starting velocity - "vmax" -> 33.528, // max velocity - "T" -> 3.0, // min time headway - "s" -> 5.0, // min distance headway - "len" -> 4.0) // length of the vehicle - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The Vehicle class extends the `SimActor` and represents a vehicle on a road. 
- * @param label the label/name of the vehicle - * @param director the model to which this vehicle belongs - * @param prop the property map governing the motion of the vehicle - */ -abstract class Vehicle (label: String, director: Model, - prop: Map [String, Double] = vehicleProps) - extends SimActor (label, director): - - private val debug = debugf ("Vehicle", true) // debug function - - private [process] var disp = 0.0 // set initial current displacement to 0 - private [process] var t_disp = 0.0 // set initial total displacement to 0 - private [process] var velocity = prop("v0") // set initial velocity to v0 - private var o_t_disp = t_disp // set initial old total displacement t_disp - private var o_velocity = velocity // set initial old velocity to velocity - private var acc = 0.0 // set initial acceleration to 0 - private var o_acc = acc // set initial old acceleration acc - - def rt: Double = prop("rt") // reaction time - def amax: Double = prop("amax") // max acceleration - def bmax: Double = prop("bmax") // max deceleration - def v0: Double = prop("v0") // starting velocity - def vmax: Double = prop("vmax") // max velocity - def T: Double = prop("T") // min time headway - def s: Double = prop("s") // min distance headway - def len: Double = prop("len") // length of the vehicle - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the values of the vehicle: velocity, displacement, lane according - * to the car-following model being used. - * @param gipps whether to use the Gipps or IDM car-following model - */ - def update (gipps: Boolean = true): Unit = - if gipps then gippsUpdate () - else iDMUpdate () - end update - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the vehicle's velocity and position using Gipps' Model (located in `Motion`) - * and Butcher's method for solving ordinary differential equations. 
- * @param prop the property map governing the motion of a vehicle - */ - def gippsUpdate (): Unit = - val v = Motion.gipps (this, pred.asInstanceOf [Vehicle]) - val x = Motion.butcher (t_disp, v, velocity, rt) - o_velocity = velocity - velocity = v - val dx = x - t_disp - disp += dx - o_t_disp = t_disp - t_disp = x - end gippsUpdate - - //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Update the vehicle's acceleration, velocity, and position using the - * Intelligent Driver Model (located in `Motion`) and Butcher's method - * for solving ordinary differential equations. - */ - def iDMUpdate (): Unit = - var a = Motion.iDM (this, pred.asInstanceOf [Vehicle], rt) // was delta ??? - debug ("iDMUpdate", s"name: \t the new ACCELERATION is: $a") - if a.isNaN then a = 0.0 - if a.isNegInfinity then a = bmax // max braking acceleration - if a.isPosInfinity then a = amax // max forward acceleration - if a < 0.0 && a < bmax then - val r = log(a) / log (bmax) - a = if r > 5.0 then 3.0 * bmax else bmax // FIX - unclear - if a > 0.0 && a > amax then a = amax - - var v = Motion.butcher (velocity, a, acc, rt) - debug ("iDMUpdate", s"name: \t the new VELOCITY is: $v") - if v < 0.0 then v = 1.0 // move slowly, not stopped - - val x = Motion.butcher (t_disp, v, velocity, rt) - debug ("iDMUpdate", s"name: \t the new POSITION is: $x") - - o_acc = acc - acc = a - o_velocity = velocity - velocity = v - val dx = x - t_disp - disp += dx - o_t_disp = t_disp - t_disp = x - end iDMUpdate - -end Vehicle - diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/Vehicle.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/Vehicle.tasty deleted file mode 100644 index 089955780..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/Vehicle.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue$.class 
b/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue$.class deleted file mode 100644 index 457a602ca..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue.class b/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue.class deleted file mode 100644 index fa26d5836..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue.tasty deleted file mode 100644 index 8208cda12..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue_LCFS$.class b/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue_LCFS$.class deleted file mode 100644 index 38d732704..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue_LCFS$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue_LCFS.class b/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue_LCFS.class deleted file mode 100644 index 4189d2ae1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue_LCFS.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue_LCFS.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue_LCFS.tasty deleted file mode 100644 index 282164b22..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/WaitQueue_LCFS.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Bank$package$.class 
b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Bank$package$.class deleted file mode 100644 index 30ff7438c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Bank$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Bank$package.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Bank$package.class deleted file mode 100644 index 5d95a9da5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Bank$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Bank$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Bank$package.tasty deleted file mode 100644 index fa2e1f9b4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Bank$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/BankModel$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/BankModel$.class deleted file mode 100644 index 13cc9eb20..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/BankModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/BankModel$Customer$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/BankModel$Customer$.class deleted file mode 100644 index 9860b3b4e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/BankModel$Customer$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/BankModel$Customer.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/BankModel$Customer.class deleted file mode 100644 index 6418975ef..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/BankModel$Customer.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/BankModel.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/BankModel.class deleted file mode 100644 index 6f5dcdf5a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/BankModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/BankModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/BankModel.tasty deleted file mode 100644 index dad1ce209..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/BankModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenter$package$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenter$package$.class deleted file mode 100644 index c39e281c8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenter$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenter$package.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenter$package.class deleted file mode 100644 index 452b35ff1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenter$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenter$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenter$package.tasty deleted file mode 100644 index 5a957c133..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenter$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenterModel$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenterModel$.class deleted file mode 100644 index fee711292..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenterModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenterModel$Call$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenterModel$Call$.class deleted file mode 100644 index d921c84ac..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenterModel$Call$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenterModel$Call.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenterModel$Call.class deleted file mode 100644 index 76d764495..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenterModel$Call.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenterModel.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenterModel.class deleted file mode 100644 index 905c7d652..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenterModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenterModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenterModel.tasty deleted file mode 100644 index 68363cbb9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/CallCenterModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDept$package$.class 
b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDept$package$.class deleted file mode 100644 index 558fd7732..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDept$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDept$package.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDept$package.class deleted file mode 100644 index 786c3464d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDept$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDept$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDept$package.tasty deleted file mode 100644 index c86630920..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDept$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDeptModel$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDeptModel$.class deleted file mode 100644 index cec77cd7c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDeptModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDeptModel$Patient$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDeptModel$Patient$.class deleted file mode 100644 index 5ff185a64..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDeptModel$Patient$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDeptModel$Patient.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDeptModel$Patient.class deleted file mode 100644 index 
a10961f48..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDeptModel$Patient.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDeptModel.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDeptModel.class deleted file mode 100644 index bc52b26a3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDeptModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDeptModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDeptModel.tasty deleted file mode 100644 index 2a51d5a6e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/EmerDeptModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Loop$package$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Loop$package$.class deleted file mode 100644 index 5eb3fd448..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Loop$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Loop$package.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Loop$package.class deleted file mode 100644 index 0729a5ba4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Loop$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Loop$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Loop$package.tasty deleted file mode 100644 index 45b8ef728..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Loop$package.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel$.class deleted file mode 100644 index d2f520ffd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel$Car1$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel$Car1$.class deleted file mode 100644 index 2d515e224..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel$Car1$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel$Car1.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel$Car1.class deleted file mode 100644 index ba6b32aa7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel$Car1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel$Car2$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel$Car2$.class deleted file mode 100644 index caa187d6c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel$Car2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel$Car2.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel$Car2.class deleted file mode 100644 index 4f5e94af4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel$Car2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel.class deleted file mode 
100644 index aa48876a5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel.tasty deleted file mode 100644 index a1ee25c4c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/LoopModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Machine$package$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Machine$package$.class deleted file mode 100644 index e6c203afc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Machine$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Machine$package.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Machine$package.class deleted file mode 100644 index f7dcce32d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Machine$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Machine$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Machine$package.tasty deleted file mode 100644 index 703a467c0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Machine$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/MachineModel$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/MachineModel$.class deleted file mode 100644 index 41d37e9c1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/MachineModel$.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/MachineModel$Part$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/MachineModel$Part$.class deleted file mode 100644 index 3a9d6606c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/MachineModel$Part$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/MachineModel$Part.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/MachineModel$Part.class deleted file mode 100644 index 57f0df66f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/MachineModel$Part.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/MachineModel.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/MachineModel.class deleted file mode 100644 index c5a2d3097..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/MachineModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/MachineModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/MachineModel.tasty deleted file mode 100644 index 1c60f3a12..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/MachineModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreet$package$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreet$package$.class deleted file mode 100644 index 59193ebf1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreet$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreet$package.class 
b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreet$package.class deleted file mode 100644 index 078b57f80..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreet$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreet$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreet$package.tasty deleted file mode 100644 index f6827c057..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreet$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreetModel$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreetModel$.class deleted file mode 100644 index 741ab2805..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreetModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreetModel$Car$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreetModel$Car$.class deleted file mode 100644 index 6377fa89f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreetModel$Car$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreetModel$Car.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreetModel$Car.class deleted file mode 100644 index 4ec8d9a39..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreetModel$Car.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreetModel.class 
b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreetModel.class deleted file mode 100644 index af6ed04b9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreetModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreetModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreetModel.tasty deleted file mode 100644 index f013a39cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayStreetModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicle$package$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicle$package$.class deleted file mode 100644 index 033ed5436..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicle$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicle$package.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicle$package.class deleted file mode 100644 index d6c9bb87a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicle$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicle$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicle$package.tasty deleted file mode 100644 index e27b9f026..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicle$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicleModel$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicleModel$.class 
deleted file mode 100644 index ec71f0acc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicleModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicleModel$Car$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicleModel$Car$.class deleted file mode 100644 index 01ef6dc75..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicleModel$Car$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicleModel$Car.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicleModel$Car.class deleted file mode 100644 index 6101558b9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicleModel$Car.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicleModel.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicleModel.class deleted file mode 100644 index e4c413dcf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicleModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicleModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicleModel.tasty deleted file mode 100644 index 532d7e4fa..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/OneWayVehicleModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Road$package$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Road$package$.class deleted file mode 100644 index fc13f2204..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Road$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Road$package.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Road$package.class deleted file mode 100644 index 1818b9177..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Road$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Road$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Road$package.tasty deleted file mode 100644 index e6ec168d6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Road$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel$.class deleted file mode 100644 index dc43b23e9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel$Car1$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel$Car1$.class deleted file mode 100644 index 6a9aa398e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel$Car1$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel$Car1.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel$Car1.class deleted file mode 100644 index 7576f2a4f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel$Car1.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel$Car2$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel$Car2$.class deleted file mode 100644 index 55dea444a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel$Car2$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel$Car2.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel$Car2.class deleted file mode 100644 index 8a14d95f9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel$Car2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel.class deleted file mode 100644 index 8ffe71a73..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel.tasty deleted file mode 100644 index 8d0821671..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/RoadModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Traffic$package$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Traffic$package$.class deleted file mode 100644 index 32d176e42..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Traffic$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Traffic$package.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Traffic$package.class deleted file mode 
100644 index 0557554d8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Traffic$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Traffic$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Traffic$package.tasty deleted file mode 100644 index a5cc23984..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/Traffic$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDyn$package$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDyn$package$.class deleted file mode 100644 index 085f196c5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDyn$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDyn$package.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDyn$package.class deleted file mode 100644 index 5af951341..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDyn$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDyn$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDyn$package.tasty deleted file mode 100644 index 4edc90e7c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDyn$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDynModel$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDynModel$.class deleted file mode 100644 index 278b5c22d..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDynModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDynModel$Car$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDynModel$Car$.class deleted file mode 100644 index 402f4dfae..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDynModel$Car$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDynModel$Car.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDynModel$Car.class deleted file mode 100644 index 12fa6424c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDynModel$Car.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDynModel.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDynModel.class deleted file mode 100644 index af6afebb1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDynModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDynModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDynModel.tasty deleted file mode 100644 index 6ab7a7d18..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficDynModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChange$package$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChange$package$.class deleted file mode 100644 index 284ad87fc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChange$package$.class and /dev/null 
differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChange$package.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChange$package.class deleted file mode 100644 index 7c2497ae2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChange$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChange$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChange$package.tasty deleted file mode 100644 index bb1a6a910..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChange$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChangeModel$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChangeModel$.class deleted file mode 100644 index 099c083d4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChangeModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChangeModel$Car$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChangeModel$Car$.class deleted file mode 100644 index c49508860..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChangeModel$Car$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChangeModel$Car.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChangeModel$Car.class deleted file mode 100644 index f59f6488e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChangeModel$Car.class and /dev/null differ diff 
--git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChangeModel.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChangeModel.class deleted file mode 100644 index b7a26add0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChangeModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChangeModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChangeModel.tasty deleted file mode 100644 index e6344f727..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficLaneChangeModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModel$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModel$.class deleted file mode 100644 index dab04d342..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModel$Car$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModel$Car$.class deleted file mode 100644 index 9e8a808a5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModel$Car$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModel$Car.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModel$Car.class deleted file mode 100644 index 7c1790c0f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModel$Car.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModel.class 
b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModel.class deleted file mode 100644 index 1cc33d174..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModel.tasty deleted file mode 100644 index fd6cf6435..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModelTurn$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModelTurn$.class deleted file mode 100644 index 2642d12bf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModelTurn$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModelTurn$Car$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModelTurn$Car$.class deleted file mode 100644 index 5be29c75b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModelTurn$Car$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModelTurn$Car.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModelTurn$Car.class deleted file mode 100644 index 5b22414d1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModelTurn$Car.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModelTurn.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModelTurn.class deleted file mode 100644 index 
a55571f43..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModelTurn.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModelTurn.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModelTurn.tasty deleted file mode 100644 index 828a695eb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficModelTurn.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficTurn$package$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficTurn$package$.class deleted file mode 100644 index 119270612..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficTurn$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficTurn$package.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficTurn$package.class deleted file mode 100644 index 641d174f1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficTurn$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficTurn$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficTurn$package.tasty deleted file mode 100644 index 1087ba696..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/TrafficTurn$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_Bus$package$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_Bus$package$.class deleted file mode 100644 index 0f3a3de99..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_Bus$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_Bus$package.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_Bus$package.class deleted file mode 100644 index f389b75c6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_Bus$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_Bus$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_Bus$package.tasty deleted file mode 100644 index 9a19bd2db..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_Bus$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel$.class deleted file mode 100644 index ebd117420..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel$Rider$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel$Rider$.class deleted file mode 100644 index 4b188aac6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel$Rider$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel$Rider.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel$Rider.class deleted file mode 100644 index 49c5e3fa6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel$Rider.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel$UGA_Bus$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel$UGA_Bus$.class deleted file mode 100644 index 69f0ed312..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel$UGA_Bus$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel$UGA_Bus.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel$UGA_Bus.class deleted file mode 100644 index e496f38b3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel$UGA_Bus.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel.class deleted file mode 100644 index c21d1148b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel.tasty deleted file mode 100644 index cfb0f8047..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/UGA_BusModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/index.html b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/index.html deleted file mode 100644 index 5bf548c13..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/index.html +++ /dev/null @@ -1,20 +0,0 @@ - - -

    Source files in example_1 Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runBank.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runBank.class deleted file mode 100644 index 000e7fe2b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runBank.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runBank.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runBank.tasty deleted file mode 100644 index a75d7d7e7..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runBank.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runCallCenter.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runCallCenter.class deleted file mode 100644 index 489c233e1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runCallCenter.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runCallCenter.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runCallCenter.tasty deleted file mode 100644 index 7adccffcb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runCallCenter.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runEmerDept.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runEmerDept.class deleted file mode 100644 index 920ae28e1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runEmerDept.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runEmerDept.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runEmerDept.tasty deleted file mode 
100644 index 5e4e69c23..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runEmerDept.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runLoop.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runLoop.class deleted file mode 100644 index 99f964f21..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runLoop.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runLoop.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runLoop.tasty deleted file mode 100644 index 683dffbcb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runLoop.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runMachine.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runMachine.class deleted file mode 100644 index 4b48e5bcc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runMachine.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runMachine.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runMachine.tasty deleted file mode 100644 index 90d9de5f4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runMachine.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runOneWayStreet.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runOneWayStreet.class deleted file mode 100644 index 92c710f75..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runOneWayStreet.class and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runOneWayStreet.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runOneWayStreet.tasty deleted file mode 100644 index 9c2280544..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runOneWayStreet.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runOneWayVehicle.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runOneWayVehicle.class deleted file mode 100644 index 34f03193e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runOneWayVehicle.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runOneWayVehicle.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runOneWayVehicle.tasty deleted file mode 100644 index a523dc672..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runOneWayVehicle.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runRoad.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runRoad.class deleted file mode 100644 index f9be5e480..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runRoad.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runRoad.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runRoad.tasty deleted file mode 100644 index f15d3fe29..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runRoad.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTraffic.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTraffic.class deleted file mode 100644 index 
56fa825ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTraffic.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTraffic.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTraffic.tasty deleted file mode 100644 index 618164a10..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTraffic.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficDyn.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficDyn.class deleted file mode 100644 index 50d181893..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficDyn.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficDyn.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficDyn.tasty deleted file mode 100644 index 45e49da53..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficDyn.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficLaneChange.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficLaneChange.class deleted file mode 100644 index 4086d8c35..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficLaneChange.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficLaneChange.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficLaneChange.tasty deleted file mode 100644 index abdc32892..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficLaneChange.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficTurn.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficTurn.class deleted file mode 100644 index 82cdf6a1c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficTurn.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficTurn.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficTurn.tasty deleted file mode 100644 index 8ba910535..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runTrafficTurn.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runUGA_Bus.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runUGA_Bus.class deleted file mode 100644 index a966a9111..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runUGA_Bus.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runUGA_Bus.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runUGA_Bus.tasty deleted file mode 100644 index cf14541a2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_1/runUGA_Bus.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/Bank$package$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/Bank$package$.class deleted file mode 100644 index 3d398c1a0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/Bank$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/Bank$package.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/Bank$package.class deleted file mode 
100644 index 7c8ba88cb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/Bank$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/Bank$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/Bank$package.tasty deleted file mode 100644 index ba3a58200..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/Bank$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/BankModel$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/BankModel$.class deleted file mode 100644 index 9c681c08c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/BankModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/BankModel$Customer$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/BankModel$Customer$.class deleted file mode 100644 index 6630306f8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/BankModel$Customer$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/BankModel$Customer.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/BankModel$Customer.class deleted file mode 100644 index d8326c44e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/BankModel$Customer.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/BankModel.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/BankModel.class deleted file mode 100644 index 187f08360..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/BankModel.class and /dev/null 
differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/BankModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/BankModel.tasty deleted file mode 100644 index 855285920..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/BankModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/index.html b/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/index.html deleted file mode 100644 index 47a0b6f4b..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/index.html +++ /dev/null @@ -1,8 +0,0 @@ - - -

    Source files in example_MBM Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/runBank.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/runBank.class deleted file mode 100644 index a87545dc6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/runBank.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/runBank.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/runBank.tasty deleted file mode 100644 index 0e6dc9e61..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/runBank.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/testCorrBank.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/testCorrBank.class deleted file mode 100644 index 519379663..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/testCorrBank.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/testCorrBank.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/testCorrBank.tasty deleted file mode 100644 index 63a15302d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MBM/testCorrBank.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/Bank$package$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/Bank$package$.class deleted file mode 100644 index aafd20868..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/Bank$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/Bank$package.class 
b/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/Bank$package.class deleted file mode 100644 index 24b458ee6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/Bank$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/Bank$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/Bank$package.tasty deleted file mode 100644 index 29389ef5d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/Bank$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/BankModel$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/BankModel$.class deleted file mode 100644 index 9963c08e0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/BankModel$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/BankModel$Customer$.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/BankModel$Customer$.class deleted file mode 100644 index 4e0ccc129..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/BankModel$Customer$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/BankModel$Customer.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/BankModel$Customer.class deleted file mode 100644 index c7c3ea38d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/BankModel$Customer.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/BankModel.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/BankModel.class deleted file mode 100644 index e7be06e76..000000000 Binary 
files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/BankModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/BankModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/BankModel.tasty deleted file mode 100644 index a7af276a2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/BankModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/index.html b/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/index.html deleted file mode 100644 index c82a7c09c..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/index.html +++ /dev/null @@ -1,8 +0,0 @@ - - -

    Source files in example_MIR Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/runBank.class b/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/runBank.class deleted file mode 100644 index e2c30a987..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/runBank.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/runBank.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/runBank.tasty deleted file mode 100644 index 5de3cee5e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/example_MIR/runBank.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/index.html b/target/scala-3.6.4/classes/scalation/simulation/process/index.html deleted file mode 100644 index 90600b824..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/process/index.html +++ /dev/null @@ -1,35 +0,0 @@ - - -

    Source files in process Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/pathTest.class b/target/scala-3.6.4/classes/scalation/simulation/process/pathTest.class deleted file mode 100644 index abc968f43..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/pathTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/pathTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/pathTest.tasty deleted file mode 100644 index c15a77bd4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/pathTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/sourceTest.class b/target/scala-3.6.4/classes/scalation/simulation/process/sourceTest.class deleted file mode 100644 index 2e5643c82..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/sourceTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/sourceTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/sourceTest.tasty deleted file mode 100644 index d7ecafe4b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/sourceTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/vSourceTest.class b/target/scala-3.6.4/classes/scalation/simulation/process/vSourceTest.class deleted file mode 100644 index e32141604..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/vSourceTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/process/vSourceTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/process/vSourceTest.tasty deleted file mode 100644 index f5031ac01..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/process/vSourceTest.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet$.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet$.class deleted file mode 100644 index 33c196cb0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet$package$.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet$package$.class deleted file mode 100644 index 75ace28a2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet$package.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet$package.class deleted file mode 100644 index 8614a5793..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet$package.tasty deleted file mode 100644 index 6d60a2338..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet.class deleted file mode 100644 index 0dc28da86..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet.tasty b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet.tasty deleted file mode 100644 index 2f482e537..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/JacksonNet.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue$.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue$.class deleted file mode 100644 index 2a2b81b14..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue$package$.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue$package$.class deleted file mode 100644 index 9049984ce..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue$package.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue$package.class deleted file mode 100644 index 4dce646ab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue$package.tasty deleted file mode 100644 index bbc5fda25..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue.class deleted file mode 100644 index 75c65925e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue.tasty b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue.tasty 
deleted file mode 100644 index 3a75cc6d6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MGc_Queue.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM1_Queue.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM1_Queue.class deleted file mode 100644 index b35abc9af..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM1_Queue.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM1_Queue.tasty b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM1_Queue.tasty deleted file mode 100644 index 8cd746c4e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM1_Queue.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM2_Queue.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM2_Queue.class deleted file mode 100644 index 3d06f34b1..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM2_Queue.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM2_Queue.tasty b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM2_Queue.tasty deleted file mode 100644 index 686e28c8e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM2_Queue.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM_Queue$package$.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM_Queue$package$.class deleted file mode 100644 index d7240934d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM_Queue$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM_Queue$package.class 
b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM_Queue$package.class deleted file mode 100644 index 14b52f4b6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM_Queue$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM_Queue$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM_Queue$package.tasty deleted file mode 100644 index a4598f99e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM_Queue$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM_Queue.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM_Queue.class deleted file mode 100644 index 502368969..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM_Queue.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM_Queue.tasty b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM_Queue.tasty deleted file mode 100644 index 798bda3a3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MM_Queue.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMc_Queue.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMc_Queue.class deleted file mode 100644 index 0df0aa3ac..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMc_Queue.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMc_Queue.tasty b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMc_Queue.tasty deleted file mode 100644 index f5894d5c3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMc_Queue.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue$.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue$.class deleted file mode 100644 index 9b21c1c4d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue$package$.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue$package$.class deleted file mode 100644 index 064f4acc3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue$package.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue$package.class deleted file mode 100644 index 07960d335..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue$package.tasty deleted file mode 100644 index d324ea31a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue.class deleted file mode 100644 index 114dcd7f6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue.tasty b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue.tasty deleted file mode 100644 index d82762c05..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/MMck_Queue.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/index.html b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/index.html deleted file mode 100644 index c2e9641fd..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/index.html +++ /dev/null @@ -1,11 +0,0 @@ - - -

    Source files in queueingnet Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/jacksonNetTest.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/jacksonNetTest.class deleted file mode 100644 index 995d6940d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/jacksonNetTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/jacksonNetTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/jacksonNetTest.tasty deleted file mode 100644 index 13d7f05a2..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/jacksonNetTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mGc_QueueTest.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mGc_QueueTest.class deleted file mode 100644 index b8ef5eef8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mGc_QueueTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mGc_QueueTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mGc_QueueTest.tasty deleted file mode 100644 index 572efcd25..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mGc_QueueTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mM_QueueTest.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mM_QueueTest.class deleted file mode 100644 index 5288966a5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mM_QueueTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mM_QueueTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mM_QueueTest.tasty deleted file mode 100644 index d0195b733..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mM_QueueTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mMck_QueueTest.class b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mMck_QueueTest.class deleted file mode 100644 index e530bc985..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mMck_QueueTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mMck_QueueTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mMck_QueueTest.tasty deleted file mode 100644 index 154dc97a6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/mMck_QueueTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/old/MM2_Queue.scala.bak b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/old/MM2_Queue.scala.bak deleted file mode 100644 index cfcdead74..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/old/MM2_Queue.scala.bak +++ /dev/null @@ -1,101 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Dec 29 21:28:40 EST 2013 - * @see LICENSE (MIT style license file). - * @see http://irh.inf.unideb.hu/~jsztrik/education/16/SOR_Main_Angol.pdf - * - * @title M/M/2 queue - */ - -package scalation -package simulation -package queueingnet - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `MM2_Queue` class is used to solve single node Markovian Queueing problems. - * It models a service station consisting of one queue and '2' servers, i.e., - * an M/M/2 queue. The arrivals are Poisson and the service time distribution - * is Exponential. 
- *------------------------------------------------------------------------------ - * @see also `MMck_Queue` to model finite capacity queues. - * @see also `MGc_Queue` to model queues with general service time distributions. - *------------------------------------------------------------------------------ - * @param lambda the overall arrival rate - * @param mu the per unit service rate - */ -class MM2_Queue (lambda: Double, mu: Double): - - val p = lambda / (2 * mu) // traffic intensity - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Probability system is empty. - */ - def pi_0: Double = (1 - p) / (1 + p) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Expected length/number in sYstem. - */ - val l_y = (2 * p) / (1 - p~^2) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Expected length/number in Service. - */ - val l_s = lambda / mu - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Expected length of the waiting queue. - */ - val l_q = l_y - l_s - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Expected time in the sYstem, Service and Queue. - */ - val (t_y, t_s, t_q) = (l_y / lambda, l_s / lambda, l_q / lambda) - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** View/check intermediate results. - */ - def view (): Unit = - println ("Check queueing parameters:") - println ("lambda = %g".format (lambda)) // arrival rate - println ("mu = %g".format (mu)) // service rate - println ("2 = %d".format (2)) // number of servers - println ("p = %g".format (p)) // traffic intensity (rho) - println ("pi_0 = %g".format (pi_0)) // probability it is empty - end view - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Report the results. 
- */ - def report (): Unit = - println ("Results for queue:") - println ("---------------------------------------------------") - println ("| Queue | l_q = %8.4g".format (l_q) + " | t_q = %8.4g".format (t_q) + " |") - println ("| Service | l_s = %8.4g".format (l_s) + " | t_s = %8.4g".format (t_s) + " |") - println ("| sYstem | l_y = %8.4g".format (l_y) + " | t_y = %8.4g".format (t_y) + " |") - println ("---------------------------------------------------") - println ("After time unit coversion") - println (s"t_q = ${60*t_q}, t_s = ${60*t_s}, t_y = ${60*t_y}") - end report - -end MM2_Queue - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `mM2_QueueTest` main function is used to test the `MM2_Queue` class. - * > runMain scalation.simulation.queueingnet.mM2_QueueTest - */ -@main def mM2_QueueTest (): Unit = - - val lambda = 12.0 // customer arrival rate (per hour) - val mu = 7.5 // customer service rate (per hour) - - println("\nM/M/2 Queue Results:") - val mm2 = new MM2_Queue (lambda, mu) // M/M/2 Queue - mm2.view () - mm2.report () - -end mM2_QueueTest - diff --git a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/old/MMc_Queue.scala.bak b/target/scala-3.6.4/classes/scalation/simulation/queueingnet/old/MMc_Queue.scala.bak deleted file mode 100644 index 712de468c..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/queueingnet/old/MMc_Queue.scala.bak +++ /dev/null @@ -1,131 +0,0 @@ - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** @author John Miller - * @version 2.0 - * @date Sun Dec 29 21:28:40 EST 2013 - * @see LICENSE (MIT style license file). 
- * @see http://irh.inf.unideb.hu/~jsztrik/education/16/SOR_Main_Angol.pdf - * - * @title M/M/c queue - */ - -package scalation -package simulation -package queueingnet - -import scalation.mathstat.Combinatorics.fac - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `MMc_Queue` class is used to solve single node Markovian Queueing problems. - * It models a service station consisting of one queue and 'c' servers, i.e., - * an M/M/c queue. The arrivals are Poisson and the service time distribution - * is Exponential. - *------------------------------------------------------------------------------ - * @see also `MMck_Queue` to model finite capacity queues. - * @see also `MGc_Queue` to model queues with general service time distributions. - *------------------------------------------------------------------------------ - * @param lambda the arrival rate - * @param mu the service rate - * @param c the number of servers - */ -class MMc_Queue (lambda: Double, mu: Double, c: Int = 1): - - private val flaw = flawf ("MMc_Queue") // flaw function - - if c < 1 then flaw ("constructor", "must have at least on server") - - protected val rho = lambda / mu // traffic intensity - protected val c_fac = fac (c) // c! (factorial) - private val a = rho / c.toDouble // server utilization factor - protected val _1_a = 1.0 - a // one minus a - private val rhoc = rho~^c / c_fac // all servers busy probability factor - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Probability system is empty. - */ - def prob_0: Double = - val sum = (for (i <- 0 until c) yield rho~^i / fac (i)).sum - 1.0 / (sum + rho~^c / (c_fac * _1_a)) - end prob_0 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Expected length of the waiting queue. 
- */ - val l_q = prob_0 * rho * rhoc / _1_a~^2 - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Expected length/number in Service. - */ - val l_s = lambda / mu - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Expected length/number in sYstem. - */ - val l_y = l_q + l_s - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Expected time in the waiting Queue (using Little's Law). - */ - val t_q = l_q / lambda - def t_wait: Double = (prob_0 * rho * rhoc / _1_a~^2) / lambda - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Expected time in Service. - */ - val t_s = 1.0 / mu - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Expected time in the sYstem. - */ - val t_y = t_q + t_s - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** View/check intermediate results. - */ - def view (): Unit = - println ("Check queueing parameters:") - println ("lambda = %g".format (lambda)) // arrival rate - println ("mu = %g".format (mu)) // service rate - println ("c = %d".format (c)) // number of servers - println ("rho = %g".format (rho)) // traffic intensity - println ("a = %g".format (a)) // server utilization factor - end view - - //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - /** Report the results. 
- */ - def report (): Unit = - println ("Results for queue:") - println ("---------------------------------------------------") - println ("| Queue | l_q = %8.4g".format (l_q) + " | t_q = %8.4g".format (t_q) + " |") - println ("| Service | l_s = %8.4g".format (l_s) + " | t_s = %8.4g".format (t_s) + " |") - println ("| sYstem | l_y = %8.4g".format (l_y) + " | t_y = %8.4g".format (t_y) + " |") - println ("---------------------------------------------------") - println ("After time unit coversion") - println (s"t_q = ${60*t_q}, t_s = ${60*t_s}, t_y = ${60*t_y}") - end report - -end MMc_Queue - - -//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -/** The `mMc_QueueTest` main function is used to test the `MMc_Queue` class. - * > runMain scalation.simulation.queueingnet.mMc_QueueTest - */ -@main def mMc_QueueTest (): Unit = - - val lambda = 6.0 // customer arrival rate (per hour) - val mu = 7.5 // customer service rate (per hour) - - println("\nM/M/1 Queue Results:") - val mm1 = new MMc_Queue (lambda, mu, 1) // M/M/c Queue - mm1.view () - mm1.report () - - println("\nM/M/2 Queue Results:") - val mm2 = new MMc_Queue (2 * lambda, mu, 2) // M/M/c Queue - mm2.view () - mm2.report () - -end mMc_QueueTest - diff --git a/target/scala-3.6.4/classes/scalation/simulation/runCoroutineTest.class b/target/scala-3.6.4/classes/scalation/simulation/runCoroutineTest.class deleted file mode 100644 index 21ac8ffef..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/runCoroutineTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/runCoroutineTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/runCoroutineTest.tasty deleted file mode 100644 index f6854048a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/runCoroutineTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovCT.class 
b/target/scala-3.6.4/classes/scalation/simulation/state/MarkovCT.class deleted file mode 100644 index 2303ce742..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovCT.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovCT.tasty b/target/scala-3.6.4/classes/scalation/simulation/state/MarkovCT.tasty deleted file mode 100644 index e678ef218..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovCT.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChain$package$.class b/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChain$package$.class deleted file mode 100644 index 1a6487974..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChain$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChain$package.class b/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChain$package.class deleted file mode 100644 index 1787fdc44..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChain$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChain$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChain$package.tasty deleted file mode 100644 index 9e7067173..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChain$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChain.class b/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChain.class deleted file mode 100644 index ecf2226fb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChain.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChain.tasty 
b/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChain.tasty deleted file mode 100644 index 44fdf4e3c..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChain.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChainCT$package$.class b/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChainCT$package$.class deleted file mode 100644 index f1d038f72..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChainCT$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChainCT$package.class b/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChainCT$package.class deleted file mode 100644 index bd5195b9b..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChainCT$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChainCT$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChainCT$package.tasty deleted file mode 100644 index 83c230ef3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/MarkovChainCT$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/index.html b/target/scala-3.6.4/classes/scalation/simulation/state/index.html deleted file mode 100644 index 3bab7ccd7..000000000 --- a/target/scala-3.6.4/classes/scalation/simulation/state/index.html +++ /dev/null @@ -1,9 +0,0 @@ - - -

    Source files in state Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/markovCTTest.class b/target/scala-3.6.4/classes/scalation/simulation/state/markovCTTest.class deleted file mode 100644 index 7fbcb19b9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/markovCTTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/markovCTTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/state/markovCTTest.tasty deleted file mode 100644 index 48e04e6ac..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/markovCTTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest.class b/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest.class deleted file mode 100644 index 11ab63110..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest.tasty deleted file mode 100644 index d0b57c737..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest2.class b/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest2.class deleted file mode 100644 index 051ee86b5..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest2.tasty b/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest2.tasty deleted file mode 100644 index 1bc659356..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest2.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest3.class b/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest3.class deleted file mode 100644 index c4c2633dc..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest3.tasty b/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest3.tasty deleted file mode 100644 index 860fbb880..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest4.class b/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest4.class deleted file mode 100644 index 7adc5930e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest4.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest4.tasty b/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest4.tasty deleted file mode 100644 index 1430c971f..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest4.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest5.class b/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest5.class deleted file mode 100644 index deb9b6da9..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest5.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest5.tasty b/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest5.tasty deleted file mode 100644 index de1586d48..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/state/markovChainTest5.tasty 
and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/CallCenterModel.class b/target/scala-3.6.4/classes/scalation/simulation/tableau/CallCenterModel.class deleted file mode 100644 index 0eee939a6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/CallCenterModel.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/CallCenterModel.tasty b/target/scala-3.6.4/classes/scalation/simulation/tableau/CallCenterModel.tasty deleted file mode 100644 index e2fd167a6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/CallCenterModel.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_Bank$package$.class b/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_Bank$package$.class deleted file mode 100644 index f72577700..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_Bank$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_Bank$package.class b/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_Bank$package.class deleted file mode 100644 index 206fb722e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_Bank$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_Bank$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_Bank$package.tasty deleted file mode 100644 index d64b1b287..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_Bank$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_CallCenter$package$.class b/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_CallCenter$package$.class deleted file mode 100644 index d51e7cd83..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_CallCenter$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_CallCenter$package.class b/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_CallCenter$package.class deleted file mode 100644 index d995ed3c4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_CallCenter$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_CallCenter$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_CallCenter$package.tasty deleted file mode 100644 index bb238a019..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/Ex_CallCenter$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/Model$.class b/target/scala-3.6.4/classes/scalation/simulation/tableau/Model$.class deleted file mode 100644 index 571ff72d3..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/Model$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/Model$package$.class b/target/scala-3.6.4/classes/scalation/simulation/tableau/Model$package$.class deleted file mode 100644 index 707937fab..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/Model$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/Model$package.class b/target/scala-3.6.4/classes/scalation/simulation/tableau/Model$package.class deleted file mode 100644 index 73eb2c040..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/Model$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/Model$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/tableau/Model$package.tasty deleted file mode 100644 index 
3564a1499..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/Model$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/Model.class b/target/scala-3.6.4/classes/scalation/simulation/tableau/Model.class deleted file mode 100644 index 85f713cad..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/Model.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/Model.tasty b/target/scala-3.6.4/classes/scalation/simulation/tableau/Model.tasty deleted file mode 100644 index cbf31e350..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/Model.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/Queue_MM1$package$.class b/target/scala-3.6.4/classes/scalation/simulation/tableau/Queue_MM1$package$.class deleted file mode 100644 index 4d41f4386..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/Queue_MM1$package$.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/Queue_MM1$package.class b/target/scala-3.6.4/classes/scalation/simulation/tableau/Queue_MM1$package.class deleted file mode 100644 index 904599817..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/Queue_MM1$package.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/Queue_MM1$package.tasty b/target/scala-3.6.4/classes/scalation/simulation/tableau/Queue_MM1$package.tasty deleted file mode 100644 index f3497d575..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/Queue_MM1$package.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/index.html b/target/scala-3.6.4/classes/scalation/simulation/tableau/index.html deleted file mode 100644 index a16444493..000000000 --- 
a/target/scala-3.6.4/classes/scalation/simulation/tableau/index.html +++ /dev/null @@ -1,11 +0,0 @@ - - -

    Source files in tableau Package

    -

    - - \ No newline at end of file diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/runEx_Bank.class b/target/scala-3.6.4/classes/scalation/simulation/tableau/runEx_Bank.class deleted file mode 100644 index bd7473fa8..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/runEx_Bank.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/runEx_Bank.tasty b/target/scala-3.6.4/classes/scalation/simulation/tableau/runEx_Bank.tasty deleted file mode 100644 index 70793facb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/runEx_Bank.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/runEx_CallCenter.class b/target/scala-3.6.4/classes/scalation/simulation/tableau/runEx_CallCenter.class deleted file mode 100644 index 65f7c01dd..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/runEx_CallCenter.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/runEx_CallCenter.tasty b/target/scala-3.6.4/classes/scalation/simulation/tableau/runEx_CallCenter.tasty deleted file mode 100644 index 1fdbbdc90..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/runEx_CallCenter.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/runModelTest.class b/target/scala-3.6.4/classes/scalation/simulation/tableau/runModelTest.class deleted file mode 100644 index 6d57d0b4d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/runModelTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/runModelTest.tasty b/target/scala-3.6.4/classes/scalation/simulation/tableau/runModelTest.tasty deleted file mode 100644 index 24c26a69e..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/runModelTest.tasty and 
/dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/runQueue_MM1.class b/target/scala-3.6.4/classes/scalation/simulation/tableau/runQueue_MM1.class deleted file mode 100644 index baa013f70..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/runQueue_MM1.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/simulation/tableau/runQueue_MM1.tasty b/target/scala-3.6.4/classes/scalation/simulation/tableau/runQueue_MM1.tasty deleted file mode 100644 index 715702d6d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/simulation/tableau/runQueue_MM1.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/skipListTest.class b/target/scala-3.6.4/classes/scalation/skipListTest.class deleted file mode 100644 index 71079ba7a..000000000 Binary files a/target/scala-3.6.4/classes/scalation/skipListTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/skipListTest.tasty b/target/scala-3.6.4/classes/scalation/skipListTest.tasty deleted file mode 100644 index 5cfcdeca6..000000000 Binary files a/target/scala-3.6.4/classes/scalation/skipListTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/timeNumTest.class b/target/scala-3.6.4/classes/scalation/timeNumTest.class deleted file mode 100644 index e3889e193..000000000 Binary files a/target/scala-3.6.4/classes/scalation/timeNumTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/timeNumTest.tasty b/target/scala-3.6.4/classes/scalation/timeNumTest.tasty deleted file mode 100644 index 436e814ed..000000000 Binary files a/target/scala-3.6.4/classes/scalation/timeNumTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/timeNumTest2.class b/target/scala-3.6.4/classes/scalation/timeNumTest2.class deleted file mode 100644 index edf60da65..000000000 Binary files 
a/target/scala-3.6.4/classes/scalation/timeNumTest2.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/timeNumTest2.tasty b/target/scala-3.6.4/classes/scalation/timeNumTest2.tasty deleted file mode 100644 index 8495e329d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/timeNumTest2.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/timeNumTest3.class b/target/scala-3.6.4/classes/scalation/timeNumTest3.class deleted file mode 100644 index 59ecd5c5d..000000000 Binary files a/target/scala-3.6.4/classes/scalation/timeNumTest3.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/timeNumTest3.tasty b/target/scala-3.6.4/classes/scalation/timeNumTest3.tasty deleted file mode 100644 index ddfb1fcc4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/timeNumTest3.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/timerTest.class b/target/scala-3.6.4/classes/scalation/timerTest.class deleted file mode 100644 index c7e4063e0..000000000 Binary files a/target/scala-3.6.4/classes/scalation/timerTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/timerTest.tasty b/target/scala-3.6.4/classes/scalation/timerTest.tasty deleted file mode 100644 index af7e8a7e4..000000000 Binary files a/target/scala-3.6.4/classes/scalation/timerTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/unicodeTest.class b/target/scala-3.6.4/classes/scalation/unicodeTest.class deleted file mode 100644 index 0198520bb..000000000 Binary files a/target/scala-3.6.4/classes/scalation/unicodeTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/unicodeTest.tasty b/target/scala-3.6.4/classes/scalation/unicodeTest.tasty deleted file mode 100644 index 016221dbf..000000000 Binary files a/target/scala-3.6.4/classes/scalation/unicodeTest.tasty and /dev/null differ diff --git 
a/target/scala-3.6.4/classes/scalation/valueTypeTest.class b/target/scala-3.6.4/classes/scalation/valueTypeTest.class deleted file mode 100644 index 88cfb2a47..000000000 Binary files a/target/scala-3.6.4/classes/scalation/valueTypeTest.class and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation/valueTypeTest.tasty b/target/scala-3.6.4/classes/scalation/valueTypeTest.tasty deleted file mode 100644 index 3891ed941..000000000 Binary files a/target/scala-3.6.4/classes/scalation/valueTypeTest.tasty and /dev/null differ diff --git a/target/scala-3.6.4/classes/scalation_src.zip b/target/scala-3.6.4/classes/scalation_src.zip deleted file mode 100644 index 772d01bea..000000000 Binary files a/target/scala-3.6.4/classes/scalation_src.zip and /dev/null differ diff --git a/target/scala-3.6.4/update/update_cache_3/inputs b/target/scala-3.6.4/update/update_cache_3/inputs deleted file mode 100644 index af47de31a..000000000 --- a/target/scala-3.6.4/update/update_cache_3/inputs +++ /dev/null @@ -1 +0,0 @@ -501753987 \ No newline at end of file diff --git a/target/scala-3.6.4/update/update_cache_3/output b/target/scala-3.6.4/update/update_cache_3/output deleted file mode 100644 index 0d19f4bc3..000000000 --- a/target/scala-3.6.4/update/update_cache_3/output +++ /dev/null @@ -1 +0,0 @@ 
-{"cachedDescriptor":".","configurations":[{"configuration":{"name":"compile"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala3-library_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-library_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scalafx","name":"scalafx_3","revision":"22.0.0-R33","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scalafx_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"http://www.scalafx.org/","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compi
le"},{"name":"default"},{"name":"runtime"}],"licenses":[["BSD","https://github.com/scalafx/scalafx/blob/master/LICENSE.txt"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.13.15","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15.jar","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"
},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusion
s":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/App
Data/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"org
anization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artif
acts":[[{"name":"javafx-graphics","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/y
ouse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]}],"details":[]},{"configuration":{"name":"compile-internal"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala3-library_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-library_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4.jar"]],"missingA
rtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scalafx","name":"scalafx_3","revision":"22.0.0-R33","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scalafx_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"http://www.scalafx.org/","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["BSD","https://github.com/scalafx/scalafx/blob/master/LICENSE.txt"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.13.15","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15.jar","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.or
g/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"tes
t"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions
":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cach
e/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],
"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{
"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/User
s/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]}],"details":[]},{"configuration":{"name":"docs"},"modules":[],"details":[]},{"configuration":{"name":"optional"},"modules":[],"details":[]},{"configuration":{"name":"plugin"},"modules":[],"details":[]},{"configuration":{"name":"pom"},"modules":[],"details":[]},{"configuration":{"name":"provided"},"modules":[],"details":[]},{"configuration":{"name":"runtime"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala3-library_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-library_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scalafx","name":"scalafx_3","revision":"22.0.0-R33","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"
extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scalafx_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"http://www.scalafx.org/","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["BSD","https://github.com/scalafx/scalafx/blob/master/LICENSE.txt"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.13.15","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15.jar","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":fal
se,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22.ja
r","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"com
pile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}}
,"artifacts":[[{"name":"javafx-base","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/
youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":
"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]}],"details":[]},{"configuration":{"name":"runtime-internal"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala3-library_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isFor
ce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-library_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scalafx","name":"scalafx_3","revision":"22.0.0-R33","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scalafx_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"http://www.scalafx.org/","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["BSD","https://github.com/scalafx/scalafx/blob/master/LICENSE.txt"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.13.15","configurations":"defau
lt","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15.jar","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":
true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22.jar","ex
traAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"
default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Di
sabled"}},"artifacts":[[{"name":"javafx-controls","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar","extraAttributes":{},"allowInsecureProtocol":fals
e},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile
"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]}],"details":[]},{"configuration":{"name":"scala-doc-tool"},"modules":[{"module":{"organization":"org.scala-lang","name":"scaladoc_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scaladoc_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scaladoc_3/3.6.4/scaladoc_3-3.6.4.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scaladoc_3/3.6.4/scaladoc_3-3.6.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala3-com
piler_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-compiler_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-compiler_3/3.6.4/scala3-compiler_3-3.6.4.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-compiler_3/3.6.4/scala3-compiler_3-3.6.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala3-tasty-inspector_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-tasty-inspector_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-tasty-inspector_3/3.6.4/scala3-tasty-inspector_3-3.6.4.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-tasty-inspector_3/3.6.4/scala3-tasty-inspector_3-3.6.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"com.vl
adsch.flexmark","name":"flexmark","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark/0.62.2/flexmark-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark/0.62.2/flexmark-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-ast","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-ast","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-ast/0.62.2/flexmark-util-ast-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-ast/0.62.2/flexmark-util-ast-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-data","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVers
ion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-data","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-data/0.62.2/flexmark-util-data-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-data/0.62.2/flexmark-util-data-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-html","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-html","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-html/0.62.2/flexmark-util-html-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-html/0.62.2/flexmark-util-html-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-anchorlink","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-anchorlink","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven
2/com/vladsch/flexmark/flexmark-ext-anchorlink/0.62.2/flexmark-ext-anchorlink-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-anchorlink/0.62.2/flexmark-ext-anchorlink-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-autolink","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-autolink","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-autolink/0.62.2/flexmark-ext-autolink-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-autolink/0.62.2/flexmark-ext-autolink-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-emoji","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-emoji","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-emoji/0.62.2/flexmark-ext-emoji-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":fal
se},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-emoji/0.62.2/flexmark-ext-emoji-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-gfm-strikethrough","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-gfm-strikethrough","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-gfm-strikethrough/0.62.2/flexmark-ext-gfm-strikethrough-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-gfm-strikethrough/0.62.2/flexmark-ext-gfm-strikethrough-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-gfm-tasklist","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-gfm-tasklist","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-gfm-tasklist/0.62.2/flexmark-ext-gfm-tasklist-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/
repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-gfm-tasklist/0.62.2/flexmark-ext-gfm-tasklist-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-wikilink","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-wikilink","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-wikilink/0.62.2/flexmark-ext-wikilink-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-wikilink/0.62.2/flexmark-ext-wikilink-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-tables","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-tables","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-tables/0.62.2/flexmark-ext-tables-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-tables/0.62.2/flexmark-ext-tables-0.62.2.jar"]],"missingArtifacts":[],"ev
icted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-yaml-front-matter","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-yaml-front-matter","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-yaml-front-matter/0.62.2/flexmark-ext-yaml-front-matter-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-yaml-front-matter/0.62.2/flexmark-ext-yaml-front-matter-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"nl.big-o","name":"liqp","revision":"0.8.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"liqp","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/nl/big-o/liqp/0.8.2/liqp-0.8.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/nl/big-o/liqp/0.8.2/liqp-0.8.2.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/bkiers/Liqp","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["MIT 
License","http://www.opensource.org/licenses/mit-license.php"]],"callers":[]},{"module":{"organization":"org.jsoup","name":"jsoup","revision":"1.17.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jsoup","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/jsoup/jsoup/1.17.2/jsoup-1.17.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jsoup/jsoup/1.17.2/jsoup-1.17.2.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://jsoup.org/","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["The MIT License","https://jsoup.org/license"]],"callers":[]},{"module":{"organization":"com.fasterxml.jackson.dataformat","name":"jackson-dataformat-yaml","revision":"2.15.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jackson-dataformat-yaml","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/fasterxml/jackson/dataformat/jackson-dataformat-yaml/2.15.1/jackson-dataformat-yaml-2.15.1.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/fasterxml/jackson/dataformat/jackson-dataformat-yaml/2.15.1/jackson-dataformat-yaml-2.15.1.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/FasterXML/jackson-dataformats-text","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"
callers":[]},{"module":{"organization":"org.scala-lang","name":"scala3-interfaces","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-interfaces","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-interfaces/3.6.4/scala3-interfaces-3.6.4.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-interfaces/3.6.4/scala3-interfaces-3.6.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala3-library_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-library_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"co
nfigurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"tasty-core_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"tasty-core_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/tasty-core_3/3.6.4/tasty-core_3-3.6.4.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/tasty-core_3/3.6.4/tasty-core_3-3.6.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang.modules","name":"scala-asm","revision":"9.7.1-scala-1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-asm","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/modules/scala-asm/9.7.1-scala-1/scala-asm-9.7.1-scala-1.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/modules/scala-asm/9.7.1-scala-1/scala-asm-9.7.1-scala-1.jar"]],"missingArtifacts":[],"evicted":f
alse,"homepage":"https://github.com/scala/scala-asm","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["BSD 3-clause","http://opensource.org/licenses/BSD-3-Clause"]],"callers":[]},{"module":{"organization":"org.scala-sbt","name":"compiler-interface","revision":"1.10.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"early-semver"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"compiler-interface","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-sbt/compiler-interface/1.10.4/compiler-interface-1.10.4.jar","extraAttributes":{"info.versionScheme":"early-semver"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-sbt/compiler-interface/1.10.4/compiler-interface-1.10.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/sbt/zinc","extraAttributes":{"info.versionScheme":"early-semver"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.jline","name":"jline-reader","revision":"3.27.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jline-reader","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/jline/jline-reader/3.27.1/jline-reader-3.27.1.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jline/jline-reader/3.27.1/
jline-reader-3.27.1.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.jline","name":"jline-terminal","revision":"3.27.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jline-terminal","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/jline/jline-terminal/3.27.1/jline-terminal-3.27.1.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jline/jline-terminal/3.27.1/jline-terminal-3.27.1.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.jline","name":"jline-terminal-jni","revision":"3.27.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jline-terminal-jni","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/jline/jline-terminal-jni/3.27.1/jline-terminal-jni-3.27.1.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jline/jline-terminal-jni/3.27.1/jline-terminal-jni-3.27.1.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com
.vladsch.flexmark","name":"flexmark-util-builder","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-builder","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-builder/0.62.2/flexmark-util-builder-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-builder/0.62.2/flexmark-util-builder-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-collection","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-collection","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-collection/0.62.2/flexmark-util-collection-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-collection/0.62.2/flexmark-util-collection-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-dependency","revision":"0.62.2","configurations":"default","isChanging"
:false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-dependency","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-dependency/0.62.2/flexmark-util-dependency-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-dependency/0.62.2/flexmark-util-dependency-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-format","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-format","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-format/0.62.2/flexmark-util-format-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-format/0.62.2/flexmark-util-format-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-misc","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"cros
sVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-misc","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-misc/0.62.2/flexmark-util-misc-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-misc/0.62.2/flexmark-util-misc-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-sequence","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-sequence","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-sequence/0.62.2/flexmark-util-sequence-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-sequence/0.62.2/flexmark-util-sequence-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-visitor","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-visitor","type":"jar","extension":"jar","configurations":[],"url":"http
s://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-visitor/0.62.2/flexmark-util-visitor-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-visitor/0.62.2/flexmark-util-visitor-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.jetbrains","name":"annotations","revision":"15.0","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"annotations","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/jetbrains/annotations/15.0/annotations-15.0.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jetbrains/annotations/15.0/annotations-15.0.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"http://www.jetbrains.org","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["The Apache Software License, Version 
2.0","http://www.apache.org/licenses/LICENSE-2.0.txt"]],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util/0.62.2/flexmark-util-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util/0.62.2/flexmark-util-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.nibor.autolink","name":"autolink","revision":"0.6.0","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"autolink","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/nibor/autolink/autolink/0.6.0/autolink-0.6.0.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/nibor/autolink/autolink/0.6.0/autolink-0.6.0.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/robinst/autolink-java","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["MIT 
License","http://www.opensource.org/licenses/mit-license.php"]],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-jira-converter","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-jira-converter","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-jira-converter/0.62.2/flexmark-jira-converter-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-jira-converter/0.62.2/flexmark-jira-converter-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.antlr","name":"antlr4-runtime","revision":"4.7.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"antlr4-runtime","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/antlr/antlr4-runtime/4.7.2/antlr4-runtime-4.7.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/antlr/antlr4-runtime/4.7.2/antlr4-runtime-4.7.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.fasterxml.jackson.core","name":"jackson-annotations","revision":"2.15.1","configurations"
:"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jackson-annotations","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/fasterxml/jackson/core/jackson-annotations/2.15.1/jackson-annotations-2.15.1.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/fasterxml/jackson/core/jackson-annotations/2.15.1/jackson-annotations-2.15.1.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/FasterXML/jackson","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["The Apache Software License, Version 2.0","https://www.apache.org/licenses/LICENSE-2.0.txt"]],"callers":[]},{"module":{"organization":"com.fasterxml.jackson.core","name":"jackson-core","revision":"2.15.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jackson-core","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/fasterxml/jackson/core/jackson-core/2.15.1/jackson-core-2.15.1.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/fasterxml/jackson/core/jackson-core/2.15.1/jackson-core-2.15.1.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/FasterXML/jackson-core","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["The Apache Software License, Version 
2.0","https://www.apache.org/licenses/LICENSE-2.0.txt"]],"callers":[]},{"module":{"organization":"com.fasterxml.jackson.core","name":"jackson-databind","revision":"2.15.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jackson-databind","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/fasterxml/jackson/core/jackson-databind/2.15.1/jackson-databind-2.15.1.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/fasterxml/jackson/core/jackson-databind/2.15.1/jackson-databind-2.15.1.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/FasterXML/jackson","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["The Apache Software License, Version 
2.0","https://www.apache.org/licenses/LICENSE-2.0.txt"]],"callers":[]},{"module":{"organization":"com.fasterxml.jackson.datatype","name":"jackson-datatype-jsr310","revision":"2.12.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jackson-datatype-jsr310","type":"bundle","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/fasterxml/jackson/datatype/jackson-datatype-jsr310/2.12.1/jackson-datatype-jsr310-2.12.1.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/fasterxml/jackson/datatype/jackson-datatype-jsr310/2.12.1/jackson-datatype-jsr310-2.12.1.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"ua.co.k","name":"strftime4j","revision":"1.0.5","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"strftime4j","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/ua/co/k/strftime4j/1.0.5/strftime4j-1.0.5.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/ua/co/k/strftime4j/1.0.5/strftime4j-1.0.5.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/msangel/strftime4j","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["MIT 
License","http://www.opensource.org/licenses/mit-license.php"]],"callers":[]},{"module":{"organization":"org.yaml","name":"snakeyaml","revision":"2.0","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"snakeyaml","type":"bundle","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/yaml/snakeyaml/2.0/snakeyaml-2.0.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/yaml/snakeyaml/2.0/snakeyaml-2.0.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://bitbucket.org/snakeyaml/snakeyaml","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache License, Version 2.0","http://www.apache.org/licenses/LICENSE-2.0.txt"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.13.15","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15.jar","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"configurations":[{"name":"test"
},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-sbt","name":"util-interface","revision":"1.10.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"early-semver"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"util-interface","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-sbt/util-interface/1.10.4/util-interface-1.10.4.jar","extraAttributes":{"info.versionScheme":"early-semver"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-sbt/util-interface/1.10.4/util-interface-1.10.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/sbt/sbt","extraAttributes":{"info.versionScheme":"early-semver"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://github.com/sbt/sbt/blob/develop/LICENSE"]],"callers":[]},{"module":{"organization":"org.jline","name":"jline-native","revision":"3.27.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jline-native","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/jline/jline-native/3.27.1/jline-native-3.27.1.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jline/jline-native/3.27.1/jline-native-3.27.1.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"
optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-options","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-options","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-options/0.62.2/flexmark-util-options-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-options/0.62.2/flexmark-util-options-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-ins","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-ins","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-ins/0.62.2/flexmark-ext-ins-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-ins/0.62.2/flexmark-ext-ins-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexma
rk-ext-superscript","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-superscript","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-superscript/0.62.2/flexmark-ext-superscript-0.62.2.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-superscript/0.62.2/flexmark-ext-superscript-0.62.2.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]}],"details":[]},{"configuration":{"name":"scala-tool"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala3-compiler_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-compiler_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-compiler_3/3.6.4/scala3-compiler_3-3.6.4.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-compiler_3/3.6.4/scala3-compiler_3-3.6.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"s
cala3-interfaces","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-interfaces","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-interfaces/3.6.4/scala3-interfaces-3.6.4.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-interfaces/3.6.4/scala3-interfaces-3.6.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala3-library_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-library_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compi
le"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"tasty-core_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"tasty-core_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/tasty-core_3/3.6.4/tasty-core_3-3.6.4.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/tasty-core_3/3.6.4/tasty-core_3-3.6.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang.modules","name":"scala-asm","revision":"9.7.1-scala-1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-asm","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/modules/scala-asm/9.7.1-scala-1/scala-asm-9.7.1-scala-1.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/modules/scala-asm/9.7.1-scala-1/scala-asm-9.7.1-scala-1.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala-asm","extraAttrib
utes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["BSD 3-clause","http://opensource.org/licenses/BSD-3-Clause"]],"callers":[]},{"module":{"organization":"org.scala-sbt","name":"compiler-interface","revision":"1.10.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"early-semver"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"compiler-interface","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-sbt/compiler-interface/1.10.4/compiler-interface-1.10.4.jar","extraAttributes":{"info.versionScheme":"early-semver"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-sbt/compiler-interface/1.10.4/compiler-interface-1.10.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/sbt/zinc","extraAttributes":{"info.versionScheme":"early-semver"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.jline","name":"jline-reader","revision":"3.27.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jline-reader","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/jline/jline-reader/3.27.1/jline-reader-3.27.1.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jline/jline-reader/3.27.1/jline-reader-3.27.1.jar"]],"missingArtifacts":[],"evicted":false,
"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.jline","name":"jline-terminal","revision":"3.27.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jline-terminal","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/jline/jline-terminal/3.27.1/jline-terminal-3.27.1.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jline/jline-terminal/3.27.1/jline-terminal-3.27.1.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.jline","name":"jline-terminal-jni","revision":"3.27.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jline-terminal-jni","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/jline/jline-terminal-jni/3.27.1/jline-terminal-jni-3.27.1.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jline/jline-terminal-jni/3.27.1/jline-terminal-jni-3.27.1.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.13.15","configu
rations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15.jar","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-sbt","name":"util-interface","revision":"1.10.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"early-semver"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"util-interface","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-sbt/util-interface/1.10.4/util-interface-1.10.4.jar","extraAttributes":{"info.versionScheme":"early-semver"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-sbt/util-interface/1.10.4/util-interface-1.10.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/sbt/sbt","extraAttributes":{"info.versionScheme":"early-semver"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"nam
e":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://github.com/sbt/sbt/blob/develop/LICENSE"]],"callers":[]},{"module":{"organization":"org.jline","name":"jline-native","revision":"3.27.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jline-native","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/jline/jline-native/3.27.1/jline-native-3.27.1.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jline/jline-native/3.27.1/jline-native-3.27.1.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]}],"details":[]},{"configuration":{"name":"sources"},"modules":[],"details":[]},{"configuration":{"name":"test"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala3-library_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-library_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations
":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scalafx","name":"scalafx_3","revision":"22.0.0-R33","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scalafx_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"http://www.scalafx.org/","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["BSD","https://github.com/scalafx/scalafx/blob/master/LICENSE.txt"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.13.15","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15.jar","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-l
ang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.ope
njfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"jar","extension":"jar","configuration
s":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"con
figurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTran
sitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://re
po1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar"]],"missingArtifacts":[],"e
victed":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]}],"details":[]},{"configuration":{"name":"test-internal"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala3-library_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-library_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scalafx","name":"scalafx_3","revision":"22.0.0-R33","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scalafx_3","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33.jar"]],"missingArt
ifacts":[],"evicted":false,"homepage":"http://www.scalafx.org/","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["BSD","https://github.com/scalafx/scalafx/blob/master/LICENSE.txt"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.13.15","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15.jar","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.mave
n.org/maven2/org/openjfx/javafx-base/22/javafx-base-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-grap
hics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"jar","extension":"jar","configurations":[],"url":"https:
//repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"jar","extension":"jar","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"c
onfigurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"
isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"ht
tps://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]}],"details":[]}],"stats":{"resolveTime":-1,"downloadTime":-1,"downloadSize":-1,"cached":false},"stamps":{}} \ No newline at end of file diff --git a/target/scala-3.6.4/zinc/inc_compile_3.zip b/target/scala-3.6.4/zinc/inc_compile_3.zip deleted file mode 100644 index 287a8340c..000000000 Binary files a/target/scala-3.6.4/zinc/inc_compile_3.zip and /dev/null differ diff --git a/target/streams/_global/_global/_global/streams/out b/target/streams/_global/_global/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/target/streams/_global/_global/csrLogger/_global/streams/out b/target/streams/_global/_global/csrLogger/_global/streams/out deleted 
file mode 100644 index e69de29bb..000000000 diff --git a/target/streams/_global/_global/dumpStructure/_global/streams/out b/target/streams/_global/_global/dumpStructure/_global/streams/out deleted file mode 100644 index 17a698da3..000000000 --- a/target/streams/_global/_global/dumpStructure/_global/streams/out +++ /dev/null @@ -1,2 +0,0 @@ -[info] Writing structure to C:\Users\youse\AppData\Local\Temp\sbt-structure.xml... -[info] Done. diff --git a/target/streams/_global/csrConfiguration/_global/streams/out b/target/streams/_global/csrConfiguration/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/target/streams/_global/csrProject/_global/streams/out b/target/streams/_global/csrProject/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/target/streams/_global/dependencyPositions/_global/streams/update_cache_3/input_dsp b/target/streams/_global/dependencyPositions/_global/streams/update_cache_3/input_dsp deleted file mode 100644 index 0985496e1..000000000 --- a/target/streams/_global/dependencyPositions/_global/streams/update_cache_3/input_dsp +++ /dev/null @@ -1 +0,0 @@ -2041884717 \ No newline at end of file diff --git a/target/streams/_global/dependencyPositions/_global/streams/update_cache_3/output_dsp b/target/streams/_global/dependencyPositions/_global/streams/update_cache_3/output_dsp deleted file mode 100644 index bd3d1728d..000000000 --- a/target/streams/_global/dependencyPositions/_global/streams/update_cache_3/output_dsp +++ /dev/null @@ -1 +0,0 @@ 
-{"{\"organization\":\"org.scala-lang\",\"name\":\"scala3-library\",\"revision\":\"3.6.4\",\"isChanging\":false,\"isTransitive\":true,\"isForce\":false,\"explicitArtifacts\":[],\"inclusions\":[],\"exclusions\":[],\"extraAttributes\":{},\"crossVersion\":{\"type\":\"Binary\",\"prefix\":\"\",\"suffix\":\"\"}}":{"value":{"$fields":["path","range"],"path":"C:\\Users\\youse\\OneDrive\\Documents\\GitHub\\scalation_2.0\\build.sbt","range":{"$fields":["start","end"],"start":26,"end":27}},"type":"RangePosition"},"{\"organization\":\"org.scalafx\",\"name\":\"scalafx\",\"revision\":\"22.0.0-R33\",\"isChanging\":false,\"isTransitive\":true,\"isForce\":false,\"explicitArtifacts\":[],\"inclusions\":[],\"exclusions\":[],\"extraAttributes\":{},\"crossVersion\":{\"type\":\"Binary\",\"prefix\":\"\",\"suffix\":\"\"}}":{"value":{"$fields":["path","range"],"path":"C:\\Users\\youse\\OneDrive\\Documents\\GitHub\\scalation_2.0\\build.sbt","range":{"$fields":["start","end"],"start":26,"end":27}},"type":"RangePosition"}} \ No newline at end of file diff --git a/target/streams/_global/ivyConfiguration/_global/streams/out b/target/streams/_global/ivyConfiguration/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/target/streams/_global/ivySbt/_global/streams/out b/target/streams/_global/ivySbt/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/target/streams/_global/moduleSettings/_global/streams/out b/target/streams/_global/moduleSettings/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/target/streams/_global/projectDescriptors/_global/streams/out b/target/streams/_global/projectDescriptors/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/target/streams/_global/scalaCompilerBridgeScope/_global/streams/out b/target/streams/_global/scalaCompilerBridgeScope/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git 
a/target/streams/_global/update/_global/streams/out b/target/streams/_global/update/_global/streams/out deleted file mode 100644 index dad971a5d..000000000 --- a/target/streams/_global/update/_global/streams/out +++ /dev/null @@ -1,3 +0,0 @@ -[debug] not up to date. inChanged = true, force = false -[debug] Updating ... -[debug] Done updating diff --git a/target/streams/_global/updateClassifiers/_global/streams/out b/target/streams/_global/updateClassifiers/_global/streams/out deleted file mode 100644 index fccf19ae1..000000000 --- a/target/streams/_global/updateClassifiers/_global/streams/out +++ /dev/null @@ -1,3 +0,0 @@ -[debug] not up to date. inChanged = true, force = false -[debug] Updating ProjectRef(uri("file:/C:/Users/youse/OneDrive/Documents/New%20Scalation/scalation_2.0/"), "scalation")... -[debug] Done updating ProjectRef(uri("file:/C:/Users/youse/OneDrive/Documents/New%20Scalation/scalation_2.0/"), "scalation") diff --git a/target/streams/_global/updateClassifiers/_global/streams/update_cache_3/inputs b/target/streams/_global/updateClassifiers/_global/streams/update_cache_3/inputs deleted file mode 100644 index 7ad239251..000000000 --- a/target/streams/_global/updateClassifiers/_global/streams/update_cache_3/inputs +++ /dev/null @@ -1 +0,0 @@ -602016587 \ No newline at end of file diff --git a/target/streams/_global/updateClassifiers/_global/streams/update_cache_3/output b/target/streams/_global/updateClassifiers/_global/streams/update_cache_3/output deleted file mode 100644 index 562caea00..000000000 --- a/target/streams/_global/updateClassifiers/_global/streams/update_cache_3/output +++ /dev/null @@ -1 +0,0 @@ 
-{"cachedDescriptor":".","configurations":[{"configuration":{"name":"compile"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala3-library_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-library_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4-sources.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scalafx","name":"scalafx_3","revision":"22.0.0-R33","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scalafx_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"http://www.scalafx.org/","extraAttri
butes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["BSD","https://github.com/scalafx/scalafx/blob/master/LICENSE.txt"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.13.15","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15-sources.jar","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org
/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licen
ses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[]
,"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar","extra
Attributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar"],[{"name":"javafx-base","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar"],[{"name":"javafx-controls","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":
"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar"],[{"name":"javafx-fxml","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar"],[{"name":"javafx-graphics","typ
e":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar"],[{"name":"javafx-media","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusi
ons":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar"],[{"name":"javafx-swing","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar"],[{"name":"javafx-web","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cac
he/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]}],"details":[]},{"configuration":{"name":"compile-internal"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala3-library_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-library_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4-sources.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scalafx","name":"scalafx_3","revision":"22.0.0-R33","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scalafx_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33-sources.jar","extraAttributes":{},"allo
wInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"http://www.scalafx.org/","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["BSD","https://github.com/scalafx/scalafx/blob/master/LICENSE.txt"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.13.15","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15-sources.jar","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"src","extension":"jar"
,"classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/
v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"defaul
t"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],
"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar"],[{"name":"javafx-base","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar"],[{"name":"javafx-controls","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/C
oursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar"],[{"name":"javafx-fxml","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/2
2/javafx-graphics-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar"],[{"name":"javafx-graphics","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar"],[{"name":"javafx-media","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"co
mpile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar"],[{"name":"javafx-swing","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar"],[{"name":"javaf
x-web","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]}],"details":[]},{"configuration":{"name":"docs"},"modules":[],"details":[]},{"configuration":{"name":"optional"},"modules":[],"details":[]},{"configuration":{"name":"plugin"},"modules":[],"details":[]},{"configuration":{"name":"pom"},"modules":[],"details":[]},{"configuration":{"name":"provided"},"modules":[],"details":[]},{"configuration":{"name":"runtime"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala3-library_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-library_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4-sources.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www
.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scalafx","name":"scalafx_3","revision":"22.0.0-R33","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scalafx_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"http://www.scalafx.org/","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["BSD","https://github.com/scalafx/scalafx/blob/master/LICENSE.txt"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.13.15","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15-sources.jar","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.s
cala-lang.org/api/2.13.15/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"o
rganization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"ty
pe":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar","extraAttributes":{},"allowInsecureProt
ocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar"],[{"name":"javafx-base","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.
maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar"],[{"name":"javafx-controls","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar"],[{"name":"javafx-fxml","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"tes
t"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar"],[{"name":"javafx-graphics","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/
maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar"],[{"name":"javafx-media","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar"],[{"name":"javafx-swing","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChan
ging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar"],[{"name":"javafx-web","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]}],"details":[]},{"configuration":{"name":"runtime-internal"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala3-library_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-library_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4-sources.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4-sources.jar"]],"missingArtifacts":[],
"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scalafx","name":"scalafx_3","revision":"22.0.0-R33","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scalafx_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"http://www.scalafx.org/","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["BSD","https://github.com/scalafx/scalafx/blob/master/LICENSE.txt"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.13.15","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15-sources.jar","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"allowInsecureProtocol":false},"file:///C:/Users/y
ouse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx
/javafx-controls/22/javafx-controls-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"ca
llers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVer
sion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar"],[{"name":"javafx-base","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":tr
ue,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar"],[{"name":"javafx-controls","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar"],[{"name":"javafx-fxml","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar","extraAttribu
tes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar"],[{"name":"javafx-graphics","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"src","extension":"jar","c
lassifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar"],[{"name":"javafx-media","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar"],[{"name":"javafx-swing","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extr
aAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar"],[{"name":"javafx-web","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]}],"details":[]},{"configuration":{"name":"scala-doc-tool"},"modules":[{"module":{"organization":"org.scala-lang","name":"scaladoc_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scaladoc_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scaladoc_3/3.6.4/scaladoc_3-3.6.4-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:
/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scaladoc_3/3.6.4/scaladoc_3-3.6.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala3-compiler_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-compiler_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-compiler_3/3.6.4/scala3-compiler_3-3.6.4-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-compiler_3/3.6.4/scala3-compiler_3-3.6.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala3-tasty-inspector_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-tasty-inspector_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-tasty-inspector_3/3.6.4/scala3-tasty-inspector_3-3.6.
4-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-tasty-inspector_3/3.6.4/scala3-tasty-inspector_3-3.6.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark/0.62.2/flexmark-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark/0.62.2/flexmark-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-ast","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-ast","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-ast/0.62.2/flexmark-util-ast-0.62.2-sources.jar","extraAttributes":{},"allowI
nsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-ast/0.62.2/flexmark-util-ast-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-data","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-data","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-data/0.62.2/flexmark-util-data-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-data/0.62.2/flexmark-util-data-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-html","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-html","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-html/0.62.2/flexmark-util-html-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo
1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-html/0.62.2/flexmark-util-html-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-anchorlink","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-anchorlink","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-anchorlink/0.62.2/flexmark-ext-anchorlink-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-anchorlink/0.62.2/flexmark-ext-anchorlink-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-autolink","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-autolink","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-autolink/0.62.2/flexmark-ext-autolink-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexma
rk-ext-autolink/0.62.2/flexmark-ext-autolink-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-emoji","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-emoji","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-emoji/0.62.2/flexmark-ext-emoji-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-emoji/0.62.2/flexmark-ext-emoji-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-gfm-strikethrough","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-gfm-strikethrough","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-gfm-strikethrough/0.62.2/flexmark-ext-gfm-strikethrough-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-gfm-strikethrough/0.62.2/fl
exmark-ext-gfm-strikethrough-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-gfm-tasklist","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-gfm-tasklist","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-gfm-tasklist/0.62.2/flexmark-ext-gfm-tasklist-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-gfm-tasklist/0.62.2/flexmark-ext-gfm-tasklist-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-wikilink","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-wikilink","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-wikilink/0.62.2/flexmark-ext-wikilink-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-wikilink/0.62.2/flexmark-ext-wikilink
-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-tables","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-tables","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-tables/0.62.2/flexmark-ext-tables-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-tables/0.62.2/flexmark-ext-tables-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-yaml-front-matter","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-yaml-front-matter","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-yaml-front-matter/0.62.2/flexmark-ext-yaml-front-matter-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-yaml-front-matter/0.62.2/flexmark-ext-yaml-front-matter-0.62.2-so
urces.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"nl.big-o","name":"liqp","revision":"0.8.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"liqp","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/nl/big-o/liqp/0.8.2/liqp-0.8.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/nl/big-o/liqp/0.8.2/liqp-0.8.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/bkiers/Liqp","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["MIT 
License","http://www.opensource.org/licenses/mit-license.php"]],"callers":[]},{"module":{"organization":"org.jsoup","name":"jsoup","revision":"1.17.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jsoup","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/jsoup/jsoup/1.17.2/jsoup-1.17.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jsoup/jsoup/1.17.2/jsoup-1.17.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://jsoup.org/","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["The MIT License","https://jsoup.org/license"]],"callers":[]},{"module":{"organization":"com.fasterxml.jackson.dataformat","name":"jackson-dataformat-yaml","revision":"2.15.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jackson-dataformat-yaml","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/fasterxml/jackson/dataformat/jackson-dataformat-yaml/2.15.1/jackson-dataformat-yaml-2.15.1-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/fasterxml/jackson/dataformat/jackson-dataformat-yaml/2.15.1/jackson-dataformat-yaml-2.15.1-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/FasterXML/jackson-dataformats-text","extraAttributes":{},"configurations":[{"name":"test"},{"name":"option
al"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala3-interfaces","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-interfaces","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-interfaces/3.6.4/scala3-interfaces-3.6.4-sources.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-interfaces/3.6.4/scala3-interfaces-3.6.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala3-library_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-library_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4-sources.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4-
sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"tasty-core_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"tasty-core_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/tasty-core_3/3.6.4/tasty-core_3-3.6.4-sources.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/tasty-core_3/3.6.4/tasty-core_3-3.6.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang.modules","name":"scala-asm","revision":"9.7.1-scala-1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-asm","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/modules/scala-asm/9.7.1-scala-1/scala-asm-9.7.1-scala-1-sources.jar","extraAttributes":{},"
allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/modules/scala-asm/9.7.1-scala-1/scala-asm-9.7.1-scala-1-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala-asm","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["BSD 3-clause","http://opensource.org/licenses/BSD-3-Clause"]],"callers":[]},{"module":{"organization":"org.scala-sbt","name":"compiler-interface","revision":"1.10.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"early-semver"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"compiler-interface","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-sbt/compiler-interface/1.10.4/compiler-interface-1.10.4-sources.jar","extraAttributes":{"info.versionScheme":"early-semver"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-sbt/compiler-interface/1.10.4/compiler-interface-1.10.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/sbt/zinc","extraAttributes":{"info.versionScheme":"early-semver"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.jline","name":"jline-reader","revision":"3.27.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jline-reader","type":"src","extension":"jar","classi
fier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/jline/jline-reader/3.27.1/jline-reader-3.27.1-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jline/jline-reader/3.27.1/jline-reader-3.27.1-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.jline","name":"jline-terminal","revision":"3.27.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jline-terminal","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/jline/jline-terminal/3.27.1/jline-terminal-3.27.1-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jline/jline-terminal/3.27.1/jline-terminal-3.27.1-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.jline","name":"jline-terminal-jni","revision":"3.27.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jline-terminal-jni","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/jline/jline-terminal-jni/3.27.1/jline-terminal-jni-3.27.1-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"
file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jline/jline-terminal-jni/3.27.1/jline-terminal-jni-3.27.1-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-builder","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-builder","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-builder/0.62.2/flexmark-util-builder-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-builder/0.62.2/flexmark-util-builder-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-collection","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-collection","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-collection/0.62.2/flexmark-util-collection-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/ht
tps/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-collection/0.62.2/flexmark-util-collection-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-dependency","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-dependency","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-dependency/0.62.2/flexmark-util-dependency-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-dependency/0.62.2/flexmark-util-dependency-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-format","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-format","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-format/0.62.2/flexmark-util-format-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/v
ladsch/flexmark/flexmark-util-format/0.62.2/flexmark-util-format-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-misc","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-misc","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-misc/0.62.2/flexmark-util-misc-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-misc/0.62.2/flexmark-util-misc-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-sequence","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-sequence","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-sequence/0.62.2/flexmark-util-sequence-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-sequence/0.62.2/flexmark-util-sequence
-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-visitor","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-visitor","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-visitor/0.62.2/flexmark-util-visitor-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-visitor/0.62.2/flexmark-util-visitor-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.jetbrains","name":"annotations","revision":"15.0","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"annotations","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/jetbrains/annotations/15.0/annotations-15.0-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jetbrains/annotations/15.0/annotations-15.0-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"http://www.jetbrains.org","extraAttributes":{},"configurations":[{"nam
e":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["The Apache Software License, Version 2.0","http://www.apache.org/licenses/LICENSE-2.0.txt"]],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[],"missingArtifacts":[{"name":"flexmark-util","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util/0.62.2/flexmark-util-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false}],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.nibor.autolink","name":"autolink","revision":"0.6.0","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"autolink","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/nibor/autolink/autolink/0.6.0/autolink-0.6.0-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/nibor/autolink/autolink/0.6.0/autolink-0.6.0-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/robinst/autolink-java","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["MIT 
License","http://www.opensource.org/licenses/mit-license.php"]],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-jira-converter","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-jira-converter","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-jira-converter/0.62.2/flexmark-jira-converter-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-jira-converter/0.62.2/flexmark-jira-converter-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.antlr","name":"antlr4-runtime","revision":"4.7.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"antlr4-runtime","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/antlr/antlr4-runtime/4.7.2/antlr4-runtime-4.7.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/antlr/antlr4-runtime/4.7.2/antlr4-runtime-4.7.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.fasterxml.j
ackson.core","name":"jackson-annotations","revision":"2.15.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jackson-annotations","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/fasterxml/jackson/core/jackson-annotations/2.15.1/jackson-annotations-2.15.1-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/fasterxml/jackson/core/jackson-annotations/2.15.1/jackson-annotations-2.15.1-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/FasterXML/jackson","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["The Apache Software License, Version 
2.0","https://www.apache.org/licenses/LICENSE-2.0.txt"]],"callers":[]},{"module":{"organization":"com.fasterxml.jackson.core","name":"jackson-core","revision":"2.15.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jackson-core","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/fasterxml/jackson/core/jackson-core/2.15.1/jackson-core-2.15.1-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/fasterxml/jackson/core/jackson-core/2.15.1/jackson-core-2.15.1-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/FasterXML/jackson-core","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["The Apache Software License, Version 
2.0","https://www.apache.org/licenses/LICENSE-2.0.txt"]],"callers":[]},{"module":{"organization":"com.fasterxml.jackson.core","name":"jackson-databind","revision":"2.15.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jackson-databind","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/fasterxml/jackson/core/jackson-databind/2.15.1/jackson-databind-2.15.1-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/fasterxml/jackson/core/jackson-databind/2.15.1/jackson-databind-2.15.1-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/FasterXML/jackson","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["The Apache Software License, Version 
2.0","https://www.apache.org/licenses/LICENSE-2.0.txt"]],"callers":[]},{"module":{"organization":"com.fasterxml.jackson.datatype","name":"jackson-datatype-jsr310","revision":"2.12.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jackson-datatype-jsr310","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/fasterxml/jackson/datatype/jackson-datatype-jsr310/2.12.1/jackson-datatype-jsr310-2.12.1-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/fasterxml/jackson/datatype/jackson-datatype-jsr310/2.12.1/jackson-datatype-jsr310-2.12.1-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"ua.co.k","name":"strftime4j","revision":"1.0.5","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"strftime4j","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/ua/co/k/strftime4j/1.0.5/strftime4j-1.0.5-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/ua/co/k/strftime4j/1.0.5/strftime4j-1.0.5-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/msangel/strftime4j","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["MIT 
License","http://www.opensource.org/licenses/mit-license.php"]],"callers":[]},{"module":{"organization":"org.yaml","name":"snakeyaml","revision":"2.0","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"snakeyaml","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/yaml/snakeyaml/2.0/snakeyaml-2.0-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/yaml/snakeyaml/2.0/snakeyaml-2.0-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://bitbucket.org/snakeyaml/snakeyaml","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache License, Version 2.0","http://www.apache.org/licenses/LICENSE-2.0.txt"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.13.15","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15-sources.jar","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":
"https://www.scala-lang.org/api/2.13.15/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-sbt","name":"util-interface","revision":"1.10.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"early-semver"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"util-interface","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-sbt/util-interface/1.10.4/util-interface-1.10.4-sources.jar","extraAttributes":{"info.versionScheme":"early-semver"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-sbt/util-interface/1.10.4/util-interface-1.10.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/sbt/sbt","extraAttributes":{"info.versionScheme":"early-semver"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://github.com/sbt/sbt/blob/develop/LICENSE"]],"callers":[]},{"module":{"organization":"org.jline","name":"jline-native","revision":"3.27.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jline-native","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/jline/jline-native/3.27.1/jline-native-3.27.1-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jline/jlin
e-native/3.27.1/jline-native-3.27.1-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-util-options","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-util-options","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-options/0.62.2/flexmark-util-options-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-util-options/0.62.2/flexmark-util-options-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-ins","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-ins","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-ins/0.62.2/flexmark-ext-ins-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-ins/0.62.2/flexmark-ext-ins-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted"
:false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"com.vladsch.flexmark","name":"flexmark-ext-superscript","revision":"0.62.2","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"flexmark-ext-superscript","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-superscript/0.62.2/flexmark-ext-superscript-0.62.2-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/com/vladsch/flexmark/flexmark-ext-superscript/0.62.2/flexmark-ext-superscript-0.62.2-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]}],"details":[]},{"configuration":{"name":"scala-tool"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala3-compiler_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-compiler_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-compiler_3/3.6.4/scala3-compiler_3-3.6.4-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-compiler_3/3.6.4/scala3-compiler_3-3.6.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage
":"https://github.com/scala/scala3","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala3-interfaces","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-interfaces","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-interfaces/3.6.4/scala3-interfaces-3.6.4-sources.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-interfaces/3.6.4/scala3-interfaces-3.6.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala3-library_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-library_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4-sources.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsec
ureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"tasty-core_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"tasty-core_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/tasty-core_3/3.6.4/tasty-core_3-3.6.4-sources.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/tasty-core_3/3.6.4/tasty-core_3-3.6.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-lang.modules","name":"scala-asm","revision":"9.7.1-scala-1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-asm","type":"src","extension":"jar","classifier":"sources
","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/modules/scala-asm/9.7.1-scala-1/scala-asm-9.7.1-scala-1-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/modules/scala-asm/9.7.1-scala-1/scala-asm-9.7.1-scala-1-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala-asm","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["BSD 3-clause","http://opensource.org/licenses/BSD-3-Clause"]],"callers":[]},{"module":{"organization":"org.scala-sbt","name":"compiler-interface","revision":"1.10.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"early-semver"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"compiler-interface","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-sbt/compiler-interface/1.10.4/compiler-interface-1.10.4-sources.jar","extraAttributes":{"info.versionScheme":"early-semver"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-sbt/compiler-interface/1.10.4/compiler-interface-1.10.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/sbt/zinc","extraAttributes":{"info.versionScheme":"early-semver"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.jline","name":"jline-reader","revision":"3.27.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[
],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jline-reader","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/jline/jline-reader/3.27.1/jline-reader-3.27.1-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jline/jline-reader/3.27.1/jline-reader-3.27.1-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.jline","name":"jline-terminal","revision":"3.27.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jline-terminal","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/jline/jline-terminal/3.27.1/jline-terminal-3.27.1-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jline/jline-terminal/3.27.1/jline-terminal-3.27.1-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.jline","name":"jline-terminal-jni","revision":"3.27.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jline-terminal-jni","type":"src","extension":"jar","classifier":"sources","configurations":[],
"url":"https://repo1.maven.org/maven2/org/jline/jline-terminal-jni/3.27.1/jline-terminal-jni-3.27.1-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jline/jline-terminal-jni/3.27.1/jline-terminal-jni-3.27.1-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.13.15","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15-sources.jar","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scala-sbt","name":"util-interface","revision":"1.10.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"early-semver"},"crossVersion":{
"type":"Disabled"}},"artifacts":[[{"name":"util-interface","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-sbt/util-interface/1.10.4/util-interface-1.10.4-sources.jar","extraAttributes":{"info.versionScheme":"early-semver"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-sbt/util-interface/1.10.4/util-interface-1.10.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/sbt/sbt","extraAttributes":{"info.versionScheme":"early-semver"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://github.com/sbt/sbt/blob/develop/LICENSE"]],"callers":[]},{"module":{"organization":"org.jline","name":"jline-native","revision":"3.27.1","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"jline-native","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/jline/jline-native/3.27.1/jline-native-3.27.1-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/jline/jline-native/3.27.1/jline-native-3.27.1-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]}],"details":[]},{"configuration":{"name":"sources"},"modules":[],"details":[]},{"configuration":{"name":"test"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala3-library_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":fa
lse,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-library_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4-sources.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scalafx","name":"scalafx_3","revision":"22.0.0-R33","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scalafx_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"http://www.scalafx.org/","extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["BSD","https://github.com/scalafx/scalafx/blob/master/LICENSE.txt"]],"callers":[]},{"module":{"organization":"org.scala-l
ang","name":"scala-library","revision":"2.13.15","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15-sources.jar","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"calle
rs":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes
":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sourc
es.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar"],[{"name":"javafx-base","type":"jar","extension":"jar","classifier":"w
in","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar"],[{"name":"javafx-controls","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"e
xtraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar"],[{"name":"javafx-fxml","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar"],[{"name":"javafx-graphics","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Cou
rsier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar"],[{"name":"javafx-media","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/2
2/javafx-swing-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar"],[{"name":"javafx-swing","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar"],[{"name":"javafx-web","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}]
,"licenses":[],"callers":[]}],"details":[]},{"configuration":{"name":"test-internal"},"modules":[{"module":{"organization":"org.scala-lang","name":"scala3-library_3","revision":"3.6.4","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.versionScheme":"semver-spec"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala3-library_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4-sources.jar","extraAttributes":{"info.versionScheme":"semver-spec"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala3-library_3/3.6.4/scala3-library_3-3.6.4-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://github.com/scala/scala3","extraAttributes":{"info.versionScheme":"semver-spec"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.scalafx","name":"scalafx_3","revision":"22.0.0-R33","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scalafx_3","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scalafx/scalafx_3/22.0.0-R33/scalafx_3-22.0.0-R33-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"http://www.scalafx.org/","ext
raAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["BSD","https://github.com/scalafx/scalafx/blob/master/LICENSE.txt"]],"callers":[]},{"module":{"organization":"org.scala-lang","name":"scala-library","revision":"2.13.15","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"scala-library","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15-sources.jar","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/scala-lang/scala-library/2.13.15/scala-library-2.13.15-sources.jar"]],"missingArtifacts":[],"evicted":false,"homepage":"https://www.scala-lang.org/","extraAttributes":{"info.apiURL":"https://www.scala-lang.org/api/2.13.15/"},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[["Apache-2.0","https://www.apache.org/licenses/LICENSE-2.0"]],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.ma
ven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}]
,"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusi
ons":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-base","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-base","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar"
,"extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-sources.jar"],[{"name":"javafx-base","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-base/22/javafx-base-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-controls","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-controls","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-sources.jar"],[{"name":"javafx-controls","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-controls/22/javafx-controls-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{
"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-fxml","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-fxml","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-sources.jar"],[{"name":"javafx-fxml","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-fxml/22/javafx-fxml-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-graphics","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-graphics","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-sources.jar"],[{"name":"javafx-graphic
s","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-graphics/22/javafx-graphics-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-media","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-media","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-sources.jar"],[{"name":"javafx-media","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-media/22/javafx-media-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-swing","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"
inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-swing","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-sources.jar"],[{"name":"javafx-swing","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-swing/22/javafx-swing-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]},{"module":{"organization":"org.openjfx","name":"javafx-web","revision":"22","configurations":"default","isChanging":false,"isTransitive":true,"isForce":false,"explicitArtifacts":[],"inclusions":[],"exclusions":[],"extraAttributes":{},"crossVersion":{"type":"Disabled"}},"artifacts":[[{"name":"javafx-web","type":"src","extension":"jar","classifier":"sources","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Coursier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-sources.jar"],[{"name":"javafx-web","type":"jar","extension":"jar","classifier":"win","configurations":[],"url":"https://repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar","extraAttributes":{},"allowInsecureProtocol":false},"file:///C:/Users/youse/AppData/Local/Cours
ier/Cache/v1/https/repo1.maven.org/maven2/org/openjfx/javafx-web/22/javafx-web-22-win.jar"]],"missingArtifacts":[],"evicted":false,"extraAttributes":{},"configurations":[{"name":"test"},{"name":"optional"},{"name":"compile"},{"name":"default"},{"name":"runtime"}],"licenses":[],"callers":[]}],"details":[]}],"stats":{"resolveTime":-1,"downloadTime":-1,"downloadSize":-1,"cached":false},"stamps":{}} \ No newline at end of file diff --git a/target/streams/compile/_global/_global/compileOutputs/previous b/target/streams/compile/_global/_global/compileOutputs/previous deleted file mode 100644 index ea2dcbfa2..000000000 --- a/target/streams/compile/_global/_global/compileOutputs/previous +++ /dev/null @@ -1 +0,0 @@ -["sbt.Task[scala.collection.Seq[java.nio.file.Path]]",["C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\BiMap$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\BiMap$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\BiMap.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Bool$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Bool$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Calc$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Calc$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Calc$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Calc.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\CircularQueue$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\CircularQueue$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\CircularQueue.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\CommonFunctions$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\CommonFunctions$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Coordinates$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Coordinates$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Coordinates.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Counter$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Counter$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Counter$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Counter.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\DoublyLinkedList$ListIterator$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\DoublyLinkedList$ListIterator.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\DoublyLinkedList$Node$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\DoublyLinkedList$Node.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\DoublyLinkedList$NodeIterator$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\DoublyLinkedList$NodeIterator.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\DoublyLinkedList$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\DoublyLinkedList$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\DoublyLinkedList.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Earth$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Earth.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\EasyWriter$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\EasyWriter$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\EasyWriter$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\EasyWriter.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Fib$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Fib$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Fib$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Fib.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\FileReader$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\FileReader$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\GenIndexHtml$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\GenIndexHtml$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\HyperParameter$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\HyperParameter$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\HyperParameter.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\LatLong$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\LatLong$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\LatLong$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\LatLong.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\LatLong2CTM$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\LatLong2CTM.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\LatLong2UTM$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\LatLong2UTM.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Make_VectorI$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Make_VectorI$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\MergeSortIndirect$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\MergeSortIndirect$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\MergeSortIndirect$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\MergeSortIndirect.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\MultiArrayDeque$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\MultiArrayDeque$package$Car$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\MultiArrayDeque$package$Car$3$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\MultiArrayDeque$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\MultiArrayDeques.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\PriorityQueue$$anon$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\PriorityQueue$$anon$2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\PriorityQueue$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\PriorityQueue$ResizableArrayAccess.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\PriorityQueue.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Ring$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Ring$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Ring.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\SetExt$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\SetExt$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\SimpleUniform$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\SimpleUniform.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\SkipList$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\SkipList$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\SkipList.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\SkipNodeType.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\TimeNum$$anon$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\TimeNum$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\TimeNum$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\TimeNum$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\TimeNum.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Timer$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Timer$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\UTM2LatLong$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\UTM2LatLong.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Unicode$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Unicode$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Unicode$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Unicode.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Util$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\Util$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\ValueType$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\ValueType$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\ValueTypeOrd$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\ValueTypeOrd.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\AnimateCommand$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\AnimateCommand.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\Animator.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$10.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$11.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$12.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$13.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$14.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$15.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$16.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$17.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$18.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$8.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$$anon$9.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\CommandType.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\Counter$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\Counter.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\DgAnimator$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\DgAnimator$Canvas.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\DgAnimator$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\DgAnimator$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\DgAnimator.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\Dgraph$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\Dgraph$Edge$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\Dgraph$Edge.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\Dgraph$Node$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\Dgraph$Node.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\Dgraph$Token$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\Dgraph$Token.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\Dgraph$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\Dgraph$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\Dgraph.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\EidCounter$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\EidCounter.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\PointOn$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\PointOn$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\SimpleAnimator$Canvas.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\SimpleAnimator$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\SimpleAnimator$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\SimpleAnimator.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\SimpleAnimator2$Canvas.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\SimpleAnimator2$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\SimpleAnimator2$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\SimpleAnimator2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\dgAnimatorTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\dgAnimatorTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\dgAnimatorTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\dgraphTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\simpleAnimator2Test.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\animation\\simpleAnimatorTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\biMapTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\boolTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\AutoDiff$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\AutoDiff$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\AutoDiff.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\B_Spline$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\B_Spline$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\B_Spline$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\B_Spline.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\BasisFunction$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\BasisFunction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\DB_Spline$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\DB_Spline$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\DB_Spline$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\DB_Spline.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\DBasisFunction$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\DBasisFunction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\DFourier$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\DFourier$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\DFourier$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\DFourier.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\DRadial$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\DRadial$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\DRadial$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\DRadial.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Differential$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Differential$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Differential$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Differential.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\FFT$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\FFT$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\FFT$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\FFT.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Fourier$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Fourier$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Fourier$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Fourier.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\GaussianFunc.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Hilbert$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Hilbert$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Hilbert$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Hilbert.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Integral$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Integral$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Integral$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Integral.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Node$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Node.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Poly$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Poly$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Poly$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Poly.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Radial$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Radial$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Radial$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\Radial.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\RadialType$$anon$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\RadialType$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\RadialType.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\autoDiffTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\autoDiffTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\b_SplineTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\b_SplineTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\b_SplineTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\b_SplineTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\dB_SplineTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\dB_SplineTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\dFourierTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\dRadialTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\differentialTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\differentialTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\fFTTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\fourierTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\hilbertTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\integralTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\integralTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\polyTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\calculus\\radialTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\cforTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\circularQueueTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\commonFunTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\coordinatesTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\coordinatesTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\coordinatesTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\coordinatesTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\counterTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\BinTree.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\BpNode$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\BpNode$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\BpNode$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\BpNode.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\BpTreeMap$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\BpTreeMap$TreeIterator$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\BpTreeMap$TreeIterator.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\BpTreeMap$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\BpTreeMap$package$Car$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\BpTreeMap$package$Car$3$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\BpTreeMap$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\BpTreeMap.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\BpTreeMultiMap$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\BpTreeMultiMap.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\FD$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\FD.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\HashMultiMap$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\HashMultiMap.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\Identifiable$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\Identifiable.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\JHashMap$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\JHashMap.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\JHashMultiMap$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\JHashMultiMap.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\JTreeMap.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\JTreeMultiMap.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\JavaMap$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\JavaMap$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\KeyType$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\KeyType$given_Ordering_KeyType$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\KeyType.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\LinHashMap$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\LinHashMap$Bucket.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\LinHashMap$HTIterator$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\LinHashMap$HTIterator.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\LinHashMap$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\LinHashMap$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\LinHashMap.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\LinHashMultiMap$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\LinHashMultiMap.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\MakeSchema$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\MakeSchema$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\MakeSchema$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\MakeSchema.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\MaxSpanningTree$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\MaxSpanningTree.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\MinSpanningTree$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\MinSpanningTree$Elem$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\MinSpanningTree$Elem.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\MinSpanningTree$NodeOrder$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\MinSpanningTree$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\MinSpanningTree$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\MinSpanningTree.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\MultiMap$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\MultiMap$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\Normalization$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\Normalization$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\Normalization.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\SpanningTree$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\SpanningTree$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\SpanningTree.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\Spatial.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\TNode$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\TNode$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\TNode$package$NamedTNode$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\TNode$package$NamedTNode$3$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\TNode$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\TNode.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\Tabular$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\Tabular$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\Tabular$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\Tabular.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\Temporal.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\TimeInterval$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\TimeInterval$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\TimeInterval$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\TimeInterval.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\TimeOfWeek$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\TimeOfWeek$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\TimeOfWeek$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\TimeOfWeek.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\Tree$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\Tree$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\Tree$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\Tree.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\TreeMultiMap.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\TreeNode$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\TreeNode.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\bpNodeTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\bpNodeTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\bpNodeTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\bpTreeMapTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\bpTreeMapTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\bpTreeMapTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\bpTreeMapTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\bpTreeMapTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\Edge$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\Edge$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\Edge$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\Edge.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\EdgeType$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\EdgeType$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\EdgeType$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\EdgeType.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\PGraph$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\PGraph$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\PGraph$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\PGraph.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\SocialNetwork$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\SocialNetwork.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\Topological$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\Topological$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\Topological.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\Vertex$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\Vertex$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\Vertex$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\Vertex.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\VertexType$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\VertexType$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\VertexType$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\VertexType.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\edgeTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\edgeTypeTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\pGraphTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\pGraphTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\vertexTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph\\vertexTypeTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\DualIso$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\DualIso$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\DualIso.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\DualSim$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\DualSim$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\DualSim.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\ExampleGraphD$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\ExampleGraphD.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\ExampleGraphS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\ExampleGraphS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\Graph$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\Graph$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\Graph$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\Graph.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\Graph0$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\Graph0.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphDFS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphDFS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphDFS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphDFS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphGen$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphGen$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphGen$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphGen.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphIO$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphIO$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphIO$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphIO.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphMatcher.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphMetrics$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphMetrics$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphMetrics$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphMetrics.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphSim$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphSim$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\GraphSim.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\MatchAnswers$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\MatchAnswers$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\MatchAnswers$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\MatchAnswers.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\ShortestPath$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\ShortestPath$Item$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\ShortestPath$Item.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\ShortestPath$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\ShortestPath$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\ShortestPath.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\TopSort$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\TopSort$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\TopSort$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\TopSort.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\TrafficLight$$anon$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\TrafficLight$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\TrafficLight.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\dualIsoTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\dualIsoTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\dualIsoTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\dualSimTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\dualSimTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\dualSimTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphDFSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphGenTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphGenTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphGenTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphGenTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphGenTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphGenTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphGenTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphGenTest8.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphIOTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphMetricsTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphSimTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphSimTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphSimTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphSimTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\graphTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\matchAnswersTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\shortestPathTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\shortestPathTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_pm\\topSortTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_relation\\Vertex$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_relation\\Vertex.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_relation\\VertexType$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_relation\\VertexType$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_relation\\VertexType$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_relation\\VertexType.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\graph_relation\\vertexTypeTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\javaMapTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\linHashMapTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\logic\\SATsolver$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\logic\\SATsolver$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\logic\\sATsolverTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\makeSchemaTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\makeSchemaTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\minSpanningTreeTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\minSpanningTreeTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\minSpanningTreeTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\minSpanningTreeTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\ExampleMuGraphD$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\ExampleMuGraphD.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\ExampleMuGraphS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\ExampleMuGraphS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MatchAnswers$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MatchAnswers$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MatchAnswers$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MatchAnswers.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MuDualIso$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MuDualIso$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MuDualIso.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MuDualSim$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MuDualSim$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MuDualSim.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MuGraph$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MuGraph$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MuGraph$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MuGraph.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MuGraphGen$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MuGraphGen$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MuGraphGen.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MuGraphMatcher.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MuGraphSim$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MuGraphSim$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\MuGraphSim.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\matchAnswersTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\muDualIsoTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\muDualIsoTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\muDualIsoTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\muDualSimTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\muDualSimTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\muDualSimTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\muGraphGenTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\muGraphGenTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\muGraphGenTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\muGraphSimTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\muGraphSimTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\muGraphSimTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\muGraphTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\mugraph_pm\\muGraphTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\multiMapTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\normalizationTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\normalizationTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\normalizationTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\normalizationTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\normalizationTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\normalizationTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\normalizationTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\normalizationTest8.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\Ex_Days$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\Ex_Days.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\Ex_ProductSales$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\Ex_ProductSales.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\Ex_Teaching$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\Ex_Teaching$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\Ex_Teaching$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\Ex_Teaching.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\Relation$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\Relation$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\Relation$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\Relation.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\TableGen$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\TableGen$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\TableGen$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\TableGen.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\Vectr$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\Vectr$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\relationTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\relationTest11.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\relationTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\relationTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\relationTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\relationTest8.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\relationTest9.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\showTables.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\relation\\tableGenTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\spanningTreeTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\tNodeTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\BankDB$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\BankDB$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\Edge$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\Edge.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\GTable$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\GTable$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\GTable$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\GTable.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\KGTable$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\KGTable$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\KGTable$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\KGTable.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\LTable$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\LTable$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\LTable$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\LTable.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\MovieDB$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\MovieDB$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\PurchaseOrderDB$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\PurchaseOrderDB$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\PurchaseOrderDB.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\TA_AssignmentDB$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\TA_AssignmentDB$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\TA_AssignmentDB$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\TA_AssignmentDB.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\Table$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\Table$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\Table$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\Table.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\TableGen$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\TableGen$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\TableGen$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\TableGen.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\TimeComparison$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\TimeComparison$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\VTable$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\VTable$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\VTable$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\VTable.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\Vertex$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\Vertex.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\Vertex_$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\Vertex_.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\bankDB.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\bankDB2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\gTableTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\gTableTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\gTableTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\kGTableTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\kGTableTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\lTableTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\lTableTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\lTableTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\lTableTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\movieDB.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\showTabs.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\tableGenTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\tableTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\tableTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\tableTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\timer_function.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\vTableTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\table\\vTableTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\timeIntervalTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\timeIntervalTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\timeIntervalTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\timeOfWeekTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\treeTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\treeTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\treeTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\triplegraph\\RDFTriple$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\triplegraph\\RDFTriple.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\triplegraph\\Triple$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\triplegraph\\Triple.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\triplegraph\\TripleGraph$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\triplegraph\\TripleGraph$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\triplegraph\\TripleGraph$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\triplegraph\\TripleGraph.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\triplegraph\\TripleGraphMatcher.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\triplegraph\\TripleGraphSim$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\triplegraph\\TripleGraphSim$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\triplegraph\\TripleGraphSim.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\triplegraph\\tripleGraphSimTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\database\\triplegraph\\tripleGraphTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\doublyLinkedListTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\BallFlight$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\BallFlight$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\DormandPrince$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\DormandPrince$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\DormandPrince$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\DormandPrince.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\DynamicEq$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\DynamicEq$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\DynamicEq$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\DynamicEq.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\FirstOrderPDE$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\FirstOrderPDE$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\FirstOrderPDE.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\Integrator$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\Integrator$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\Integrator.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\LinearDiffEq$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\LinearDiffEq$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\LinearDiffEq.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\ModRosenbrock$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\ModRosenbrock$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\ModRosenbrock$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\ModRosenbrock.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\ParabolicPDE$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\ParabolicPDE$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\ParabolicPDE.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\Radau$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\Radau$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\Radau$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\Radau.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\Reactions$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\Reactions$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\RungeKutta$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\RungeKutta$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\RungeKutta$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\RungeKutta.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\RungeKutta2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\RungeKutta2$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\RungeKutta2$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\RungeKutta2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\RungeKutta3$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\RungeKutta3$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\RungeKutta3$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\RungeKutta3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\ballFlight.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\dormandPrinceTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\dormandPrinceTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\dormandPrinceTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\dormandPrinceTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\dynamicEqTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\firstOrderPDETest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\firstOrderPDETest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\firstOrderPDETest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\linearDiffEqTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\modRosenbrockTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\modRosenbrockTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\parabolicPDETest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\radauTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\reactions.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\rungeKutta2Test.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\rungeKutta2Test2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\rungeKutta3Test.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\rungeKutta3Test2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\rungeKutta3Test3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\rungeKuttaTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\dynamics\\rungeKuttaTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\easyWriterTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\fibTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\genIndexHtml.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\hyperParameterTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\hyperParameterTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\latLongTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\latLongTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\latLongTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\latLongTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\makeVectorI.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Bidiagonal$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Bidiagonal$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Bidiagonal.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Canvas$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Canvas.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Combinatorics$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Combinatorics$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Combinatorics$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Combinatorics.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Complex$$anon$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Complex$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Complex$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Complex$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Complex.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Convert$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Convert$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Correlogram$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Correlogram$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Correlogram$package$CT$2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Correlogram$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Correlogram.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Eigen$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Eigen$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Eigenvalue.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\EigenvalueSym.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Eigenvector$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Eigenvector.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_Cholesky$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_Cholesky$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_Cholesky.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_Inverse$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_Inverse$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_Inverse$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_Inverse.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_LQ$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_LQ$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_LQ.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_LU$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_LU$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_LU$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_LU.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_QR$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_QR$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_QR$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_QR.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_QR_RR$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_QR_RR$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_QR_RR$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_QR_RR.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_SVD$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_SVD$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_SVD$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Fac_SVD.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Factorization.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\FramelessHistogram.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\FramelessPlot$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\FramelessPlot.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\HCanvas$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\HCanvas.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Hessenburg.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Histogram$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Histogram$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Histogram$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Histogram.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Householder$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Householder$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Householder$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Householder.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\HouseholderT.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\InverseTest$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\InverseTest$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixCalc$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixCalc$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixCalc.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixD$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixD$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixD$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixD.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixD2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixD2$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixD2$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixD2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixD2Example$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixD2Example.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixDExample$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixDExample.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixDOps$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixDOps.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixI$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\MatrixI.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Pivoting.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\PivotingTest$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\PivotingTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Plot$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Plot$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Plot$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Plot.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\PlotC$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\PlotC$Canvas.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\PlotC$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\PlotC$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\PlotC.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\PlotM$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\PlotM$CanvasP.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\PlotM$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\PlotM$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\PlotM.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Probability$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Probability$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Probability$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Probability.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\RTensor4D$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\RTensor4D$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\RTensor4D$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\RTensor4D.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\RTensorD$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\RTensorD$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\RTensorD$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\RTensorD.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\StatTable$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\StatTable$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\StatTable.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Statistic$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Statistic$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Statistic$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Statistic.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Stats4TS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Stats4TS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Stats4TS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Stats4TS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\SymTriMatrixD.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\SymmetricQRstep$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\SymmetricQRstep.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\TensorD$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\TensorD$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\TensorD$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\TensorD.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\TimeStatistic$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\TimeStatistic$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\TimeStatistic$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\TimeStatistic.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\TnT_Split$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\TnT_Split$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\TnT_Split$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\TnT_Split.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Transform$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Transform$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Transform$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\Transform.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VMatrixD$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VMatrixD$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VMatrixD$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VMatrixD.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorC$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorC$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorC$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorC.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorD$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorD$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorD$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorD.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorDOps$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorDOps.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorI$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorI$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorI$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorI.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorL$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorL$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorL$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorL.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorT$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorT$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorT$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\VectorT.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\bidiagonalTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\combinatoricsTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\combinatoricsTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\complexTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\correlogramTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\cosForm.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\eigenTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\fac_CholeskyTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\fac_CholeskyTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\fac_CholeskyTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\fac_InverseTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\fac_LQTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\fac_LUTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\fac_LUTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\fac_LUTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\fac_QRTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\fac_QRTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\fac_QR_RRTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\fac_SVDTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\fac_SVDTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\fac_SVDTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\fac_SVDTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\histogramTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\householderTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\inverseTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\log1pForm$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\log1pForm.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\logForm$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\logForm.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\matrixCalc0.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\matrixCalc2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\matrixCalc3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\matrixCalc4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\matrixD2Test.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\matrixD2Test2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\matrixDTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\matrixDTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\matrixDTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\matrixDTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\matrixDTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\matrixDTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\matrixDTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\plotCTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\plotMTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\plotMTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\plotTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\powForm.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\probabilityTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\probabilityTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\probabilityTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\probabilityTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\probabilityTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\probabilityTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\rTensor4DTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\rTensorDTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\rangeForm.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\sinForm.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\statTableTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\statisticTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\stats4TSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\tensorDTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\tensorDTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\tensorDTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\timeStatisticTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\tnT_SplitTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\transformTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\transformTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\transformTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\vMatrixDTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\vectorCTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\vectorCTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\vectorCTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\vectorDTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\vectorDTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\vectorDTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\vectorDTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\vectorDTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\vectorDTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\vectorITest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\vectorLTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\vectorSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\vectorSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\vectorTTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\vectorTTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mathstat\\zForm.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mergeSortIndirectTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\mergeSortIndirectTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\AFF$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\AFF.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ActivationFun$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ActivationFun$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ActivationFun$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ActivationFun.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\BestStep$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\BestStep.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\DistanceOutlier$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\DistanceOutlier.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Example_AutoMPG$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Example_AutoMPG$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Example_AutoMPG$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Example_AutoMPG.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Example_BPressure$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Example_BPressure$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Example_BPressure$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Example_BPressure.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Example_BasketBall$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Example_BasketBall$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Example_BasketBall$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Example_BasketBall.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ExpRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ExpRegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ExpRegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ExpRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ExpandableVariable.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\FeatureSelection$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\FeatureSelection$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\FeatureSelection.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Fit$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Fit$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Fit$package$ft$2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Fit$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Fit.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\FitM$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\FitM.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Imputation$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Imputation$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Imputation.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ImputeBackward$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ImputeBackward.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ImputeForward$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ImputeForward.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ImputeMean$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ImputeMean.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ImputeMovingAvg$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ImputeMovingAvg.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ImputeNormal$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ImputeNormal.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ImputeNormalWin$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ImputeNormalWin.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ImputeRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ImputeRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Initializer$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Initializer.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\KNN_Regression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\KNN_Regression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\KNN_Regression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\KNN_Regression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\LassoRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\LassoRegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\LassoRegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\LassoRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\MatrixTransform$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\MatrixTransform$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Model$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Model$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Model.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\MonitorLoss.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\NoSubModels.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Node$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Node.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\NonlinearRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\NonlinearRegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\NonlinearRegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\NonlinearRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\NullModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\NullModel$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\NullModel$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\NullModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Outlier$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Outlier$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Outlier.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Perceptron$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Perceptron$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Perceptron$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Perceptron.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\PoissonRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\PoissonRegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\PoissonRegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\PoissonRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\PolyORegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\PolyORegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\PolyORegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\PolyORegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\PolyRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\PolyRegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\PolyRegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\PolyRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Predictor$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Predictor$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Predictor$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Predictor.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$10.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$11.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$12.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$13.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$14.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$15.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$16.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$17.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$18.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$19.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$20.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$21.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$22.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$23.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$24.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$25.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$8.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$$anon$9.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QoF.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QuantileOutlier$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QuantileOutlier.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QuartileXOutlier$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\QuartileXOutlier.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Regression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Regression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Regression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Regression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionCat$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionCat$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionCat$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionCat.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTree$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTree$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTree$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTree.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTreeGB$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTreeGB$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTreeGB$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTreeGB.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTreeMT$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTreeMT$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTreeMT$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTreeMT.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTreeRF$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTreeRF$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTreeRF$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTreeRF.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTreeRF_MT$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTreeRF_MT$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTreeRF_MT$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionTreeRF_MT.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionWLS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionWLS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionWLS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RegressionWLS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RidgeRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RidgeRegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RidgeRegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RidgeRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RoundRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RoundRegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RoundRegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\RoundRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Sampling$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Sampling$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Scaling.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SelectionTech$$anon$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SelectionTech$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SelectionTech.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SimpleExpRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SimpleExpRegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SimpleExpRegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SimpleExpRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SimpleRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SimpleRegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SimpleRegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SimpleRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SimplerRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SimplerRegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SimplerRegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SimplerRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SumQueue$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SumQueue$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SumQueue$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SumQueue.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SumSqQueue$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SumSqQueue.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SymLassoRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SymLassoRegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SymLassoRegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SymLassoRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SymRidgeRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SymRidgeRegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SymRidgeRegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SymRidgeRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SymbolicRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SymbolicRegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SymbolicRegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\SymbolicRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\TestFit.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\TranRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\TranRegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\TranRegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\TranRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\TranRegressionEx$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\TranRegressionEx.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\TrigRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\TrigRegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\TrigRegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\TrigRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Variable$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Variable$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Variable$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\Variable.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\VariableKind$$anon$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\VariableKind$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\VariableKind.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\activationFunTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\activationFunTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\activationFunTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\activationFunTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\activationFunTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\activationFunTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\BaggingTrees$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\BaggingTrees$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\BaggingTrees$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\BaggingTrees.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\BayesClassifier$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\BayesClassifier$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\BayesClassifier$package$bc$2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\BayesClassifier$package$bc$4$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\BayesClassifier$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\BayesClassifier.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Classifier$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Classifier$BestStep$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Classifier$BestStep.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Classifier$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Classifier$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Classifier.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree$package$Tree$2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree_C45$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree_C45$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree_C45$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree_C45.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree_C45wp$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree_C45wp$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree_C45wp$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree_C45wp.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree_ID3$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree_ID3$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree_ID3$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree_ID3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree_ID3wp$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree_ID3wp$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree_ID3wp$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\DecisionTree_ID3wp.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_BreastCancer$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_BreastCancer$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_BreastCancer$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_BreastCancer.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_Diabetes$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_Diabetes$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_Diabetes$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_Diabetes.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_Iris$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_Iris$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_Iris$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_Iris.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_MTcars$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_MTcars.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_PlayTennis$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_PlayTennis$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_PlayTennis$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_PlayTennis.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_PlayTennis_Cont$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_PlayTennis_Cont$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_PlayTennis_Cont$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Example_PlayTennis_Cont.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\FitC$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\FitC$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\FitC$package$TestFitC$2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\FitC$package$TestFitC$4$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\FitC$package$TestFitC$6$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\FitC$package$TestFitC$8$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\FitC$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\FitC.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\HiddenMarkov$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\HiddenMarkov$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\HiddenMarkov$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\HiddenMarkov.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\KNN_Classifier$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\KNN_Classifier$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\KNN_Classifier$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\KNN_Classifier.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\LinDiscAnalyis$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\LinDiscAnalyis$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\LinDiscAnalyis$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\LinDiscAnalyis.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\LogisticRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\LogisticRegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\LogisticRegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\LogisticRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\NaiveBayes$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\NaiveBayes$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\NaiveBayes$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\NaiveBayes.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\NaiveBayesR$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\NaiveBayesR$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\NaiveBayesR$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\NaiveBayesR.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Node$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\Node.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\NullModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\NullModel$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\NullModel$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\NullModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$10.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$11.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$12.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$13.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$14.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$15.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$16.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$17.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$18.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$4.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$8.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$$anon$9.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\QoFC.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\RandomForest$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\RandomForest$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\RandomForest$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\RandomForest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\SimpleLDA$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\SimpleLDA$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\SimpleLDA$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\SimpleLDA.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\SimpleLogisticRegression$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\SimpleLogisticRegression$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\SimpleLogisticRegression$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\SimpleLogisticRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\SupportVectorMachine$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\SupportVectorMachine$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\SupportVectorMachine$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\SupportVectorMachine.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\TANBayes$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\TANBayes$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\TANBayes$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\TANBayes.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\baggingTreesTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\baggingTreesTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\baggingTreesTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\baggingTreesTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\baggingTreesTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\baggingTreesTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\baggingTreesTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\bayesClassifierTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\bayesClassifierTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\classifierTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\decisionTreeTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\decisionTree_C45Test.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\decisionTree_C45Test2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\decisionTree_C45Test3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\decisionTree_C45Test4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\decisionTree_C45Test5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\decisionTree_C45wpTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\decisionTree_C45wpTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\decisionTree_ID3Test.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\decisionTree_ID3Test2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\decisionTree_ID3Test3.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\decisionTree_ID3wpTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\decisionTree_ID3wpTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\decisionTree_ID3wpTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\example_BreastCancerTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\example_DiabetesTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\example_IrisTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\example_PlayTennisTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\example_PlayTennis_ContTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\fitCTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\fitCTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\fitCTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\fitCTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\hiddenMarkovTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\hiddenMarkovTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\hiddenMarkovTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\hiddenMarkovTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\kNN_ClassifierTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\kNN_ClassifierTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\kNN_ClassifierTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\kNN_ClassifierTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\linDiscAnalyisTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\logisticRegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\logisticRegressionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\naiveBayesRTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\naiveBayesRTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\naiveBayesTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\naiveBayesTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\naiveBayesTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\naiveBayesTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\nullModelTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\randomForestTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\randomForestTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\randomForestTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\randomForestTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\randomForestTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\randomForestTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\randomForestTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\simpleLDATest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\simpleLDATest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\simpleLogisticRegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\simpleLogisticRegressionTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\simpleLogisticRegressionTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\simpleLogisticRegressionTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\simpleLogisticRegressionTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\supportVectorMachineTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\supportVectorMachineTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\tANBayesTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\tANBayesTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\tANBayesTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\classifying\\tANBayesTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\Algorithm$$anon$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\Algorithm$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\Algorithm.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\Cluster$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\Cluster.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\Clusterer$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\Clusterer.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\ClusteringPredictor$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\ClusteringPredictor$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\ClusteringPredictor$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\ClusteringPredictor.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\Distance$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\Distance$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\GapStatistic$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\GapStatistic$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\GapStatistic$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\GapStatistic.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\HierClusterer$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\HierClusterer$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\HierClusterer$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\HierClusterer.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansClusterer$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansClusterer$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansClusterer$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansClusterer.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansClusterer2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansClusterer2$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansClusterer2$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansClusterer2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansClustererHW$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansClustererHW$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansClustererHW$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansClustererHW.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansClustererPP$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansClustererPP$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansClustererPP$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansClustererPP.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansPPClusterer$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansPPClusterer$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansPPClusterer$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansPPClusterer.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansPPClustererTester$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\KMeansPPClustererTester.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\MarkovClusterer$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\MarkovClusterer.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\MarkovClustering$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\MarkovClustering$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\RandomGraph$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\RandomGraph$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\RandomGraph.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\TightClusterer$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\TightClusterer$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\TightClusterer$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\TightClusterer.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\clusteringPredictorTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\clusteringPredictorTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\clusteringPredictorTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\gapStatisticTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\gapStatisticTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\hierClustererTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\hierClustererTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansClusterer2Test.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansClusterer2Test2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansClusterer2Test3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansClusterer2Test4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansClustererHWTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansClustererHWTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansClustererHWTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansClustererPPTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansClustererPPTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansClustererPPTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansClustererTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansClustererTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansClustererTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansClustererTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansPPClustererTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansPPClustererTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansPPClustererTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\kMeansPPClustererTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\markovClustererTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\markovClustererTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\randomGraphTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\clustering\\tightClustererTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\example_AutoMPG_Correlation.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\example_AutoMPG_NullModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\example_AutoMPG_QuadRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\example_AutoMPG_Regression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\example_AutoMPG_SimpleRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\example_AutoMPG_SimplerRegression.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\example_BPressureTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\example_BPressureTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\example_BasketBallTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\expRegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\expRegressionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\expRegressionTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\fitTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\fitTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\AR$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\AR$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\AR$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\AR.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARIMA$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARIMA$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARIMA$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARIMA.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARIMA_diff$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARIMA_diff$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARIMA_diff$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARIMA_diff.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARMA$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARMA$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARMA$package$CG$2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARMA$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARMA.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_D$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_D$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_D$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_D.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_Quad$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_Quad$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_Quad$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_Quad.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_Quad_D$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_Quad_D$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_Quad_D$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_Quad_D.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_Symb$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_Symb$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_Symb$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_Symb.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_Symb_D$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_Symb_D$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_Symb_D$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARX_Symb_D.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARY$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARY$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARY$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARY.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARY_D$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARY_D$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARY_D$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARY_D.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARY_Quad$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARY_Quad$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARY_Quad$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ARY_Quad.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Baseline.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Baselines$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Baselines$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\DTW$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\DTW$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\DTW$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\DTW.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Diagnoser.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Example_Covid$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Example_Covid$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Example_Covid$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Example_Covid.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Example_GasFurnace$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Example_GasFurnace$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Example_GasFurnace$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Example_GasFurnace.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Example_ILI$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Example_ILI$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Example_ILI$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Example_ILI.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Example_LakeLevels$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Example_LakeLevels.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ForecastMatrix$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ForecastMatrix$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ForecastMatrix$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\ForecastMatrix.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Forecaster$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Forecaster.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Forecaster_D$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Forecaster_D.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Forecaster_Reg$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Forecaster_Reg.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\MakeMatrix4TS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\MakeMatrix4TS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\MakeMatrix4TS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\MakeMatrix4TS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\MakeMatrix4TSY.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\NullModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\NullModel$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\NullModel$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\NullModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Periodogram$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Periodogram$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Periodogram.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\RandomWalk$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\RandomWalk$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\RandomWalk$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\RandomWalk.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\RandomWalkS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\RandomWalkS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\RandomWalkS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\RandomWalkS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\SARY$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\SARY$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\SARY$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\SARY.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\SimpleExpSmoothing$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\SimpleExpSmoothing$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\SimpleExpSmoothing$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\SimpleExpSmoothing.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\SimpleMovingAverage$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\SimpleMovingAverage$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\SimpleMovingAverage$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\SimpleMovingAverage.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Stationarity$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Stationarity$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Stationarity_KPSS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Stationarity_KPSS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Stationarity_KPSS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\Stationarity_KPSS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\TranARY$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\TranARY$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\TranARY$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\TranARY.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\TrendModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\TrendModel$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\TrendModel$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\TrendModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\UnitRoot.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\WeightedMovingAverage$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\WeightedMovingAverage$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\WeightedMovingAverage$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\WeightedMovingAverage.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRIMATest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRIMATest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRIMATest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRIMA_diffTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRMATest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRMATest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRMATest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRMATest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRMATest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRMATest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRMATest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRXTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRXTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRXTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRX_DTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRX_DTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRX_QuadTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRX_QuadTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRX_QuadTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRX_Quad_DTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRX_Quad_DTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRX_SymbTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRX_SymbTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRX_Symb_DTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRX_Symb_DTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRYTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRYTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRYTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRYTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRYTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRYTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRY_DTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRY_DTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRY_DTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRY_DTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRY_QuadTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRY_QuadTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRY_QuadTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRY_QuadTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\aRY_QuadTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\baselineTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\dTWTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_CovidTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_CovidTest10.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_CovidTest11.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_CovidTest12.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_CovidTest13.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_CovidTest14.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_CovidTest15.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_CovidTest16.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_CovidTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_CovidTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_CovidTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_CovidTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_CovidTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_CovidTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_CovidTest8.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_CovidTest9.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_GasFurnaceTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_GasFurnaceTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_ILITest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_ILITest10.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_ILITest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_ILITest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_ILITest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_ILITest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_ILITest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_ILITest8.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\example_ILITest9.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\forecastMatrixTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\forecastMatrixTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\forecastMatrixTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\forecastMatrixTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\AR_Star$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\AR_Star$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\AR_Star$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\AR_Star.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\ForecastTensor$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\ForecastTensor$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\ForecastTensor$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\ForecastTensor.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\RandomWalk_Star$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\RandomWalk_Star$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\RandomWalk_Star$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\RandomWalk_Star.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\VAR$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\VAR$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\VAR$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\VAR.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\aR_StarTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\aR_StarTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\aR_StarTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\forecastTensorTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\forecastTensorTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\randomWalk_StarTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\randomWalk_StarTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\randomWalk_StarTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\vARTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\vARTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\vARTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\vARTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\vARTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\multivar\\vARTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\Attention$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\Attention$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\Attention$package$att$11$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\Attention$package$att$3$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\Attention$package$att$5$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\Attention$package$att$7$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\Attention$package$att$9$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\Attention$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\Attention.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\DenseLayer$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\DenseLayer.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\DropoutLayer$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\DropoutLayer.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\GRU$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\GRU$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\GRU$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\GRU.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\Gate$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\Gate.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\LSTM$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\LSTM$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\LSTM$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\LSTM.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\LayerNorm$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\LayerNorm.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\NeuralNet_3L4TS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\NeuralNet_3L4TS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\NeuralNet_3L4TS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\NeuralNet_3L4TS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\NeuralNet_XL4TS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\NeuralNet_XL4TS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\NeuralNet_XL4TS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\NeuralNet_XL4TS.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\PositionalEnc$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\PositionalEnc$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\PositionalEnc$package$pe$2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\PositionalEnc$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\PositionalEnc.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\RMSNorm$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\RMSNorm.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\RNN$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\RNN$ParamGroup$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\RNN$ParamGroup.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\RNN$ParamGroupVector$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\RNN$ParamGroupVector.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\RNN$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\RNN$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\RNN.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\TrEncoderLayer$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\TrEncoderLayer.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\attentionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\attentionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\attentionTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\attentionTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\attentionTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\gRUTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\gRUTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\gRUTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\lSTMTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\lSTMTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\lSTMTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\neuralNet_3L4TSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\neuralNet_3L4TSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\neuralNet_3L4TSTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\neuralNet_XL4TSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\neuralNet_XL4TSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\neuralNet_XL4TSTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\neuralNet_XL4TSTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\neuralNet_XL4TSTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\positionalEncTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\rNNTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\rNNTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\rNNTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\neuralforecasting\\rNNTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\nullModelTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\nullModelTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\nullModelTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\nullModelTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\nullModelTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\periodogramTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\randomWalkSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\randomWalkSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\randomWalkSTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\randomWalkSTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\randomWalkTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\randomWalkTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\randomWalkTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\randomWalkTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\randomWalkTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\randomWalkTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\sARYTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\sARYTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\sARYTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\sARYTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\sARYTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\simpleExpSmoothingTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\simpleExpSmoothingTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\simpleExpSmoothingTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\simpleExpSmoothingTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\simpleMovingAverageTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\simpleMovingAverageTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\simpleMovingAverageTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\simpleMovingAverageTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\stationarity_KPSSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\stationarity_KPSSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\stationarity_KPSSTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\stationaryTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\stationaryTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\stationaryTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\tranARYTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\tranARYTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\tranARYTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\tranARYTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\trendModelTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\trendModelTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\trendModelTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\trendModelTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\weightedMovingAverageTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\weightedMovingAverageTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\weightedMovingAverageTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting\\weightedMovingAverageTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\AR$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\AR$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\AR$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\AR.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\AR1MA$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\AR1MA$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\AR1MA$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\AR1MA.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARIMA$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARIMA$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARIMA$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARIMA.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARIMA_diff$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARIMA_diff$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARIMA_diff$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARIMA_diff.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARMA$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARMA$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARMA$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARMA.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_MV$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_MV$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_MV$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_MV.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_Quad$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_Quad$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_Quad$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_Quad.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_QuadTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_QuadTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_QuadTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_QuadTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_QuadTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_QuadTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_Quad_MV$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_Quad_MV$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_Quad_MV$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ARX_Quad_MV.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ForecastUtil$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ForecastUtil$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\Forecaster$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\Forecaster$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\Forecaster$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\Forecaster.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ForecasterX$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\ForecasterX.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\KalmanFilter$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\KalmanFilter$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\KalmanFilter$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\KalmanFilter.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\NullModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\NullModel$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\NullModel$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\NullModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\QuadSpline$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\QuadSpline$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\QuadSpline$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\QuadSpline.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RandomWalk$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RandomWalk$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RandomWalk$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RandomWalk.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeGB4TS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeGB4TS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeGB4TS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeGB4TS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeGB4TS2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeGB4TS2$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeGB4TS2$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeGB4TS2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeMT4TS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeMT4TS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeMT4TS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeMT4TS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeRF4TS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeRF4TS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeRF4TS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeRF4TS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeRF_MT4TS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeRF_MT4TS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeRF_MT4TS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RegressionTreeRF_MT4TS.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RollingValidation$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RollingValidation$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RollingValidation$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\RollingValidation.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\SARIMA$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\SARIMA.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\SimpleExpSmoothing$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\SimpleExpSmoothing$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\SimpleExpSmoothing$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\SimpleExpSmoothing.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\SimpleMovingAverage$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\SimpleMovingAverage$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\SimpleMovingAverage$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\SimpleMovingAverage.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\Stationarity$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\Stationarity$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\TrendModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\TrendModel$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\TrendModel$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\TrendModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\UnitRoot.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\VAR$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\VAR$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\VAR$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\VAR.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\WeightedMovingAverage$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\WeightedMovingAverage$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\WeightedMovingAverage$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\WeightedMovingAverage.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aR1MATest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aR1MATest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aR1MATest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aR1MATest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aR1MATest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRIMATest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRIMATest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRIMATest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRIMATest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRIMA_diffTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRMATest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRMATest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRMATest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRMATest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRMATest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRMATest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRMATest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRXTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRXTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRXTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRXTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRXTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRXTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRXTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRX_MVTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRX_MVTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRX_MVTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRX_MVTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRX_MVTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRX_MVTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRX_Quad_MVTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRX_Quad_MVTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRX_Quad_MVTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRX_Quad_MVTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRX_Quad_MVTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\aRX_Quad_MVTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\buildTensor4TSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\kalmanFilterTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\nullModelTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\nullModelTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\nullModelTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\quadSplineTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\quadSplineTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\quadSplineTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\quadSplineTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\randomWalkTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\randomWalkTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\randomWalkTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\randomWalkTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeGB4TS2Test.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeGB4TS2Test2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeGB4TS2Test3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeGB4TS2Test4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeGB4TS2Test5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeGB4TS2Test6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeGB4TSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeGB4TSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeGB4TSTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeGB4TSTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeGB4TSTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeGB4TSTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeGB4TSTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeMT4TSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeMT4TSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeMT4TSTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeMT4TSTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeMT4TSTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeMT4TSTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeMT4TSTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeRF4TSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeRF4TSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeRF4TSTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeRF4TSTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeRF4TSTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeRF4TSTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeRF4TSTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeRF_MT4TSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeRF_MT4TSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeRF_MT4TSTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeRF_MT4TSTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeRF_MT4TSTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeRF_MT4TSTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\regressionTreeRF_MT4TSTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\rollingValidationTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\rollingValidationTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\rollingValidationTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\rollingValidationTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\simpleExpSmoothingTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\simpleExpSmoothingTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\simpleExpSmoothingTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\simpleExpSmoothingTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\simpleExpSmoothingTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\simpleMovingAverageTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\simpleMovingAverageTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\simpleMovingAverageTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\simpleMovingAverageTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\simpleMovingAverageTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\stationaryTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\stationaryTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\stationaryTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\trendModelTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\trendModelTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\trendModelTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\varTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\varTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\varTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\varTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\varTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\varTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\weightedMovingAverageTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\weightedMovingAverageTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\weightedMovingAverageTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\weightedMovingAverageTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\forecasting_old\\weightedMovingAverageTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\imputationTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\imputationTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\kNN_RegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\kNN_RegressionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\kNN_RegressionTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\lassoRegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\lassoRegressionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\lassoRegressionTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\matrixTransformTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\matrixTransformTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\CNN_1D$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\CNN_1D$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\CNN_1D$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\CNN_1D.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\CNN_2D$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\CNN_2D$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\CNN_2D$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\CNN_2D.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\CoFilter_1D$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\CoFilter_1D$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\CoFilter_1D$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\CoFilter_1D.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\CoFilter_2D$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\CoFilter_2D$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\CoFilter_2D$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\CoFilter_2D.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\ELM_3L1$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\ELM_3L1$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\ELM_3L1$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\ELM_3L1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\Example_Concrete$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\Example_Concrete$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\Example_Concrete$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\Example_Concrete.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NetParam$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NetParam$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NetParam$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NetParam.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_2L$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_2L$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_2L$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_2L.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_2L_Ck$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_2L_Ck.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_3L$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_3L$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_3L$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_3L.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_3L_C2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_3L_C2$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_3L_C2$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_3L_C2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_3L_Ck$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_3L_Ck$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_XL$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_XL$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_XL$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_XL.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_XLT$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_XLT$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_XLT$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\NeuralNet_XLT.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\Optimizer$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\Optimizer.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\Optimizer_Adam.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\Optimizer_SGD.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\Optimizer_SGDM.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\PredictorMV$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\PredictorMV$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\PredictorMV$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\PredictorMV.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\RegressionMV$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\RegressionMV$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\RegressionMV$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\RegressionMV.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\StoppingRule.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\cNN_1DTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\cNN_1DTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\cNN_1DTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\cNN_2DTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\cNN_2DTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\coFilter_1DTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\coFilter_1DTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\coFilter_1DTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\coFilter_2DTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\coFilter_2DTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\coFilter_2DTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\eLM_3L1Test.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\eLM_3L1Test2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\eLM_3L1Test3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\eLM_3L1Test4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\eLM_3L1Test5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\eLM_3L1Test6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\example_ConcreteTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\example_ConcreteTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\example_ConcreteTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_2LTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_2LTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_2LTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_2LTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_2LTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_2LTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_2LTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_2LTest8.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_2LTest9.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_2L_CkTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_3LTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_3LTest10.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_3LTest11.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_3LTest12.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_3LTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_3LTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_3LTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_3LTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_3LTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_3LTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_3LTest8.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_3LTest9.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_3L_C2Test.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_XLTTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_XLTTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_XLTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_XLTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_XLTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_XLTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_XLTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_XLTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\neuralNet_XLTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\predictorMVTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\regressionMVTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\regressionMVTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\regressionMVTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\regressionMVTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\neuralnet\\regressionMVTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\nonlinearRegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\nullModelTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\nullModelTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\outlierTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\outlierTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\outlierTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\perceptronTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\perceptronTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\perceptronTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\perceptronTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\perceptronTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\perceptronTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\poissonRegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\poissonRegressionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\polyORegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\polyORegressionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\polyRegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\polyRegressionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\predictorTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionCatTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionCatTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionCatTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionCatTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionCatTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionCatTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionCatTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTest10.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTest8.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTest9.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeGBTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeGBTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeGBTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeGBTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeGBTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeGBTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeMTTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeMTTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeMTTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeRFTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeRFTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeRFTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeRFTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeRFTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeRF_MTTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeRF_MTTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeRF_MTTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeRF_MTTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionTreeTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionWLSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\regressionWLSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ridgeRegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ridgeRegressionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ridgeRegressionTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ridgeRegressionTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ridgeRegressionTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ridgeRegressionTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\ridgeRegressionTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\roundRegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\simpleExpRegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\simpleExpRegressionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\simpleExpRegressionTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\simpleRegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\simpleRegressionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\simpleRegressionTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\simpleRegressionTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\simpleRegressionTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\simpleRegressionTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\simpleRegressionTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\simpleRegressionTest8.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\simplerRegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\simplerRegressionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\simplerRegressionTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\simplerRegressionTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\sumQueueTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\sumQueueTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symLassoRegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symLassoRegressionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symLassoRegressionTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symLassoRegressionTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symLassoRegressionTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symLassoRegressionTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symLassoRegressionTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symLassoRegressionTest8.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symLassoRegressionTest9.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symRidgeRegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symRidgeRegressionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symRidgeRegressionTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symRidgeRegressionTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symRidgeRegressionTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symRidgeRegressionTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symRidgeRegressionTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symRidgeRegressionTest8.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symRidgeRegressionTest9.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symbolicRegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symbolicRegressionTest10.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symbolicRegressionTest11.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symbolicRegressionTest12.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symbolicRegressionTest13.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symbolicRegressionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symbolicRegressionTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symbolicRegressionTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symbolicRegressionTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symbolicRegressionTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symbolicRegressionTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symbolicRegressionTest8.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\symbolicRegressionTest9.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\tranRegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\tranRegressionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\tranRegressionTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\tranRegressionTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\tranRegressionTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\tranRegressionTest6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\tranRegressionTest7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\tranRegressionTest8.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\trigRegressionTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\trigRegressionTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\modeling\\variableTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\multiArrayDequesTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\BoundsConstraint$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\BoundsConstraint.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\ConjugateGradient$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\ConjugateGradient$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\ConjugateGradient$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\ConjugateGradient.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\ConjugateGradient_NoLS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\ConjugateGradient_NoLS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\ConjugateGradient_NoLS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\CoordinateDescent$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\CoordinateDescent$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\CoordinateDescent$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\CoordinateDescent.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GoldenSectionLS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GoldenSectionLS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GoldenSectionLS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GoldenSectionLS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent_Adam$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent_Adam$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent_Adam$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent_Adam.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent_Mo$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent_Mo$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent_Mo$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent_Mo.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent_Mo2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent_Mo2$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent_Mo2$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent_Mo2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent_NoLS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent_NoLS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent_NoLS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GradientDescent_NoLS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GridSearch$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GridSearch$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GridSearch$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GridSearch.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GridSearchLS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GridSearchLS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\GridSearchLS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\Hungarian$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\Hungarian$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\IntegerTabuSearch$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\IntegerTabuSearch$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\IntegerTabuSearch$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\IntegerTabuSearch.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\LassoAddm$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\LassoAddm$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\LassoAdmm$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\LassoAdmm.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\LineSearch.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\Minimize$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\Minimize$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\Minimize$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\Minimize.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\Minimizer$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\Minimizer.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\MonitorEpochs.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\NLPTest$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\NLPTest$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\NelderMeadSimplex$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\NelderMeadSimplex$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\NelderMeadSimplex.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\NelderMeadSimplex2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\NelderMeadSimplex2$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\NelderMeadSimplex2$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\NelderMeadSimplex2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\NewtonRaphson$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\NewtonRaphson$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\NewtonRaphson.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\Newton_NoLS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\Newton_NoLS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\Newton_NoLS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\Newton_NoLS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\PathMonitor.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\SPSA$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\SPSA$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\SPSA$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\SPSA.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\StoppingRule$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\StoppingRule.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\TabuSearch$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\TabuSearch$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\TabuSearch$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\TabuSearch.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\WolfeConditions$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\WolfeConditions$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\WolfeConditions$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\WolfeConditions.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\WolfeLS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\WolfeLS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\WolfeLS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\WolfeLS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\WolfeLS2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\WolfeLS2$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\WolfeLS2$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\WolfeLS2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\WolfeLS3$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\WolfeLS3$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\WolfeLS3$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\WolfeLS3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\conjugateGradientTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\conjugateGradientTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\conjugateGradientTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\conjugateGradient_NoLSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\conjugateGradient_NoLSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\conjugateGradient_NoLSTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\coordinateDescentTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\BealeFunction$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\BealeFunction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\BenchmarkFunction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\Bohachevsky1Function$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\Bohachevsky1Function.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\Bohachevsky2Function$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\Bohachevsky2Function.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\Bohachevsky3Function$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\Bohachevsky3Function.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\BoothFunction$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\BoothFunction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\Camel3Function$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\Camel3Function.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\CubeFunction$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\CubeFunction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\FreudensteinRothFunction$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\FreudensteinRothFunction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\McCormickFunction$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\McCormickFunction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\ParaboloidFunction$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\ParaboloidFunction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\QuarticFunction$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\QuarticFunction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\ReciprocalFunction$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\ReciprocalFunction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\RosenbrockFunction$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\functions\\RosenbrockFunction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\goldenSectionLSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\goldenSectionLSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\gradientDescentTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\gradientDescentTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\gradientDescentTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\gradientDescentTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\gradientDescent_AdamTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\gradientDescent_Mo2Test.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\gradientDescent_MoTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\gradientDescent_NoLSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\gridSearchLSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\gridSearchLSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\gridSearchTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\gridSearchTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\hungarianTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\hungarianTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\integerTabuSearchTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\integerTabuSearchTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\lassoAdmmTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\lassoAdmmTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\lassoAdmmTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\linear_opt\\CheckLP.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\linear_opt\\IntegerLP$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\linear_opt\\IntegerLP$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\linear_opt\\IntegerLP$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\linear_opt\\IntegerLP.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\linear_opt\\MinimizerLP.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\linear_opt\\Simplex2P$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\linear_opt\\Simplex2P$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\linear_opt\\Simplex2P.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\linear_opt\\integerLPTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\linear_opt\\simplex2PTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\linearopt\\QuadraticSimplex$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\linearopt\\QuadraticSimplex$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\linearopt\\QuadraticSimplex$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\linearopt\\QuadraticSimplex.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\linearopt\\quadraticSimplexTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\nLPTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\nLPTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\nelderMeadSimplex2Test.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\nelderMeadSimplexTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\newtonRaphsonTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\newtonRaphsonTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\newtonRaphsonTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\newton_NoLSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\newton_NoLSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\newton_NoLSTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\BFGS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\BFGS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\BFGS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\BFGS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\BFGS_NoLS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\BFGS_NoLS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\BFGS_NoLS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\BFGS_NoLS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\DM_LBFGS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\DM_LBFGS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\DM_LBFGS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\DM_LBFGS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\EvaluationLogic.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\FunctionEvaluation$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\FunctionEvaluation.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\FunctionOptimization$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\FunctionOptimization.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSBacktrackingArmijo$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSBacktrackingArmijo.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSBacktrackingOrthantWise$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSBacktrackingOrthantWise.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSBacktrackingStrongWolfe$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSBacktrackingStrongWolfe.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSBacktrackingWolfe$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSBacktrackingWolfe.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSCallbackData$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSCallbackData.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSIterationData$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSIterationData.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearch$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearch$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearch$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearch.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearchAlg$$anon$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearchAlg$$anon$2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearchAlg$$anon$3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearchAlg$$anon$4.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearchAlg$$anon$5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearchAlg$$anon$6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearchAlg$$anon$7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearchAlg$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearchAlg.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearchFailure$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearchFailure.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearchIncomplete$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearchIncomplete.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearchPrms$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearchPrms.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearchStep$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSLineSearchStep.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSMoreThuente$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSMoreThuente.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSPrms$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSPrms.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSResults$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSResults.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$10.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$11.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$12.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$13.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$14.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$15.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$16.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$17.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$18.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$19.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$20.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$21.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$22.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$23.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$24.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$25.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$26.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$27.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$28.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$29.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$30.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$31.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$32.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$33.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$34.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$35.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$36.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$6.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$8.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$$anon$9.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSReturnCode.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSVarEvaluationResults$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGSVarEvaluationResults.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGS_B$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGS_B$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGS_B$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGS_B.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGS_NoLS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGS_NoLS$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGS_NoLS$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LBFGS_NoLS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LineSearchTriInterval$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\LineSearchTriInterval.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\OptimizationLogic.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\OrthantWisePrms$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\OrthantWisePrms.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\QNewton$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\QNewton.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bFGSBealeFunction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bFGSBohachevsky1Function.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bFGSBohachevsky2Function.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bFGSBohachevsky3Function.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bFGSBoothFunction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bFGSCamel3Function.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bFGSCubeFunction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bFGSFreudensteinRothFunction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bFGSMcCormickFunction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bFGSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bFGSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bFGSTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bFGSTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bFGS_NoLSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bFGS_NoLSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bFGS_NoLSTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bFGS_NoLSTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bealeFunctionLBFGSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bohachevsky1FunctionLBFGSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bohachevsky2FunctionLBFGSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\bohachevsky3FunctionLBFGSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\boothFunctionLBFGSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\camel3FunctionLBFGSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\cubeFunctionLBFGSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\freudensteinRothFunctionLBFGSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\lBFGS_BTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\lBFGS_BTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\lBFGS_BTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\lBFGS_NoLSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\lBFGS_NoLSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\lBFGS_NoLSTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\mccormickFunctionDMLBFGSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newton\\mccormickFunctionLBFGSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newtonC\\FunctionDescriptors$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newtonC\\FunctionDescriptors.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newtonC\\FunctionOptimizationFFM$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newtonC\\FunctionOptimizationFFM.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newtonC\\LBFGS_FFM$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newtonC\\LBFGS_FFM$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newtonC\\LBFGS_FFM$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newtonC\\LBFGS_FFM.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newtonC\\MethodTypes$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newtonC\\MethodTypes.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newtonC\\OptimizationLogicFFM.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newtonC\\OptimizationMethodHandlesFFM$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newtonC\\OptimizationMethodHandlesFFM.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newtonC\\bohachevsky2FunctionLBFGS_FFMTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\quasi_newtonC\\boothFunctionLBFGS_FFMTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\sPSATest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\tabuSearchTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\tabuSearchTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\wolfeConditionsTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\wolfeLS2Test.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\wolfeLS2Test2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\wolfeLS2Test3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\wolfeLS2Test4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\wolfeLS3Test.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\wolfeLS3Test2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\wolfeLS3Test3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\wolfeLS3Test4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\wolfeLSTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\wolfeLSTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\optimization\\wolfeLSTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Bernoulli$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Bernoulli.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Beta$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Beta.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Binomial$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Binomial.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\CDF$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\CDF$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\CDF$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\CDF.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Cauchy$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Cauchy.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\ChiSquare$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\ChiSquare.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Dice$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Dice.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Dir$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Dir.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Discrete$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Discrete.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Erlang$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Erlang.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Exponential$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Exponential.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Fisher$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Fisher.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Gamma$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Gamma.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Geometric$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Geometric.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\HyperExponential$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\HyperExponential.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\HyperExponential_$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\HyperExponential_.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\HyperGeometric$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\HyperGeometric.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Known$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Known.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\LogNormal$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\LogNormal.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Logistic$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Logistic.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Multinomial$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Multinomial.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\NHPoissonProcess$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\NHPoissonProcess.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\NegativeBinomial$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\NegativeBinomial.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Normal$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Normal.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\NormalMat$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\NormalMat.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\NormalTen$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\NormalTen.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\NormalVec$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\NormalVec.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\NormalVec_$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\NormalVec_.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\NormalVec_c$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\NormalVec_c.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\PermutedVecD$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\PermutedVecD.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\PermutedVecI$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\PermutedVecI.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Poisson$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Poisson.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\PoissonProcess$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\PoissonProcess$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\PoissonProcess$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\PoissonProcess.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\PowerLaw$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\PowerLaw.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\ProbabilityVec$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\ProbabilityVec.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Quantile$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Quantile$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Quantile$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Quantile.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RNG$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RNG$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RNG.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RNGStream$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RNGStream.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RNGTester$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RNGTester$CoGram$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RNGTester.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Randi$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Randi.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Randi0$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Randi0.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandiU0$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandiU0.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Random$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Random.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Random0$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Random0.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Random2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Random2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Random3$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Random3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomMatD$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomMatD.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomSeeds$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomSeeds.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomSeeds3$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomSeeds3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomSet$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomSet.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomSetS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomSetS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomSetW$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomSetW.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomStr$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomStr.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomTenD$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomTenD.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomVecD$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomVecD.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomVecD_$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomVecD_.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomVecI$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomVecI.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomVecS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomVecS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomVecSample$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomVecSample.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomVecTrend$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomVecTrend.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomWord$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\RandomWord.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Sharp$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Sharp.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\StdNormal$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\StdNormal.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\StreamMaker$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\StreamMaker$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\StreamMaker$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\StreamMaker.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\StreamMaker3$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\StreamMaker3$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\StudentT$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\StudentT.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\TimeVariate.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Trapezoidal$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Trapezoidal.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Triangular$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Triangular.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Trinomial$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Trinomial.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Uniform$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Uniform.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Variate$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Variate$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Variate$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Variate.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateMat$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateMat$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateMat$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateMat.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateSet$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateSet$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateSet$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateSet.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateStr$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateStr$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateTen$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateTen$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateTen$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateTen.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateVec$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateVec$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateVec$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\VariateVec.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Weibull$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\Weibull.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\cDFTest_ChiSquare.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\cDFTest_Empirical.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\cDFTest_Exponential.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\cDFTest_Fisher.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\cDFTest_Fisher2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\cDFTest_Normal.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\cDFTest_Normal_Diff.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\cDFTest_StudentT.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\cDFTest_Uniform.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\cDFTest_Weibull.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\cLTTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\diceTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\poissonProcessTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\quantileTest_ChiSquare.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\quantileTest_Empirical.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\quantileTest_Exponential.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\quantileTest_Fisher.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\quantileTest_Normal.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\quantileTest_StudentT.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\quantileTest_Uniform.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\rNGTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\randomStrTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\randomWordTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\streamMaker3.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\streamMakerGen.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\variateMatTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\variateSetTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\variateSetTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\variateSetTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\variateTenTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\variateTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\variateVecTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\random\\variateVecTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\readFileTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\readFileTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\redirectOutTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\ringTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\runCalc.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\runCalcHelp.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Arc$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Arc.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Arrow$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Arrow$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Arrow$package$Canvas$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Arrow$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Arrow.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Base$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Base$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\BorderLayout$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\BorderLayout.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Colors$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Colors$Randi$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Colors$Randi.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Colors$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Colors$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Colors.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\CurvilinearShape.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Ellipse$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Ellipse.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Frame.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Hexagon$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Hexagon.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\ImageWriter$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\ImageWriter$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Line$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Line.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Octagon$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Octagon.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Path$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Path.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Pentagon$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Pentagon.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Polygon$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Polygon$package$Canvas$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Polygon$package$Canvas$2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Polygon$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Polygon.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\QArrow$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\QArrow$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\QArrow$package$Canvas$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\QArrow$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\QArrow.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\QCurve$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\QCurve$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\QCurve$package$Canvas$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\QCurve$package$QCurveAnimator$1$Canvas.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\QCurve$package$QCurveAnimator$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\QCurve$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\QCurve.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Quad$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Quad.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Rectangle$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Rectangle.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\RoundRectangle$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\RoundRectangle.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Shapes$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Shapes$package$Canvas$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Shapes$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Transform.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Triangle$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\Triangle.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\TrigConstants$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\TrigConstants.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\VizFrame$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\VizFrame.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\ZoomablePanel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\arrowTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\colorsTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\lineTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\polygonTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\polygonTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\qArrowTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\qCurveTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\qCurveTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala2d\\writeImageTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Clock.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Road3d$$anon$10.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Road3d$$anon$2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Road3d$$anon$3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Road3d$$anon$4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Road3d$$anon$5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Road3d$$anon$6.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Road3d$$anon$7.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Road3d$$anon$8.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Road3d$$anon$9.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Road3d$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Road3d$Direction$$anon$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Road3d$Direction$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Road3d$Direction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Road3d.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Sink$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Sink.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Source$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Source.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Vehicle$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\scala3d\\Vehicle.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\setExtTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\Completion.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\Coroutine$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\Coroutine.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\CoroutineTest$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\CoroutineTest$Cor1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\CoroutineTest$Cor2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\CoroutineTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\Identifiable$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\Identifiable.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\Locatable.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\Locatable2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\Modelable.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\Monitor$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\Monitor$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\Monitor$package$Mon$2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\Monitor$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\Monitor.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\NH_PoissonProcess$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\NH_PoissonProcess$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\NH_PoissonProcess$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\NH_PoissonProcess.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\PoissonProcess$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\PoissonProcess$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\PoissonProcess$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\PoissonProcess.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\Temporal.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\activity\\ArcD$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\activity\\ArcD.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\activity\\ArcI$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\activity\\ArcI.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\activity\\Counter$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\activity\\Counter.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\activity\\PetriNet$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\activity\\PetriNet$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\activity\\PetriNet.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\activity\\PetriNetRules.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\activity\\PetriNetRulesTest$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\activity\\PetriNetRulesTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\activity\\PlaceD.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\activity\\PlaceI.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\activity\\Transition.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\activity\\petriNetTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\EdgeAgents.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Gate$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Gate.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Junction$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Junction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Link$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Link.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Model$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Model$Reporter$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Model$Reporter.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Model.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Monitor$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Monitor$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Monitor$package$Mon$2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Monitor$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Monitor.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\QueueOps.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Resource$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Resource.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Route$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Route.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\SimAgent$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\SimAgent$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\SimAgent$package$TestAgent$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\SimAgent$package$TestAgent$3$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\SimAgent$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\SimAgent.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Sink$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Sink.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Source$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Source.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Statistical.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Transport$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\Transport.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\WaitQueue$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\WaitQueue.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\WaitQueue_LCFS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\WaitQueue_LCFS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\Bank$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\Bank$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\BankModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\BankModel$Customer$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\BankModel$Customer.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\BankModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\CallCenter$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\CallCenter$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\CallCenterModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\CallCenterModel$Call$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\CallCenterModel$Call.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\CallCenterModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\Traffic2L$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\Traffic2L$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\Traffic2LModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\Traffic2LModel$Car$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\Traffic2LModel$Car.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\Traffic2LModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\Traffic4L$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\Traffic4L$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\Traffic4LModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\Traffic4LModel$Car$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\Traffic4LModel$Car.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\Traffic4LModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\UGABusRoutes$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\UGABusRoutes$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\UGABusRoutesModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\UGABusRoutesModel$Bus$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\UGABusRoutesModel$Bus.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\UGABusRoutesModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\runBank.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\runCallCenter.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\runTraffic2L.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\runTraffic2L1.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\runTraffic4L.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\runUGABusRoutes.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\example_1\\runUGABusRoutes1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\monitorTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\agent\\simAgentTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\CausalLink$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\CausalLink.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\Entity$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\Entity.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\Event$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\Event.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\EventNode$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\EventNode.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\Ex_Template$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\Ex_Template$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\Model$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\Model.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\SOMEModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\SOMEModel$Arrival$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\SOMEModel$Arrival.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\SOMEModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\WaitQueue$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\WaitQueue.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\WaitQueue_LCFS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\WaitQueue_LCFS.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\Bank$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\Bank$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\Bank2$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\Bank2$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\Bank3$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\Bank3$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel$Arrival$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel$Arrival.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel$Departure$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel$Departure.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel2$Arrival$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel2$Arrival.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel2$Departure$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel2$Departure.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel3$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel3$Arrival$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel3$Arrival.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel3$Departure$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel3$Departure.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\BankModel3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\CallCenter$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\CallCenter$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\CallCenter2$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\CallCenter2$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\CallCenterModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\CallCenterModel$Arrival$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\CallCenterModel$Arrival.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\CallCenterModel$Departure$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\CallCenterModel$Departure.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\CallCenterModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\CallCenterModel2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\CallCenterModel2$Arrival$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\CallCenterModel2$Arrival.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\CallCenterModel2$Departure$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\CallCenterModel2$Departure.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\CallCenterModel2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\FastFood$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\FastFood$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\FastFoodModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\FastFoodModel$Arrival$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\FastFoodModel$Arrival.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\FastFoodModel$Departure$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\FastFoodModel$Departure.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\FastFoodModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\Machine$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\Machine$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\MachineModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\MachineModel$Arrival$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\MachineModel$Arrival.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\MachineModel$FinishMachine1$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\MachineModel$FinishMachine1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\MachineModel$FinishMachine2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\MachineModel$FinishMachine2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\MachineModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\Poisson$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\Poisson$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\Poisson2$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\Poisson2$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\PoissonModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\PoissonModel$Arrival$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\PoissonModel$Arrival.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\PoissonModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\PoissonModel2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\PoissonModel2$Arrival$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\PoissonModel2$Arrival.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\PoissonModel2.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\runBank.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\runBank2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\runBank3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\runCallCenter.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\runCallCenter2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\runFastFood.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\runMachine.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\runPoisson.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\example_1\\runPoisson2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\event\\runSOME.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monitorTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\Cards$$anon$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\Cards$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\Cards$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\Cards$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\Cards.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\GrainDropping$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\GrainDropping$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\GrainDropping.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\MonteCarloIntegration$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\MonteCarloIntegration$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\MonteCarloIntegration$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\MonteCarloIntegration.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\MontyHall$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\MontyHall$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\RollDice$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\RollDice$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\RollDice$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\RollDice.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\SphereVolume$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\SphereVolume$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\cardsTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\cardsTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\cardsTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\grainDroppingTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\monteCarloIntegrationTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\montyHall.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\rollDiceTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\rollDiceTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\rollDiceTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\monte_carlo\\sphereVolumeTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\nH_PoissonProcessTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\nH_PoissonProcessTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\poissonProcessTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Bus.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Component.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Dynamics.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Gate$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Gate.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\GippsDynamics$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\GippsDynamics.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\IDMDynamics$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\IDMDynamics.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Junction$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Junction.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Model$$anon$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Model$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Model.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Model_MBM$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Model_MBM.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Path$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Path$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Path$package$PathModel$1$Car$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Path$package$PathModel$1$Car.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Path$package$PathModel$1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Path$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Path.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Recorder$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Recorder.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Resource$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Resource.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\SimActor$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\SimActor.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Sink$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Sink.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Source$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Source$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Source$package$CarModel$2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Source$package$CarModel$2$Car$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Source$package$CarModel$2$Car.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Source$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Source.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Transport$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Transport.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\VSource$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\VSource$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\VSource$package$CarModel$2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\VSource$package$CarModel$2$Car$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\VSource$package$CarModel$2$Car.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\VSource$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\VSource.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\VTransport$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\VTransport.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Vehicle$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\Vehicle.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\WaitQueue$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\WaitQueue.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\WaitQueue_LCFS$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\WaitQueue_LCFS.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\Bank$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\Bank$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\BankModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\BankModel$Customer$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\BankModel$Customer.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\BankModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\CallCenter$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\CallCenter$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\CallCenterModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\CallCenterModel$Call$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\CallCenterModel$Call.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\CallCenterModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\EmerDept$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\EmerDept$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\EmerDeptModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\EmerDeptModel$Patient$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\EmerDeptModel$Patient.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\EmerDeptModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\Loop$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\Loop$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\LoopModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\LoopModel$Car1$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\LoopModel$Car1.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\LoopModel$Car2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\LoopModel$Car2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\LoopModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\Machine$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\Machine$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\MachineModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\MachineModel$Part$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\MachineModel$Part.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\MachineModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\OneWayStreet$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\OneWayStreet$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\OneWayStreetModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\OneWayStreetModel$Car$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\OneWayStreetModel$Car.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\OneWayStreetModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\OneWayVehicle$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\OneWayVehicle$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\OneWayVehicleModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\OneWayVehicleModel$Car$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\OneWayVehicleModel$Car.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\OneWayVehicleModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\Road$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\Road$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\RoadModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\RoadModel$Car1$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\RoadModel$Car1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\RoadModel$Car2$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\RoadModel$Car2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\RoadModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\Traffic$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\Traffic$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficDyn$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficDyn$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficDynModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficDynModel$Car$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficDynModel$Car.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficDynModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficLaneChange$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficLaneChange$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficLaneChangeModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficLaneChangeModel$Car$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficLaneChangeModel$Car.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficLaneChangeModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficModel$Car$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficModel$Car.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficModelTurn$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficModelTurn$Car$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficModelTurn$Car.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficModelTurn.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficTurn$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\TrafficTurn$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\UGA_Bus$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\UGA_Bus$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\UGA_BusModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\UGA_BusModel$Rider$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\UGA_BusModel$Rider.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\UGA_BusModel$UGA_Bus$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\UGA_BusModel$UGA_Bus.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\UGA_BusModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\runBank.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\runCallCenter.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\runEmerDept.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\runLoop.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\runMachine.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\runOneWayStreet.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\runOneWayVehicle.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\runRoad.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\runTraffic.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\runTrafficDyn.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\runTrafficLaneChange.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\runTrafficTurn.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_1\\runUGA_Bus.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_MBM\\Bank$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_MBM\\Bank$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_MBM\\BankModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_MBM\\BankModel$Customer$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_MBM\\BankModel$Customer.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_MBM\\BankModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_MBM\\runBank.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_MBM\\testCorrBank.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_MIR\\Bank$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_MIR\\Bank$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_MIR\\BankModel$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_MIR\\BankModel$Customer$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_MIR\\BankModel$Customer.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_MIR\\BankModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\example_MIR\\runBank.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\pathTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\sourceTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\process\\vSourceTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\JacksonNet$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\JacksonNet$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\JacksonNet$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\JacksonNet.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\MGc_Queue$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\MGc_Queue$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\MGc_Queue$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\MGc_Queue.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\MM1_Queue.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\MM2_Queue.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\MM_Queue$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\MM_Queue$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\MM_Queue.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\MMc_Queue.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\MMck_Queue$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\MMck_Queue$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\MMck_Queue$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\MMck_Queue.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\jacksonNetTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\mGc_QueueTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\mM_QueueTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\queueingnet\\mMck_QueueTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\runCoroutineTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\state\\MarkovCT.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\state\\MarkovChain$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\state\\MarkovChain$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\state\\MarkovChain.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\state\\MarkovChainCT$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\state\\MarkovChainCT$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\state\\markovCTTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\state\\markovChainTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\state\\markovChainTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\state\\markovChainTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\state\\markovChainTest4.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\state\\markovChainTest5.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\tableau\\CallCenterModel.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\tableau\\Ex_Bank$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\tableau\\Ex_Bank$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\tableau\\Ex_CallCenter$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\tableau\\Ex_CallCenter$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\tableau\\Model$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\tableau\\Model$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\tableau\\Model$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\tableau\\Model.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\tableau\\Queue_MM1$package$.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\tableau\\Queue_MM1$package.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\tableau\\runEx_Bank.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\tableau\\runEx_CallCenter.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\tableau\\runModelTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New 
Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\simulation\\tableau\\runQueue_MM1.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\skipListTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\timeNumTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\timeNumTest2.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\timeNumTest3.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\timerTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\unicodeTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\classes\\scalation\\valueTypeTest.class","C:\\Users\\youse\\OneDrive\\Documents\\New Scalation\\scalation_2.0\\target\\scala-3.6.4\\zinc\\inc_compile_3.zip"]] \ No newline at end of file diff --git a/target/streams/compile/_global/_global/discoveredMainClasses/data b/target/streams/compile/_global/_global/discoveredMainClasses/data deleted file mode 100644 index 3bcf4ceaa..000000000 --- a/target/streams/compile/_global/_global/discoveredMainClasses/data +++ /dev/null @@ -1 +0,0 @@ 
-["scalation.animation.dgAnimatorTest","scalation.animation.dgAnimatorTest2","scalation.animation.dgAnimatorTest3","scalation.animation.dgraphTest","scalation.animation.simpleAnimator2Test","scalation.animation.simpleAnimatorTest","scalation.biMapTest","scalation.boolTest","scalation.calculus.autoDiffTest","scalation.calculus.autoDiffTest2","scalation.calculus.b_SplineTest","scalation.calculus.b_SplineTest2","scalation.calculus.b_SplineTest3","scalation.calculus.b_SplineTest4","scalation.calculus.dB_SplineTest","scalation.calculus.dB_SplineTest2","scalation.calculus.dFourierTest","scalation.calculus.dRadialTest","scalation.calculus.differentialTest","scalation.calculus.differentialTest2","scalation.calculus.fFTTest","scalation.calculus.fourierTest","scalation.calculus.hilbertTest","scalation.calculus.integralTest","scalation.calculus.integralTest2","scalation.calculus.polyTest","scalation.calculus.radialTest","scalation.cforTest","scalation.circularQueueTest","scalation.commonFunTest","scalation.coordinatesTest","scalation.coordinatesTest2","scalation.coordinatesTest3","scalation.coordinatesTest4","scalation.counterTest","scalation.database.bpNodeTest","scalation.database.bpNodeTest2","scalation.database.bpNodeTest3","scalation.database.bpTreeMapTest","scalation.database.bpTreeMapTest2","scalation.database.bpTreeMapTest3","scalation.database.bpTreeMapTest4","scalation.database.bpTreeMapTest5","scalation.database.graph.edgeTest","scalation.database.graph.edgeTypeTest","scalation.database.graph.pGraphTest","scalation.database.graph.pGraphTest2","scalation.database.graph.vertexTest","scalation.database.graph.vertexTypeTest","scalation.database.graph_pm.dualIsoTest","scalation.database.graph_pm.dualIsoTest2","scalation.database.graph_pm.dualIsoTest3","scalation.database.graph_pm.dualSimTest","scalation.database.graph_pm.dualSimTest2","scalation.database.graph_pm.dualSimTest3","scalation.database.graph_pm.graphDFSTest","scalation.database.graph_pm.graphGenTest","scalatio
n.database.graph_pm.graphGenTest2","scalation.database.graph_pm.graphGenTest3","scalation.database.graph_pm.graphGenTest4","scalation.database.graph_pm.graphGenTest5","scalation.database.graph_pm.graphGenTest6","scalation.database.graph_pm.graphGenTest7","scalation.database.graph_pm.graphGenTest8","scalation.database.graph_pm.graphIOTest","scalation.database.graph_pm.graphMetricsTest","scalation.database.graph_pm.graphSimTest","scalation.database.graph_pm.graphSimTest2","scalation.database.graph_pm.graphSimTest3","scalation.database.graph_pm.graphSimTest4","scalation.database.graph_pm.graphTest","scalation.database.graph_pm.graphTest2","scalation.database.graph_pm.graphTest4","scalation.database.graph_pm.matchAnswersTest","scalation.database.graph_pm.shortestPathTest","scalation.database.graph_pm.shortestPathTest2","scalation.database.graph_pm.topSortTest","scalation.database.graph_relation.vertexTypeTest","scalation.database.javaMapTest","scalation.database.linHashMapTest","scalation.database.logic.sATsolverTest","scalation.database.makeSchemaTest","scalation.database.makeSchemaTest2","scalation.database.minSpanningTreeTest","scalation.database.minSpanningTreeTest2","scalation.database.minSpanningTreeTest3","scalation.database.minSpanningTreeTest4","scalation.database.mugraph_pm.matchAnswersTest","scalation.database.mugraph_pm.muDualIsoTest","scalation.database.mugraph_pm.muDualIsoTest2","scalation.database.mugraph_pm.muDualIsoTest3","scalation.database.mugraph_pm.muDualSimTest","scalation.database.mugraph_pm.muDualSimTest2","scalation.database.mugraph_pm.muDualSimTest3","scalation.database.mugraph_pm.muGraphGenTest","scalation.database.mugraph_pm.muGraphGenTest2","scalation.database.mugraph_pm.muGraphGenTest3","scalation.database.mugraph_pm.muGraphSimTest","scalation.database.mugraph_pm.muGraphSimTest2","scalation.database.mugraph_pm.muGraphSimTest3","scalation.database.mugraph_pm.muGraphTest","scalation.database.mugraph_pm.muGraphTest2","scalation.database.multiM
apTest","scalation.database.normalizationTest","scalation.database.normalizationTest2","scalation.database.normalizationTest3","scalation.database.normalizationTest4","scalation.database.normalizationTest5","scalation.database.normalizationTest6","scalation.database.normalizationTest7","scalation.database.normalizationTest8","scalation.database.relation.relationTest","scalation.database.relation.relationTest11","scalation.database.relation.relationTest2","scalation.database.relation.relationTest5","scalation.database.relation.relationTest7","scalation.database.relation.relationTest8","scalation.database.relation.relationTest9","scalation.database.relation.showTables","scalation.database.relation.tableGenTest","scalation.database.spanningTreeTest","scalation.database.tNodeTest","scalation.database.table.PurchaseOrderDB","scalation.database.table.bankDB","scalation.database.table.bankDB2","scalation.database.table.gTableTest","scalation.database.table.gTableTest2","scalation.database.table.gTableTest3","scalation.database.table.kGTableTest","scalation.database.table.kGTableTest2","scalation.database.table.lTableTest","scalation.database.table.lTableTest2","scalation.database.table.lTableTest3","scalation.database.table.lTableTest4","scalation.database.table.movieDB","scalation.database.table.showTabs","scalation.database.table.tableGenTest","scalation.database.table.tableTest","scalation.database.table.tableTest2","scalation.database.table.tableTest3","scalation.database.table.timer_function","scalation.database.table.vTableTest","scalation.database.table.vTableTest2","scalation.database.timeIntervalTest","scalation.database.timeIntervalTest2","scalation.database.timeIntervalTest3","scalation.database.timeOfWeekTest","scalation.database.treeTest","scalation.database.treeTest2","scalation.database.treeTest3","scalation.database.triplegraph.tripleGraphSimTest","scalation.database.triplegraph.tripleGraphTest4","scalation.doublyLinkedListTest","scalation.dynamics.ballFlig
ht","scalation.dynamics.dormandPrinceTest","scalation.dynamics.dormandPrinceTest2","scalation.dynamics.dormandPrinceTest3","scalation.dynamics.dormandPrinceTest4","scalation.dynamics.dynamicEqTest","scalation.dynamics.firstOrderPDETest","scalation.dynamics.firstOrderPDETest2","scalation.dynamics.firstOrderPDETest3","scalation.dynamics.linearDiffEqTest","scalation.dynamics.modRosenbrockTest","scalation.dynamics.modRosenbrockTest2","scalation.dynamics.parabolicPDETest","scalation.dynamics.radauTest","scalation.dynamics.reactions","scalation.dynamics.rungeKutta2Test","scalation.dynamics.rungeKutta2Test2","scalation.dynamics.rungeKutta3Test","scalation.dynamics.rungeKutta3Test2","scalation.dynamics.rungeKutta3Test3","scalation.dynamics.rungeKuttaTest","scalation.dynamics.rungeKuttaTest2","scalation.easyWriterTest","scalation.fibTest","scalation.genIndexHtml","scalation.hyperParameterTest","scalation.hyperParameterTest2","scalation.latLongTest","scalation.latLongTest2","scalation.latLongTest3","scalation.latLongTest4","scalation.makeVectorI","scalation.mathstat.PivotingTest","scalation.mathstat.bidiagonalTest","scalation.mathstat.combinatoricsTest","scalation.mathstat.combinatoricsTest2","scalation.mathstat.complexTest","scalation.mathstat.correlogramTest","scalation.mathstat.eigenTest","scalation.mathstat.fac_CholeskyTest","scalation.mathstat.fac_CholeskyTest2","scalation.mathstat.fac_CholeskyTest3","scalation.mathstat.fac_InverseTest","scalation.mathstat.fac_LQTest","scalation.mathstat.fac_LUTest","scalation.mathstat.fac_LUTest2","scalation.mathstat.fac_LUTest3","scalation.mathstat.fac_QRTest","scalation.mathstat.fac_QRTest2","scalation.mathstat.fac_QR_RRTest","scalation.mathstat.fac_SVDTest","scalation.mathstat.fac_SVDTest2","scalation.mathstat.fac_SVDTest3","scalation.mathstat.fac_SVDTest4","scalation.mathstat.histogramTest","scalation.mathstat.householderTest","scalation.mathstat.inverseTest","scalation.mathstat.matrixCalc0","scalation.mathstat.matrixCalc2","scalati
on.mathstat.matrixCalc3","scalation.mathstat.matrixCalc4","scalation.mathstat.matrixD2Test","scalation.mathstat.matrixD2Test2","scalation.mathstat.matrixDTest","scalation.mathstat.matrixDTest2","scalation.mathstat.matrixDTest3","scalation.mathstat.matrixDTest4","scalation.mathstat.matrixDTest5","scalation.mathstat.matrixDTest6","scalation.mathstat.matrixDTest7","scalation.mathstat.plotCTest","scalation.mathstat.plotMTest","scalation.mathstat.plotMTest2","scalation.mathstat.plotTest","scalation.mathstat.probabilityTest","scalation.mathstat.probabilityTest2","scalation.mathstat.probabilityTest3","scalation.mathstat.probabilityTest4","scalation.mathstat.probabilityTest5","scalation.mathstat.probabilityTest6","scalation.mathstat.rTensor4DTest","scalation.mathstat.rTensorDTest","scalation.mathstat.statTableTest","scalation.mathstat.statisticTest","scalation.mathstat.stats4TSTest","scalation.mathstat.tensorDTest","scalation.mathstat.tensorDTest2","scalation.mathstat.tensorDTest3","scalation.mathstat.timeStatisticTest","scalation.mathstat.tnT_SplitTest","scalation.mathstat.transformTest","scalation.mathstat.transformTest2","scalation.mathstat.transformTest3","scalation.mathstat.vMatrixDTest","scalation.mathstat.vectorCTest","scalation.mathstat.vectorCTest2","scalation.mathstat.vectorCTest3","scalation.mathstat.vectorDTest","scalation.mathstat.vectorDTest2","scalation.mathstat.vectorDTest3","scalation.mathstat.vectorDTest4","scalation.mathstat.vectorDTest5","scalation.mathstat.vectorDTest6","scalation.mathstat.vectorITest","scalation.mathstat.vectorLTest","scalation.mathstat.vectorSTest","scalation.mathstat.vectorSTest2","scalation.mathstat.vectorTTest","scalation.mathstat.vectorTTest2","scalation.mergeSortIndirectTest","scalation.mergeSortIndirectTest2","scalation.modeling.activationFunTest","scalation.modeling.activationFunTest2","scalation.modeling.activationFunTest3","scalation.modeling.activationFunTest4","scalation.modeling.activationFunTest5","scalation.modeling.acti
vationFunTest6","scalation.modeling.classifying.baggingTreesTest","scalation.modeling.classifying.baggingTreesTest2","scalation.modeling.classifying.baggingTreesTest3","scalation.modeling.classifying.baggingTreesTest4","scalation.modeling.classifying.baggingTreesTest5","scalation.modeling.classifying.baggingTreesTest6","scalation.modeling.classifying.baggingTreesTest7","scalation.modeling.classifying.bayesClassifierTest","scalation.modeling.classifying.bayesClassifierTest2","scalation.modeling.classifying.classifierTest","scalation.modeling.classifying.decisionTreeTest","scalation.modeling.classifying.decisionTree_C45Test","scalation.modeling.classifying.decisionTree_C45Test2","scalation.modeling.classifying.decisionTree_C45Test3","scalation.modeling.classifying.decisionTree_C45Test4","scalation.modeling.classifying.decisionTree_C45Test5","scalation.modeling.classifying.decisionTree_C45wpTest","scalation.modeling.classifying.decisionTree_C45wpTest2","scalation.modeling.classifying.decisionTree_ID3Test","scalation.modeling.classifying.decisionTree_ID3Test2","scalation.modeling.classifying.decisionTree_ID3Test3","scalation.modeling.classifying.decisionTree_ID3wpTest","scalation.modeling.classifying.decisionTree_ID3wpTest2","scalation.modeling.classifying.decisionTree_ID3wpTest3","scalation.modeling.classifying.example_BreastCancerTest","scalation.modeling.classifying.example_DiabetesTest","scalation.modeling.classifying.example_IrisTest","scalation.modeling.classifying.example_PlayTennisTest","scalation.modeling.classifying.example_PlayTennis_ContTest","scalation.modeling.classifying.fitCTest","scalation.modeling.classifying.fitCTest2","scalation.modeling.classifying.fitCTest3","scalation.modeling.classifying.fitCTest4","scalation.modeling.classifying.hiddenMarkovTest","scalation.modeling.classifying.hiddenMarkovTest2","scalation.modeling.classifying.hiddenMarkovTest3","scalation.modeling.classifying.hiddenMarkovTest4","scalation.modeling.classifying.kNN_ClassifierTes
t","scalation.modeling.classifying.kNN_ClassifierTest2","scalation.modeling.classifying.kNN_ClassifierTest3","scalation.modeling.classifying.kNN_ClassifierTest4","scalation.modeling.classifying.linDiscAnalyisTest","scalation.modeling.classifying.logisticRegressionTest","scalation.modeling.classifying.logisticRegressionTest2","scalation.modeling.classifying.naiveBayesRTest","scalation.modeling.classifying.naiveBayesRTest2","scalation.modeling.classifying.naiveBayesTest","scalation.modeling.classifying.naiveBayesTest2","scalation.modeling.classifying.naiveBayesTest3","scalation.modeling.classifying.naiveBayesTest4","scalation.modeling.classifying.nullModelTest","scalation.modeling.classifying.randomForestTest","scalation.modeling.classifying.randomForestTest2","scalation.modeling.classifying.randomForestTest3","scalation.modeling.classifying.randomForestTest4","scalation.modeling.classifying.randomForestTest5","scalation.modeling.classifying.randomForestTest6","scalation.modeling.classifying.randomForestTest7","scalation.modeling.classifying.simpleLDATest","scalation.modeling.classifying.simpleLDATest2","scalation.modeling.classifying.simpleLogisticRegressionTest","scalation.modeling.classifying.simpleLogisticRegressionTest3","scalation.modeling.classifying.simpleLogisticRegressionTest4","scalation.modeling.classifying.simpleLogisticRegressionTest5","scalation.modeling.classifying.simpleLogisticRegressionTest6","scalation.modeling.classifying.supportVectorMachineTest","scalation.modeling.classifying.supportVectorMachineTest2","scalation.modeling.classifying.tANBayesTest","scalation.modeling.classifying.tANBayesTest2","scalation.modeling.classifying.tANBayesTest3","scalation.modeling.classifying.tANBayesTest4","scalation.modeling.clustering.clusteringPredictorTest","scalation.modeling.clustering.clusteringPredictorTest2","scalation.modeling.clustering.clusteringPredictorTest3","scalation.modeling.clustering.gapStatisticTest","scalation.modeling.clustering.gapStatisticT
est2","scalation.modeling.clustering.hierClustererTest","scalation.modeling.clustering.hierClustererTest2","scalation.modeling.clustering.kMeansClusterer2Test","scalation.modeling.clustering.kMeansClusterer2Test2","scalation.modeling.clustering.kMeansClusterer2Test3","scalation.modeling.clustering.kMeansClusterer2Test4","scalation.modeling.clustering.kMeansClustererHWTest","scalation.modeling.clustering.kMeansClustererHWTest2","scalation.modeling.clustering.kMeansClustererHWTest3","scalation.modeling.clustering.kMeansClustererPPTest","scalation.modeling.clustering.kMeansClustererPPTest2","scalation.modeling.clustering.kMeansClustererPPTest3","scalation.modeling.clustering.kMeansClustererTest","scalation.modeling.clustering.kMeansClustererTest2","scalation.modeling.clustering.kMeansClustererTest3","scalation.modeling.clustering.kMeansClustererTest4","scalation.modeling.clustering.kMeansPPClustererTest","scalation.modeling.clustering.kMeansPPClustererTest2","scalation.modeling.clustering.kMeansPPClustererTest3","scalation.modeling.clustering.kMeansPPClustererTest4","scalation.modeling.clustering.markovClustererTest","scalation.modeling.clustering.markovClustererTest2","scalation.modeling.clustering.randomGraphTest","scalation.modeling.clustering.tightClustererTest","scalation.modeling.example_AutoMPG_Correlation","scalation.modeling.example_AutoMPG_NullModel","scalation.modeling.example_AutoMPG_QuadRegression","scalation.modeling.example_AutoMPG_Regression","scalation.modeling.example_AutoMPG_SimpleRegression","scalation.modeling.example_AutoMPG_SimplerRegression","scalation.modeling.example_BPressureTest","scalation.modeling.example_BPressureTest2","scalation.modeling.example_BasketBallTest","scalation.modeling.expRegressionTest","scalation.modeling.expRegressionTest2","scalation.modeling.expRegressionTest3","scalation.modeling.fitTest","scalation.modeling.fitTest2","scalation.modeling.forecasting.aRIMATest","scalation.modeling.forecasting.aRIMATest2","scalation.mode
ling.forecasting.aRIMATest3","scalation.modeling.forecasting.aRIMA_diffTest","scalation.modeling.forecasting.aRMATest","scalation.modeling.forecasting.aRMATest2","scalation.modeling.forecasting.aRMATest3","scalation.modeling.forecasting.aRMATest4","scalation.modeling.forecasting.aRMATest5","scalation.modeling.forecasting.aRMATest6","scalation.modeling.forecasting.aRMATest7","scalation.modeling.forecasting.aRTest","scalation.modeling.forecasting.aRTest2","scalation.modeling.forecasting.aRTest3","scalation.modeling.forecasting.aRTest4","scalation.modeling.forecasting.aRTest5","scalation.modeling.forecasting.aRXTest3","scalation.modeling.forecasting.aRXTest4","scalation.modeling.forecasting.aRXTest5","scalation.modeling.forecasting.aRX_DTest3","scalation.modeling.forecasting.aRX_DTest4","scalation.modeling.forecasting.aRX_QuadTest3","scalation.modeling.forecasting.aRX_QuadTest4","scalation.modeling.forecasting.aRX_QuadTest5","scalation.modeling.forecasting.aRX_Quad_DTest3","scalation.modeling.forecasting.aRX_Quad_DTest4","scalation.modeling.forecasting.aRX_SymbTest3","scalation.modeling.forecasting.aRX_SymbTest4","scalation.modeling.forecasting.aRX_Symb_DTest3","scalation.modeling.forecasting.aRX_Symb_DTest4","scalation.modeling.forecasting.aRYTest","scalation.modeling.forecasting.aRYTest2","scalation.modeling.forecasting.aRYTest3","scalation.modeling.forecasting.aRYTest4","scalation.modeling.forecasting.aRYTest5","scalation.modeling.forecasting.aRYTest6","scalation.modeling.forecasting.aRY_DTest","scalation.modeling.forecasting.aRY_DTest2","scalation.modeling.forecasting.aRY_DTest3","scalation.modeling.forecasting.aRY_DTest4","scalation.modeling.forecasting.aRY_QuadTest","scalation.modeling.forecasting.aRY_QuadTest2","scalation.modeling.forecasting.aRY_QuadTest3","scalation.modeling.forecasting.aRY_QuadTest4","scalation.modeling.forecasting.aRY_QuadTest5","scalation.modeling.forecasting.baselineTest","scalation.modeling.forecasting.dTWTest","scalation.modeling.forecas
ting.example_CovidTest","scalation.modeling.forecasting.example_CovidTest10","scalation.modeling.forecasting.example_CovidTest11","scalation.modeling.forecasting.example_CovidTest12","scalation.modeling.forecasting.example_CovidTest13","scalation.modeling.forecasting.example_CovidTest14","scalation.modeling.forecasting.example_CovidTest15","scalation.modeling.forecasting.example_CovidTest16","scalation.modeling.forecasting.example_CovidTest2","scalation.modeling.forecasting.example_CovidTest3","scalation.modeling.forecasting.example_CovidTest4","scalation.modeling.forecasting.example_CovidTest5","scalation.modeling.forecasting.example_CovidTest6","scalation.modeling.forecasting.example_CovidTest7","scalation.modeling.forecasting.example_CovidTest8","scalation.modeling.forecasting.example_CovidTest9","scalation.modeling.forecasting.example_GasFurnaceTest","scalation.modeling.forecasting.example_GasFurnaceTest2","scalation.modeling.forecasting.example_ILITest","scalation.modeling.forecasting.example_ILITest10","scalation.modeling.forecasting.example_ILITest2","scalation.modeling.forecasting.example_ILITest3","scalation.modeling.forecasting.example_ILITest5","scalation.modeling.forecasting.example_ILITest6","scalation.modeling.forecasting.example_ILITest7","scalation.modeling.forecasting.example_ILITest8","scalation.modeling.forecasting.example_ILITest9","scalation.modeling.forecasting.forecastMatrixTest","scalation.modeling.forecasting.forecastMatrixTest2","scalation.modeling.forecasting.forecastMatrixTest3","scalation.modeling.forecasting.forecastMatrixTest4","scalation.modeling.forecasting.multivar.aR_StarTest","scalation.modeling.forecasting.multivar.aR_StarTest2","scalation.modeling.forecasting.multivar.aR_StarTest3","scalation.modeling.forecasting.multivar.forecastTensorTest","scalation.modeling.forecasting.multivar.forecastTensorTest2","scalation.modeling.forecasting.multivar.randomWalk_StarTest","scalation.modeling.forecasting.multivar.randomWalk_StarTest2","sc
alation.modeling.forecasting.multivar.randomWalk_StarTest3","scalation.modeling.forecasting.multivar.vARTest","scalation.modeling.forecasting.multivar.vARTest2","scalation.modeling.forecasting.multivar.vARTest3","scalation.modeling.forecasting.multivar.vARTest4","scalation.modeling.forecasting.multivar.vARTest5","scalation.modeling.forecasting.multivar.vARTest6","scalation.modeling.forecasting.neuralforecasting.attentionTest","scalation.modeling.forecasting.neuralforecasting.attentionTest2","scalation.modeling.forecasting.neuralforecasting.attentionTest3","scalation.modeling.forecasting.neuralforecasting.attentionTest4","scalation.modeling.forecasting.neuralforecasting.attentionTest5","scalation.modeling.forecasting.neuralforecasting.gRUTest","scalation.modeling.forecasting.neuralforecasting.gRUTest2","scalation.modeling.forecasting.neuralforecasting.gRUTest3","scalation.modeling.forecasting.neuralforecasting.lSTMTest","scalation.modeling.forecasting.neuralforecasting.lSTMTest2","scalation.modeling.forecasting.neuralforecasting.lSTMTest3","scalation.modeling.forecasting.neuralforecasting.neuralNet_3L4TSTest","scalation.modeling.forecasting.neuralforecasting.neuralNet_3L4TSTest2","scalation.modeling.forecasting.neuralforecasting.neuralNet_3L4TSTest3","scalation.modeling.forecasting.neuralforecasting.neuralNet_XL4TSTest","scalation.modeling.forecasting.neuralforecasting.neuralNet_XL4TSTest2","scalation.modeling.forecasting.neuralforecasting.neuralNet_XL4TSTest3","scalation.modeling.forecasting.neuralforecasting.neuralNet_XL4TSTest4","scalation.modeling.forecasting.neuralforecasting.neuralNet_XL4TSTest5","scalation.modeling.forecasting.neuralforecasting.positionalEncTest","scalation.modeling.forecasting.neuralforecasting.rNNTest","scalation.modeling.forecasting.neuralforecasting.rNNTest2","scalation.modeling.forecasting.neuralforecasting.rNNTest3","scalation.modeling.forecasting.neuralforecasting.rNNTest4","scalation.modeling.forecasting.nullModelTest","scalation.model
ing.forecasting.nullModelTest2","scalation.modeling.forecasting.nullModelTest3","scalation.modeling.forecasting.nullModelTest4","scalation.modeling.forecasting.nullModelTest5","scalation.modeling.forecasting.periodogramTest","scalation.modeling.forecasting.randomWalkSTest","scalation.modeling.forecasting.randomWalkSTest2","scalation.modeling.forecasting.randomWalkSTest3","scalation.modeling.forecasting.randomWalkSTest4","scalation.modeling.forecasting.randomWalkTest","scalation.modeling.forecasting.randomWalkTest2","scalation.modeling.forecasting.randomWalkTest3","scalation.modeling.forecasting.randomWalkTest4","scalation.modeling.forecasting.randomWalkTest5","scalation.modeling.forecasting.randomWalkTest6","scalation.modeling.forecasting.sARYTest","scalation.modeling.forecasting.sARYTest2","scalation.modeling.forecasting.sARYTest3","scalation.modeling.forecasting.sARYTest4","scalation.modeling.forecasting.sARYTest5","scalation.modeling.forecasting.simpleExpSmoothingTest","scalation.modeling.forecasting.simpleExpSmoothingTest2","scalation.modeling.forecasting.simpleExpSmoothingTest3","scalation.modeling.forecasting.simpleExpSmoothingTest4","scalation.modeling.forecasting.simpleMovingAverageTest","scalation.modeling.forecasting.simpleMovingAverageTest2","scalation.modeling.forecasting.simpleMovingAverageTest3","scalation.modeling.forecasting.simpleMovingAverageTest4","scalation.modeling.forecasting.stationarity_KPSSTest","scalation.modeling.forecasting.stationarity_KPSSTest2","scalation.modeling.forecasting.stationarity_KPSSTest3","scalation.modeling.forecasting.stationaryTest","scalation.modeling.forecasting.stationaryTest2","scalation.modeling.forecasting.stationaryTest3","scalation.modeling.forecasting.tranARYTest","scalation.modeling.forecasting.tranARYTest2","scalation.modeling.forecasting.tranARYTest3","scalation.modeling.forecasting.tranARYTest4","scalation.modeling.forecasting.trendModelTest","scalation.modeling.forecasting.trendModelTest2","scalation.modelin
g.forecasting.trendModelTest3","scalation.modeling.forecasting.trendModelTest4","scalation.modeling.forecasting.weightedMovingAverageTest","scalation.modeling.forecasting.weightedMovingAverageTest2","scalation.modeling.forecasting.weightedMovingAverageTest3","scalation.modeling.forecasting.weightedMovingAverageTest4","scalation.modeling.forecasting_old.ARX_QuadTest","scalation.modeling.forecasting_old.ARX_QuadTest2","scalation.modeling.forecasting_old.ARX_QuadTest3","scalation.modeling.forecasting_old.ARX_QuadTest4","scalation.modeling.forecasting_old.ARX_QuadTest5","scalation.modeling.forecasting_old.ARX_QuadTest6","scalation.modeling.forecasting_old.aR1MATest","scalation.modeling.forecasting_old.aR1MATest2","scalation.modeling.forecasting_old.aR1MATest3","scalation.modeling.forecasting_old.aR1MATest4","scalation.modeling.forecasting_old.aR1MATest5","scalation.modeling.forecasting_old.aRIMATest","scalation.modeling.forecasting_old.aRIMATest2","scalation.modeling.forecasting_old.aRIMATest3","scalation.modeling.forecasting_old.aRIMATest4","scalation.modeling.forecasting_old.aRIMA_diffTest","scalation.modeling.forecasting_old.aRMATest","scalation.modeling.forecasting_old.aRMATest2","scalation.modeling.forecasting_old.aRMATest3","scalation.modeling.forecasting_old.aRMATest4","scalation.modeling.forecasting_old.aRMATest5","scalation.modeling.forecasting_old.aRMATest6","scalation.modeling.forecasting_old.aRMATest7","scalation.modeling.forecasting_old.aRTest","scalation.modeling.forecasting_old.aRTest2","scalation.modeling.forecasting_old.aRTest3","scalation.modeling.forecasting_old.aRTest4","scalation.modeling.forecasting_old.aRTest5","scalation.modeling.forecasting_old.aRTest6","scalation.modeling.forecasting_old.aRTest7","scalation.modeling.forecasting_old.aRXTest","scalation.modeling.forecasting_old.aRXTest2","scalation.modeling.forecasting_old.aRXTest3","scalation.modeling.forecasting_old.aRXTest4","scalation.modeling.forecasting_old.aRXTest5","scalation.modeling.for
ecasting_old.aRXTest6","scalation.modeling.forecasting_old.aRXTest7","scalation.modeling.forecasting_old.aRX_MVTest","scalation.modeling.forecasting_old.aRX_MVTest2","scalation.modeling.forecasting_old.aRX_MVTest3","scalation.modeling.forecasting_old.aRX_MVTest4","scalation.modeling.forecasting_old.aRX_MVTest5","scalation.modeling.forecasting_old.aRX_MVTest6","scalation.modeling.forecasting_old.aRX_Quad_MVTest","scalation.modeling.forecasting_old.aRX_Quad_MVTest2","scalation.modeling.forecasting_old.aRX_Quad_MVTest3","scalation.modeling.forecasting_old.aRX_Quad_MVTest4","scalation.modeling.forecasting_old.aRX_Quad_MVTest5","scalation.modeling.forecasting_old.aRX_Quad_MVTest6","scalation.modeling.forecasting_old.buildTensor4TSTest","scalation.modeling.forecasting_old.kalmanFilterTest","scalation.modeling.forecasting_old.nullModelTest","scalation.modeling.forecasting_old.nullModelTest2","scalation.modeling.forecasting_old.nullModelTest3","scalation.modeling.forecasting_old.quadSplineTest","scalation.modeling.forecasting_old.quadSplineTest2","scalation.modeling.forecasting_old.quadSplineTest3","scalation.modeling.forecasting_old.quadSplineTest4","scalation.modeling.forecasting_old.randomWalkTest","scalation.modeling.forecasting_old.randomWalkTest2","scalation.modeling.forecasting_old.randomWalkTest3","scalation.modeling.forecasting_old.randomWalkTest4","scalation.modeling.forecasting_old.regressionTreeGB4TS2Test","scalation.modeling.forecasting_old.regressionTreeGB4TS2Test2","scalation.modeling.forecasting_old.regressionTreeGB4TS2Test3","scalation.modeling.forecasting_old.regressionTreeGB4TS2Test4","scalation.modeling.forecasting_old.regressionTreeGB4TS2Test5","scalation.modeling.forecasting_old.regressionTreeGB4TS2Test6","scalation.modeling.forecasting_old.regressionTreeGB4TSTest","scalation.modeling.forecasting_old.regressionTreeGB4TSTest2","scalation.modeling.forecasting_old.regressionTreeGB4TSTest3","scalation.modeling.forecasting_old.regressionTreeGB4TSTest4","sca
lation.modeling.forecasting_old.regressionTreeGB4TSTest5","scalation.modeling.forecasting_old.regressionTreeGB4TSTest6","scalation.modeling.forecasting_old.regressionTreeGB4TSTest7","scalation.modeling.forecasting_old.regressionTreeMT4TSTest","scalation.modeling.forecasting_old.regressionTreeMT4TSTest2","scalation.modeling.forecasting_old.regressionTreeMT4TSTest3","scalation.modeling.forecasting_old.regressionTreeMT4TSTest4","scalation.modeling.forecasting_old.regressionTreeMT4TSTest5","scalation.modeling.forecasting_old.regressionTreeMT4TSTest6","scalation.modeling.forecasting_old.regressionTreeMT4TSTest7","scalation.modeling.forecasting_old.regressionTreeRF4TSTest","scalation.modeling.forecasting_old.regressionTreeRF4TSTest2","scalation.modeling.forecasting_old.regressionTreeRF4TSTest3","scalation.modeling.forecasting_old.regressionTreeRF4TSTest4","scalation.modeling.forecasting_old.regressionTreeRF4TSTest5","scalation.modeling.forecasting_old.regressionTreeRF4TSTest6","scalation.modeling.forecasting_old.regressionTreeRF4TSTest7","scalation.modeling.forecasting_old.regressionTreeRF_MT4TSTest","scalation.modeling.forecasting_old.regressionTreeRF_MT4TSTest2","scalation.modeling.forecasting_old.regressionTreeRF_MT4TSTest3","scalation.modeling.forecasting_old.regressionTreeRF_MT4TSTest4","scalation.modeling.forecasting_old.regressionTreeRF_MT4TSTest5","scalation.modeling.forecasting_old.regressionTreeRF_MT4TSTest6","scalation.modeling.forecasting_old.regressionTreeRF_MT4TSTest7","scalation.modeling.forecasting_old.rollingValidationTest","scalation.modeling.forecasting_old.rollingValidationTest2","scalation.modeling.forecasting_old.rollingValidationTest3","scalation.modeling.forecasting_old.rollingValidationTest4","scalation.modeling.forecasting_old.simpleExpSmoothingTest","scalation.modeling.forecasting_old.simpleExpSmoothingTest2","scalation.modeling.forecasting_old.simpleExpSmoothingTest3","scalation.modeling.forecasting_old.simpleExpSmoothingTest4","scalation.model
ing.forecasting_old.simpleExpSmoothingTest5","scalation.modeling.forecasting_old.simpleMovingAverageTest","scalation.modeling.forecasting_old.simpleMovingAverageTest2","scalation.modeling.forecasting_old.simpleMovingAverageTest3","scalation.modeling.forecasting_old.simpleMovingAverageTest4","scalation.modeling.forecasting_old.simpleMovingAverageTest5","scalation.modeling.forecasting_old.stationaryTest","scalation.modeling.forecasting_old.stationaryTest2","scalation.modeling.forecasting_old.stationaryTest3","scalation.modeling.forecasting_old.trendModelTest","scalation.modeling.forecasting_old.trendModelTest2","scalation.modeling.forecasting_old.trendModelTest3","scalation.modeling.forecasting_old.varTest","scalation.modeling.forecasting_old.varTest2","scalation.modeling.forecasting_old.varTest3","scalation.modeling.forecasting_old.varTest4","scalation.modeling.forecasting_old.varTest5","scalation.modeling.forecasting_old.varTest6","scalation.modeling.forecasting_old.weightedMovingAverageTest","scalation.modeling.forecasting_old.weightedMovingAverageTest2","scalation.modeling.forecasting_old.weightedMovingAverageTest3","scalation.modeling.forecasting_old.weightedMovingAverageTest4","scalation.modeling.forecasting_old.weightedMovingAverageTest5","scalation.modeling.imputationTest","scalation.modeling.imputationTest2","scalation.modeling.kNN_RegressionTest","scalation.modeling.kNN_RegressionTest2","scalation.modeling.kNN_RegressionTest3","scalation.modeling.lassoRegressionTest","scalation.modeling.lassoRegressionTest2","scalation.modeling.lassoRegressionTest3","scalation.modeling.matrixTransformTest","scalation.modeling.matrixTransformTest2","scalation.modeling.neuralnet.cNN_1DTest","scalation.modeling.neuralnet.cNN_1DTest2","scalation.modeling.neuralnet.cNN_1DTest3","scalation.modeling.neuralnet.cNN_2DTest2","scalation.modeling.neuralnet.cNN_2DTest3","scalation.modeling.neuralnet.coFilter_1DTest","scalation.modeling.neuralnet.coFilter_1DTest2","scalation.modeling.neur
alnet.coFilter_1DTest3","scalation.modeling.neuralnet.coFilter_2DTest","scalation.modeling.neuralnet.coFilter_2DTest2","scalation.modeling.neuralnet.coFilter_2DTest3","scalation.modeling.neuralnet.eLM_3L1Test","scalation.modeling.neuralnet.eLM_3L1Test2","scalation.modeling.neuralnet.eLM_3L1Test3","scalation.modeling.neuralnet.eLM_3L1Test4","scalation.modeling.neuralnet.eLM_3L1Test5","scalation.modeling.neuralnet.eLM_3L1Test6","scalation.modeling.neuralnet.example_ConcreteTest","scalation.modeling.neuralnet.example_ConcreteTest2","scalation.modeling.neuralnet.example_ConcreteTest3","scalation.modeling.neuralnet.neuralNet_2LTest","scalation.modeling.neuralnet.neuralNet_2LTest2","scalation.modeling.neuralnet.neuralNet_2LTest3","scalation.modeling.neuralnet.neuralNet_2LTest4","scalation.modeling.neuralnet.neuralNet_2LTest5","scalation.modeling.neuralnet.neuralNet_2LTest6","scalation.modeling.neuralnet.neuralNet_2LTest7","scalation.modeling.neuralnet.neuralNet_2LTest8","scalation.modeling.neuralnet.neuralNet_2LTest9","scalation.modeling.neuralnet.neuralNet_2L_CkTest","scalation.modeling.neuralnet.neuralNet_3LTest","scalation.modeling.neuralnet.neuralNet_3LTest10","scalation.modeling.neuralnet.neuralNet_3LTest11","scalation.modeling.neuralnet.neuralNet_3LTest12","scalation.modeling.neuralnet.neuralNet_3LTest2","scalation.modeling.neuralnet.neuralNet_3LTest3","scalation.modeling.neuralnet.neuralNet_3LTest4","scalation.modeling.neuralnet.neuralNet_3LTest5","scalation.modeling.neuralnet.neuralNet_3LTest6","scalation.modeling.neuralnet.neuralNet_3LTest7","scalation.modeling.neuralnet.neuralNet_3LTest8","scalation.modeling.neuralnet.neuralNet_3LTest9","scalation.modeling.neuralnet.neuralNet_3L_C2Test","scalation.modeling.neuralnet.neuralNet_XLTTest","scalation.modeling.neuralnet.neuralNet_XLTTest2","scalation.modeling.neuralnet.neuralNet_XLTest","scalation.modeling.neuralnet.neuralNet_XLTest2","scalation.modeling.neuralnet.neuralNet_XLTest3","scalation.modeling.neuralnet.neura
lNet_XLTest4","scalation.modeling.neuralnet.neuralNet_XLTest5","scalation.modeling.neuralnet.neuralNet_XLTest6","scalation.modeling.neuralnet.neuralNet_XLTest7","scalation.modeling.neuralnet.predictorMVTest","scalation.modeling.neuralnet.regressionMVTest","scalation.modeling.neuralnet.regressionMVTest2","scalation.modeling.neuralnet.regressionMVTest3","scalation.modeling.neuralnet.regressionMVTest4","scalation.modeling.neuralnet.regressionMVTest5","scalation.modeling.nonlinearRegressionTest","scalation.modeling.nullModelTest","scalation.modeling.nullModelTest2","scalation.modeling.outlierTest","scalation.modeling.outlierTest2","scalation.modeling.outlierTest3","scalation.modeling.perceptronTest","scalation.modeling.perceptronTest2","scalation.modeling.perceptronTest3","scalation.modeling.perceptronTest4","scalation.modeling.perceptronTest5","scalation.modeling.perceptronTest6","scalation.modeling.poissonRegressionTest","scalation.modeling.poissonRegressionTest2","scalation.modeling.polyORegressionTest","scalation.modeling.polyORegressionTest2","scalation.modeling.polyRegressionTest","scalation.modeling.polyRegressionTest2","scalation.modeling.predictorTest","scalation.modeling.regressionCatTest","scalation.modeling.regressionCatTest2","scalation.modeling.regressionCatTest3","scalation.modeling.regressionCatTest4","scalation.modeling.regressionCatTest5","scalation.modeling.regressionCatTest6","scalation.modeling.regressionCatTest7","scalation.modeling.regressionTest","scalation.modeling.regressionTest10","scalation.modeling.regressionTest2","scalation.modeling.regressionTest3","scalation.modeling.regressionTest4","scalation.modeling.regressionTest5","scalation.modeling.regressionTest6","scalation.modeling.regressionTest7","scalation.modeling.regressionTest8","scalation.modeling.regressionTest9","scalation.modeling.regressionTreeGBTest","scalation.modeling.regressionTreeGBTest2","scalation.modeling.regressionTreeGBTest3","scalation.modeling.regressionTreeGBTest4","sca
lation.modeling.regressionTreeGBTest5","scalation.modeling.regressionTreeGBTest6","scalation.modeling.regressionTreeMTTest","scalation.modeling.regressionTreeMTTest2","scalation.modeling.regressionTreeMTTest3","scalation.modeling.regressionTreeRFTest","scalation.modeling.regressionTreeRFTest2","scalation.modeling.regressionTreeRFTest3","scalation.modeling.regressionTreeRFTest4","scalation.modeling.regressionTreeRFTest5","scalation.modeling.regressionTreeRF_MTTest","scalation.modeling.regressionTreeRF_MTTest2","scalation.modeling.regressionTreeRF_MTTest3","scalation.modeling.regressionTreeRF_MTTest4","scalation.modeling.regressionTreeTest","scalation.modeling.regressionTreeTest2","scalation.modeling.regressionTreeTest3","scalation.modeling.regressionTreeTest4","scalation.modeling.regressionWLSTest","scalation.modeling.regressionWLSTest2","scalation.modeling.ridgeRegressionTest","scalation.modeling.ridgeRegressionTest2","scalation.modeling.ridgeRegressionTest3","scalation.modeling.ridgeRegressionTest4","scalation.modeling.ridgeRegressionTest5","scalation.modeling.ridgeRegressionTest6","scalation.modeling.ridgeRegressionTest7","scalation.modeling.roundRegressionTest","scalation.modeling.simpleExpRegressionTest","scalation.modeling.simpleExpRegressionTest2","scalation.modeling.simpleExpRegressionTest3","scalation.modeling.simpleRegressionTest","scalation.modeling.simpleRegressionTest2","scalation.modeling.simpleRegressionTest3","scalation.modeling.simpleRegressionTest4","scalation.modeling.simpleRegressionTest5","scalation.modeling.simpleRegressionTest6","scalation.modeling.simpleRegressionTest7","scalation.modeling.simpleRegressionTest8","scalation.modeling.simplerRegressionTest","scalation.modeling.simplerRegressionTest2","scalation.modeling.simplerRegressionTest3","scalation.modeling.simplerRegressionTest4","scalation.modeling.sumQueueTest","scalation.modeling.sumQueueTest2","scalation.modeling.symLassoRegressionTest","scalation.modeling.symLassoRegressionTest2","sca
lation.modeling.symLassoRegressionTest3","scalation.modeling.symLassoRegressionTest4","scalation.modeling.symLassoRegressionTest5","scalation.modeling.symLassoRegressionTest6","scalation.modeling.symLassoRegressionTest7","scalation.modeling.symLassoRegressionTest8","scalation.modeling.symLassoRegressionTest9","scalation.modeling.symRidgeRegressionTest","scalation.modeling.symRidgeRegressionTest2","scalation.modeling.symRidgeRegressionTest3","scalation.modeling.symRidgeRegressionTest4","scalation.modeling.symRidgeRegressionTest5","scalation.modeling.symRidgeRegressionTest6","scalation.modeling.symRidgeRegressionTest7","scalation.modeling.symRidgeRegressionTest8","scalation.modeling.symRidgeRegressionTest9","scalation.modeling.symbolicRegressionTest","scalation.modeling.symbolicRegressionTest10","scalation.modeling.symbolicRegressionTest11","scalation.modeling.symbolicRegressionTest12","scalation.modeling.symbolicRegressionTest13","scalation.modeling.symbolicRegressionTest2","scalation.modeling.symbolicRegressionTest3","scalation.modeling.symbolicRegressionTest4","scalation.modeling.symbolicRegressionTest5","scalation.modeling.symbolicRegressionTest6","scalation.modeling.symbolicRegressionTest7","scalation.modeling.symbolicRegressionTest8","scalation.modeling.symbolicRegressionTest9","scalation.modeling.tranRegressionTest","scalation.modeling.tranRegressionTest2","scalation.modeling.tranRegressionTest3","scalation.modeling.tranRegressionTest4","scalation.modeling.tranRegressionTest5","scalation.modeling.tranRegressionTest6","scalation.modeling.tranRegressionTest7","scalation.modeling.tranRegressionTest8","scalation.modeling.trigRegressionTest","scalation.modeling.trigRegressionTest2","scalation.modeling.variableTest","scalation.multiArrayDequesTest","scalation.optimization.conjugateGradientTest","scalation.optimization.conjugateGradientTest2","scalation.optimization.conjugateGradientTest3","scalation.optimization.conjugateGradient_NoLSTest","scalation.optimization.con
jugateGradient_NoLSTest2","scalation.optimization.conjugateGradient_NoLSTest3","scalation.optimization.coordinateDescentTest","scalation.optimization.goldenSectionLSTest","scalation.optimization.goldenSectionLSTest2","scalation.optimization.gradientDescentTest","scalation.optimization.gradientDescentTest2","scalation.optimization.gradientDescentTest3","scalation.optimization.gradientDescentTest4","scalation.optimization.gradientDescent_AdamTest","scalation.optimization.gradientDescent_Mo2Test","scalation.optimization.gradientDescent_MoTest","scalation.optimization.gradientDescent_NoLSTest","scalation.optimization.gridSearchLSTest","scalation.optimization.gridSearchLSTest2","scalation.optimization.gridSearchTest","scalation.optimization.gridSearchTest2","scalation.optimization.hungarianTest","scalation.optimization.hungarianTest2","scalation.optimization.integerTabuSearchTest","scalation.optimization.integerTabuSearchTest2","scalation.optimization.lassoAdmmTest","scalation.optimization.lassoAdmmTest2","scalation.optimization.lassoAdmmTest3","scalation.optimization.linear_opt.integerLPTest","scalation.optimization.linear_opt.simplex2PTest","scalation.optimization.linearopt.quadraticSimplexTest","scalation.optimization.nLPTest","scalation.optimization.nLPTest2","scalation.optimization.nelderMeadSimplex2Test","scalation.optimization.nelderMeadSimplexTest","scalation.optimization.newtonRaphsonTest","scalation.optimization.newtonRaphsonTest2","scalation.optimization.newtonRaphsonTest3","scalation.optimization.newton_NoLSTest","scalation.optimization.newton_NoLSTest2","scalation.optimization.newton_NoLSTest3","scalation.optimization.quasi_newton.bFGSBealeFunction","scalation.optimization.quasi_newton.bFGSBohachevsky1Function","scalation.optimization.quasi_newton.bFGSBohachevsky2Function","scalation.optimization.quasi_newton.bFGSBohachevsky3Function","scalation.optimization.quasi_newton.bFGSBoothFunction","scalation.optimization.quasi_newton.bFGSCamel3Function","scalation.o
ptimization.quasi_newton.bFGSCubeFunction","scalation.optimization.quasi_newton.bFGSFreudensteinRothFunction","scalation.optimization.quasi_newton.bFGSMcCormickFunction","scalation.optimization.quasi_newton.bFGSTest","scalation.optimization.quasi_newton.bFGSTest2","scalation.optimization.quasi_newton.bFGSTest3","scalation.optimization.quasi_newton.bFGSTest4","scalation.optimization.quasi_newton.bFGS_NoLSTest","scalation.optimization.quasi_newton.bFGS_NoLSTest2","scalation.optimization.quasi_newton.bFGS_NoLSTest3","scalation.optimization.quasi_newton.bFGS_NoLSTest4","scalation.optimization.quasi_newton.bealeFunctionLBFGSTest","scalation.optimization.quasi_newton.bohachevsky1FunctionLBFGSTest","scalation.optimization.quasi_newton.bohachevsky2FunctionLBFGSTest","scalation.optimization.quasi_newton.bohachevsky3FunctionLBFGSTest","scalation.optimization.quasi_newton.boothFunctionLBFGSTest","scalation.optimization.quasi_newton.camel3FunctionLBFGSTest","scalation.optimization.quasi_newton.cubeFunctionLBFGSTest","scalation.optimization.quasi_newton.freudensteinRothFunctionLBFGSTest","scalation.optimization.quasi_newton.lBFGS_BTest","scalation.optimization.quasi_newton.lBFGS_BTest2","scalation.optimization.quasi_newton.lBFGS_BTest3","scalation.optimization.quasi_newton.lBFGS_NoLSTest","scalation.optimization.quasi_newton.lBFGS_NoLSTest2","scalation.optimization.quasi_newton.lBFGS_NoLSTest3","scalation.optimization.quasi_newton.mccormickFunctionDMLBFGSTest","scalation.optimization.quasi_newton.mccormickFunctionLBFGSTest","scalation.optimization.quasi_newtonC.bohachevsky2FunctionLBFGS_FFMTest","scalation.optimization.quasi_newtonC.boothFunctionLBFGS_FFMTest","scalation.optimization.sPSATest","scalation.optimization.tabuSearchTest","scalation.optimization.tabuSearchTest2","scalation.optimization.wolfeConditionsTest","scalation.optimization.wolfeLS2Test","scalation.optimization.wolfeLS2Test2","scalation.optimization.wolfeLS2Test3","scalation.optimization.wolfeLS2Test4","scalatio
n.optimization.wolfeLS3Test","scalation.optimization.wolfeLS3Test2","scalation.optimization.wolfeLS3Test3","scalation.optimization.wolfeLS3Test4","scalation.optimization.wolfeLSTest","scalation.optimization.wolfeLSTest2","scalation.optimization.wolfeLSTest3","scalation.random.cDFTest_ChiSquare","scalation.random.cDFTest_Empirical","scalation.random.cDFTest_Exponential","scalation.random.cDFTest_Fisher","scalation.random.cDFTest_Fisher2","scalation.random.cDFTest_Normal","scalation.random.cDFTest_Normal_Diff","scalation.random.cDFTest_StudentT","scalation.random.cDFTest_Uniform","scalation.random.cDFTest_Weibull","scalation.random.cLTTest","scalation.random.diceTest","scalation.random.poissonProcessTest","scalation.random.quantileTest_ChiSquare","scalation.random.quantileTest_Empirical","scalation.random.quantileTest_Exponential","scalation.random.quantileTest_Fisher","scalation.random.quantileTest_Normal","scalation.random.quantileTest_StudentT","scalation.random.quantileTest_Uniform","scalation.random.rNGTest","scalation.random.randomStrTest","scalation.random.randomWordTest","scalation.random.streamMaker3","scalation.random.streamMakerGen","scalation.random.variateMatTest","scalation.random.variateSetTest","scalation.random.variateSetTest2","scalation.random.variateSetTest3","scalation.random.variateTenTest","scalation.random.variateTest","scalation.random.variateVecTest","scalation.random.variateVecTest2","scalation.readFileTest","scalation.readFileTest2","scalation.redirectOutTest","scalation.ringTest","scalation.runCalc","scalation.runCalcHelp","scalation.scala2d.arrowTest","scalation.scala2d.colorsTest","scalation.scala2d.lineTest","scalation.scala2d.polygonTest","scalation.scala2d.polygonTest2","scalation.scala2d.qArrowTest","scalation.scala2d.qCurveTest","scalation.scala2d.qCurveTest2","scalation.scala2d.writeImageTest","scalation.scala3d.Road3d","scalation.setExtTest","scalation.simulation.activity.PetriNetRulesTest","scalation.simulation.activity.petriNetT
est","scalation.simulation.agent.example_1.runBank","scalation.simulation.agent.example_1.runCallCenter","scalation.simulation.agent.example_1.runTraffic2L","scalation.simulation.agent.example_1.runTraffic2L1","scalation.simulation.agent.example_1.runTraffic4L","scalation.simulation.agent.example_1.runUGABusRoutes","scalation.simulation.agent.example_1.runUGABusRoutes1","scalation.simulation.agent.monitorTest","scalation.simulation.agent.simAgentTest","scalation.simulation.event.example_1.runBank","scalation.simulation.event.example_1.runBank2","scalation.simulation.event.example_1.runBank3","scalation.simulation.event.example_1.runCallCenter","scalation.simulation.event.example_1.runCallCenter2","scalation.simulation.event.example_1.runFastFood","scalation.simulation.event.example_1.runMachine","scalation.simulation.event.example_1.runPoisson","scalation.simulation.event.example_1.runPoisson2","scalation.simulation.event.runSOME","scalation.simulation.monitorTest","scalation.simulation.monte_carlo.cardsTest","scalation.simulation.monte_carlo.cardsTest2","scalation.simulation.monte_carlo.cardsTest3","scalation.simulation.monte_carlo.grainDroppingTest","scalation.simulation.monte_carlo.monteCarloIntegrationTest","scalation.simulation.monte_carlo.montyHall","scalation.simulation.monte_carlo.rollDiceTest","scalation.simulation.monte_carlo.rollDiceTest2","scalation.simulation.monte_carlo.rollDiceTest3","scalation.simulation.monte_carlo.sphereVolumeTest","scalation.simulation.nH_PoissonProcessTest","scalation.simulation.nH_PoissonProcessTest2","scalation.simulation.poissonProcessTest","scalation.simulation.process.example_1.runBank","scalation.simulation.process.example_1.runCallCenter","scalation.simulation.process.example_1.runEmerDept","scalation.simulation.process.example_1.runLoop","scalation.simulation.process.example_1.runMachine","scalation.simulation.process.example_1.runOneWayStreet","scalation.simulation.process.example_1.runOneWayVehicle","scalation.simulatio
n.process.example_1.runRoad","scalation.simulation.process.example_1.runTraffic","scalation.simulation.process.example_1.runTrafficDyn","scalation.simulation.process.example_1.runTrafficLaneChange","scalation.simulation.process.example_1.runTrafficTurn","scalation.simulation.process.example_1.runUGA_Bus","scalation.simulation.process.example_MBM.runBank","scalation.simulation.process.example_MBM.testCorrBank","scalation.simulation.process.example_MIR.runBank","scalation.simulation.process.pathTest","scalation.simulation.process.sourceTest","scalation.simulation.process.vSourceTest","scalation.simulation.queueingnet.jacksonNetTest","scalation.simulation.queueingnet.mGc_QueueTest","scalation.simulation.queueingnet.mM_QueueTest","scalation.simulation.queueingnet.mMck_QueueTest","scalation.simulation.runCoroutineTest","scalation.simulation.state.markovCTTest","scalation.simulation.state.markovChainTest","scalation.simulation.state.markovChainTest2","scalation.simulation.state.markovChainTest3","scalation.simulation.state.markovChainTest4","scalation.simulation.state.markovChainTest5","scalation.simulation.tableau.runEx_Bank","scalation.simulation.tableau.runEx_CallCenter","scalation.simulation.tableau.runModelTest","scalation.simulation.tableau.runQueue_MM1","scalation.skipListTest","scalation.timeNumTest","scalation.timeNumTest2","scalation.timeNumTest3","scalation.timerTest","scalation.unicodeTest","scalation.valueTypeTest"] \ No newline at end of file diff --git a/target/streams/compile/bspReporter/_global/streams/out b/target/streams/compile/bspReporter/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/target/streams/compile/compile/_global/streams/out b/target/streams/compile/compile/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/target/streams/compile/compileIncremental/_global/streams/export b/target/streams/compile/compileIncremental/_global/streams/export deleted file mode 100644 index 
e69de29bb..000000000 diff --git a/target/streams/compile/compileIncremental/_global/streams/out b/target/streams/compile/compileIncremental/_global/streams/out deleted file mode 100644 index 28801459e..000000000 --- a/target/streams/compile/compileIncremental/_global/streams/out +++ /dev/null @@ -1,6 +0,0 @@ -[debug] [zinc] IncrementalCompile ----------- -[debug] IncrementalCompile.incrementalCompile -[debug] previous = Stamps for: 3536 products, 567 sources, 5 libraries -[debug] current source = Set(${BASE}/src/main/scala/scalation/modeling/KNN_Regression.scala, ${BASE}/src/main/scala/scalation/modeling/RegressionTreeGB.scala, ${BASE}/src/main/scala/scalation/mathstat/Plot.scala, ${BASE}/src/main/scala/scalation/LatLong.scala, ${BASE}/src/main/scala/scalation/Ring.scala, ${BASE}/src/main/scala/scalation/dynamics/RungeKutta.scala, ${BASE}/src/main/scala/scalation/dynamics/FirstOrderPDE.scala, ${BASE}/src/main/scala/scalation/simulation/process/Path.scala, ${BASE}/src/main/scala/scalation/modeling/MatrixTransform.scala, ${BASE}/src/main/scala/scalation/mathstat/Eigen.scala, ${BASE}/src/main/scala/scalation/calculus/Integral.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/FunctionEvaluation.scala, ${BASE}/src/main/scala/scalation/simulation/monte_carlo/RollDice.scala, ${BASE}/src/main/scala/scalation/mathstat/Fac_Inverse.scala, ${BASE}/src/main/scala/scalation/simulation/event/example_1/Bank3.scala, ${BASE}/src/main/scala/scalation/scala3d/Road3d.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/SARY.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/multivar/VAR.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newtonC/OptimizationMethodHandlesFFM.scala, ${BASE}/src/main/scala/scalation/modeling/NullModel.scala, ${BASE}/src/main/scala/scalation/scala3d/Clock.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/neuralforecasting/NeuralNet_3L4TS.scala, ${BASE}/src/main/scala/scalation/modeling/Variable.scala, 
${BASE}/src/main/scala/scalation/simulation/process/Resource.scala, ${BASE}/src/main/scala/scalation/simulation/activity/PetriNet.scala, ${BASE}/src/main/scala/scalation/mathstat/Householder.scala, ${BASE}/src/main/scala/scalation/scala2d/ZoomablePanel.scala, ${BASE}/src/main/scala/scalation/dynamics/RungeKutta3.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/Optimizer_SGDM.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/multivar/ForecastTensor.scala, ${BASE}/src/main/scala/scalation/random/VariateStr.scala, ${BASE}/src/main/scala/scalation/dynamics/Integrator.scala, ${BASE}/src/main/scala/scalation/simulation/queueingnet/MM_Queue.scala, ${BASE}/src/main/scala/scalation/database/relation/TableGen.scala, ${BASE}/src/main/scala/scalation/database/mugraph_pm/ExampleMuGraphS.scala, ${BASE}/src/main/scala/scalation/DoublyLinkedList.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/WeightedMovingAverage.scala, ${BASE}/src/main/scala/scalation/database/graph_pm/GraphDFS.scala, ${BASE}/src/main/scala/scalation/database/table/KGTable.scala, ${BASE}/src/main/scala/scalation/mathstat/VectorI.scala, ${BASE}/src/main/scala/scalation/modeling/Scaling.scala, ${BASE}/src/main/scala/scalation/simulation/agent/Transport.scala, ${BASE}/src/main/scala/scalation/database/MinSpanningTree.scala, ${BASE}/src/main/scala/scalation/animation/AnimateCommand.scala, ${BASE}/src/main/scala/scalation/modeling/clustering/Distance.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/NullModel.scala, ${BASE}/src/main/scala/scalation/modeling/PolyORegression.scala, ${BASE}/src/main/scala/scalation/simulation/process/Model_MBM.scala, ${BASE}/src/main/scala/scalation/simulation/event/example_1/Bank2.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/PredictorMV.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/Example_PlayTennis_Cont.scala, ${BASE}/src/main/scala/scalation/scala2d/CurvilinearShape.scala, 
${BASE}/src/main/scala/scalation/scala3d/Vehicle.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/RollingValidation.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/neuralforecasting/NeuralNet_XL4TS.scala, ${BASE}/src/main/scala/scalation/modeling/Sampling.scala, ${BASE}/src/main/scala/scalation/mathstat/VectorL.scala, ${BASE}/src/main/scala/scalation/database/mugraph_pm/ExampleMuGraphD.scala, ${BASE}/src/main/scala/scalation/simulation/state/MarkovChainCT.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/Diagnoser.scala, ${BASE}/src/main/scala/scalation/mathstat/Fac_LQ.scala, ${BASE}/src/main/scala/scalation/modeling/RegressionCat.scala, ${BASE}/src/main/scala/scalation/database/graph_relation/VertexType.scala, ${BASE}/src/main/scala/scalation/database/graph/PGraph.scala, ${BASE}/src/main/scala/scalation/optimization/CoordinateDescent.scala, ${BASE}/src/main/scala/scalation/mathstat/PlotC.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/NeuralNet_3L_C2.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/CoFilter_1D.scala, ${BASE}/src/main/scala/scalation/Counter.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/neuralforecasting/GRU.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/Example_Iris.scala, ${BASE}/src/main/scala/scalation/optimization/TabuSearch.scala, ${BASE}/src/main/scala/scalation/optimization/linear_opt/IntegerLP.scala, ${BASE}/src/main/scala/scalation/optimization/BoundsConstraint.scala, ${BASE}/src/main/scala/scalation/modeling/SimplerRegression.scala, ${BASE}/src/main/scala/scalation/database/JavaMap.scala, ${BASE}/src/main/scala/scalation/simulation/process/Vehicle.scala, ${BASE}/src/main/scala/scalation/dynamics/Reactions.scala, ${BASE}/src/main/scala/scalation/optimization/GradientDescent_Mo2.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/ARX_MV.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/BFGS_NoLS.scala, 
${BASE}/src/main/scala/scalation/modeling/forecasting/neuralforecasting/PositionalEnc.scala, ${BASE}/src/main/scala/scalation/database/triplegraph/TripleGraph.scala, ${BASE}/src/main/scala/scalation/mathstat/Bidiagonal.scala, ${BASE}/src/main/scala/scalation/Calc.scala, ${BASE}/src/main/scala/scalation/database/graph/VertexType.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/AR.scala, ${BASE}/src/main/scala/scalation/mathstat/Fac_LU.scala, ${BASE}/src/main/scala/scalation/database/Tabular.scala, ${BASE}/src/main/scala/scalation/TimeNum.scala, ${BASE}/src/main/scala/scalation/database/graph_pm/MatchAnswers.scala, ${BASE}/src/main/scala/scalation/simulation/agent/Route.scala, ${BASE}/src/main/scala/scalation/calculus/B_Spline.scala, ${BASE}/src/main/scala/scalation/simulation/process/Gate.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/DecisionTree.scala, ${BASE}/src/main/scala/scalation/calculus/Hilbert.scala, ${BASE}/src/main/scala/scalation/modeling/clustering/HierClusterer.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/Stationarity.scala, ${BASE}/src/main/scala/scalation/PriorityQueue.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/FunctionOptimization.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/neuralforecasting/DropoutLayer.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/ARY_D.scala, ${BASE}/src/main/scala/scalation/random/PoissonProcess.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/RegressionTreeRF_MT4TS.scala, ${BASE}/src/main/scala/scalation/modeling/TrigRegression.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newtonC/LBFGS_FFM.scala, ${BASE}/src/main/scala/scalation/database/mugraph_pm/MatchAnswers.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/Example_BreastCancer.scala, ${BASE}/src/main/scala/scalation/simulation/process/Recorder.scala, ${BASE}/src/main/scala/scalation/simulation/agent/Link.scala, 
${BASE}/src/main/scala/scalation/simulation/agent/WaitQueue_LCFS.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/RandomWalk.scala, ${BASE}/src/main/scala/scalation/Unicode.scala, ${BASE}/src/main/scala/scalation/animation/SimpleAnimator.scala, ${BASE}/src/main/scala/scalation/simulation/process/SimActor.scala, ${BASE}/src/main/scala/scalation/scala2d/Arrow.scala, ${BASE}/src/main/scala/scalation/simulation/tableau/Ex_Bank.scala, ${BASE}/src/main/scala/scalation/database/relation/Vectr.scala, ${BASE}/src/main/scala/scalation/modeling/Outlier.scala, ${BASE}/src/main/scala/scalation/modeling/Model.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/Stationarity.scala, ${BASE}/src/main/scala/scalation/simulation/agent/EdgeAgents.scala, ${BASE}/src/main/scala/scalation/optimization/WolfeLS3.scala, ${BASE}/src/main/scala/scalation/optimization/ConjugateGradient_NoLS.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newtonC/FunctionOptimizationFFM.scala, ${BASE}/src/main/scala/scalation/database/relation/Relation.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/LBFGSMoreThuente.scala, ${BASE}/src/main/scala/scalation/calculus/AutoDiff.scala, ${BASE}/src/main/scala/scalation/Bool.scala, ${BASE}/src/main/scala/scalation/FileReader.scala, ${BASE}/src/main/scala/scalation/calculus/FFT.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/AR.scala, ${BASE}/src/main/scala/scalation/modeling/MonitorLoss.scala, ${BASE}/src/main/scala/scalation/database/graph_pm/GraphIO.scala, ${BASE}/src/main/scala/scalation/random/Random0.scala, ${BASE}/src/main/scala/scalation/database/BinTree.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/DTW.scala, ${BASE}/src/main/scala/scalation/optimization/ConjugateGradient.scala, ${BASE}/src/main/scala/scalation/random/RandomSeeds.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/DecisionTree_ID3.scala, 
${BASE}/src/main/scala/scalation/optimization/quasi_newton/LBFGS.scala, ${BASE}/src/main/scala/scalation/database/graph_pm/TopSort.scala, ${BASE}/src/main/scala/scalation/dynamics/ParabolicPDE.scala, ${BASE}/src/main/scala/scalation/Make_VectorI.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/Example_PlayTennis.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/neuralforecasting/DenseLayer.scala, ${BASE}/src/main/scala/scalation/optimization/GradientDescent.scala, ${BASE}/src/main/scala/scalation/optimization/linear_opt/Simplex2P.scala, ${BASE}/src/main/scala/scalation/simulation/agent/Gate.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/LBFGSLineSearchAlg.scala, ${BASE}/src/main/scala/scalation/simulation/monte_carlo/MontyHall.scala, ${BASE}/src/main/scala/scalation/database/mugraph_pm/MuGraph.scala, ${BASE}/src/main/scala/scalation/optimization/linear_opt/CheckLP.scala, ${BASE}/src/main/scala/scalation/scala2d/Polygon.scala, ${BASE}/src/main/scala/scalation/dynamics/LinearDiffEq.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/SimpleExpSmoothing.scala, ${BASE}/src/main/scala/scalation/simulation/agent/example_1/UGABusRoutes.scala, ${BASE}/src/main/scala/scalation/simulation/queueingnet/JacksonNet.scala, ${BASE}/src/main/scala/scalation/optimization/Minimize.scala, ${BASE}/src/main/scala/scalation/optimization/StoppingRule.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/NetParam.scala, ${BASE}/src/main/scala/scalation/simulation/event/example_1/FastFood.scala, ${BASE}/src/main/scala/scalation/modeling/NoBuildModel.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/Optimizer_Adam.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/Example_GasFurnace.scala, ${BASE}/src/main/scala/scalation/database/triplegraph/TripleGraphSim.scala, ${BASE}/src/main/scala/scalation/database/table/GTable.scala, ${BASE}/src/main/scala/scalation/mathstat/MatrixI.scala, 
${BASE}/src/main/scala/scalation/modeling/forecasting_old/SARIMA.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/Example_Concrete.scala, ${BASE}/src/main/scala/scalation/simulation/process/example_1/Road.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/Periodogram.scala, ${BASE}/src/main/scala/scalation/mathstat/Factorization.scala, ${BASE}/src/main/scala/scalation/simulation/process/Component.scala, ${BASE}/src/main/scala/scalation/simulation/process/Source.scala, ${BASE}/src/main/scala/scalation/SkipList.scala, ${BASE}/src/main/scala/scalation/scala2d/ImageWriter.scala, ${BASE}/src/main/scala/scalation/optimization/GradientDescent_Mo.scala, ${BASE}/src/main/scala/scalation/modeling/RegressionTreeRF_MT.scala, ${BASE}/src/main/scala/scalation/database/graph_pm/GraphMatcher.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/RegressionTreeRF4TS.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newtonC/MethodTypes.scala, ${BASE}/src/main/scala/scalation/MultiArrayDeque.scala, ${BASE}/src/main/scala/scalation/random/VariateTen.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/LBFGSCallbackData.scala, ${BASE}/src/main/scala/scalation/modeling/LassoRegression.scala, ${BASE}/src/main/scala/scalation/simulation/process/example_1/OneWayVehicle.scala, ${BASE}/src/main/scala/scalation/simulation/monte_carlo/GrainDropping.scala, ${BASE}/src/main/scala/scalation/simulation/Modelable.scala, ${BASE}/src/main/scala/scalation/random/Random.scala, ${BASE}/src/main/scala/scalation/SetExt.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/RandomForest.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/ARX_Quad.scala, ${BASE}/src/main/scala/scalation/scala2d/QCurve.scala, ${BASE}/src/main/scala/scalation/database/Temporal.scala, ${BASE}/src/main/scala/scalation/simulation/process/VTransport.scala, ${BASE}/src/main/scala/scalation/modeling/clustering/KMeansClustererPP.scala, 
${BASE}/src/main/scala/scalation/modeling/clustering/Cluster.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/Optimizer_SGD.scala, ${BASE}/src/main/scala/scalation/scala2d/Colors.scala, ${BASE}/src/main/scala/scalation/simulation/process/example_MIR/Bank.scala, ${BASE}/src/main/scala/scalation/simulation/Locatable.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/NeuralNet_XLT.scala, ${BASE}/src/main/scala/scalation/mathstat/Transform.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/NullModel.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/ARMA.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/ARIMA_diff.scala, ${BASE}/src/main/scala/scalation/simulation/Monitor.scala, ${BASE}/src/main/scala/scalation/optimization/functions/ExampleFunctions.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/NullModel.scala, ${BASE}/src/main/scala/scalation/calculus/DB_Spline.scala, ${BASE}/src/main/scala/scalation/Fib.scala, ${BASE}/src/main/scala/scalation/database/KeyType.scala, ${BASE}/src/main/scala/scalation/Coordinates.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/ARX.scala, ${BASE}/src/main/scala/scalation/simulation/agent/example_1/CallCenter.scala, ${BASE}/src/main/scala/scalation/simulation/process/Dynamics.scala, ${BASE}/src/main/scala/scalation/mathstat/Combinatorics.scala, ${BASE}/src/main/scala/scalation/modeling/ExpRegression.scala, ${BASE}/src/main/scala/scalation/simulation/process/example_1/EmerDept.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/LBFGSLineSearch.scala, ${BASE}/src/main/scala/scalation/simulation/agent/example_1/Traffic4L.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/QuadSpline.scala, ${BASE}/src/main/scala/scalation/mathstat/Stats4TS.scala, ${BASE}/src/main/scala/scalation/simulation/process/example_1/TrafficDyn.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/DecisionTree_ID3wp.scala, 
${BASE}/src/main/scala/scalation/modeling/forecasting/ARY.scala, ${BASE}/src/main/scala/scalation/database/MultiMap.scala, ${BASE}/src/main/scala/scalation/scala2d/QArrow.scala, ${BASE}/src/main/scala/scalation/mathstat/TensorD.scala, ${BASE}/src/main/scala/scalation/mathstat/StatTable.scala, ${BASE}/src/main/scala/scalation/database/mugraph_pm/MuGraphSim.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/RegressionTreeGB4TS.scala, ${BASE}/src/main/scala/scalation/simulation/agent/example_1/Traffic2L.scala, ${BASE}/src/main/scala/scalation/optimization/GridSearchLS.scala, ${BASE}/src/main/scala/scalation/modeling/SimpleExpRegression.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/Example_Diabetes.scala, ${BASE}/src/main/scala/scalation/simulation/agent/Monitor.scala, ${BASE}/src/main/scala/scalation/database/graph/Vertex.scala, ${BASE}/src/main/scala/scalation/simulation/Identifiable.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/RegressionTreeMT4TS.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/Stationarity_KPSS.scala, ${BASE}/src/main/scala/scalation/optimization/SPSA.scala, ${BASE}/src/main/scala/scalation/mathstat/MatrixCalc.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingArmijo.scala, ${BASE}/src/main/scala/scalation/simulation/event/Ex_Template.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/Optimizer.scala, ${BASE}/src/main/scala/scalation/mathstat/Pivoting.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingStrongWolfe.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/BayesClassifier.scala, ${BASE}/src/main/scala/scalation/calculus/Differential.scala, ${BASE}/src/main/scala/scalation/mathstat/VectorS.scala, ${BASE}/src/main/scala/scalation/database/mugraph_pm/MuGraphMatcher.scala, ${BASE}/src/main/scala/scalation/random/RandomSeeds3.scala, ${BASE}/src/main/scala/scalation/simulation/PoissonProcess.scala, 
${BASE}/src/main/scala/scalation/simulation/process/example_1/Machine.scala, ${BASE}/src/main/scala/scalation/simulation/process/Model.scala, ${BASE}/src/main/scala/scalation/optimization/GradientDescent_Adam.scala, ${BASE}/src/main/scala/scalation/database/table/VTable.scala, ${BASE}/src/main/scala/scalation/mathstat/VectorD.scala, ${BASE}/src/main/scala/scalation/simulation/event/Entity.scala, ${BASE}/src/main/scala/scalation/database/graph/EdgeType.scala, ${BASE}/src/main/scala/scalation/mathstat/Fac_QR.scala, ${BASE}/src/main/scala/scalation/simulation/event/example_1/CallCenter.scala, ${BASE}/src/main/scala/scalation/modeling/clustering/ClusteringPredictor.scala, ${BASE}/src/main/scala/scalation/modeling/clustering/KMeansClusterer2.scala, ${BASE}/src/main/scala/scalation/mathstat/Convert.scala, ${BASE}/src/main/scala/scalation/mathstat/Complex.scala, ${BASE}/src/main/scala/scalation/mathstat/TnT_Split.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newtonC/OptimizationLogicFFM.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/NeuralNet_2L.scala, ${BASE}/src/main/scala/scalation/database/graph_pm/ShortestPath.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/Example_LakeLevels.scala, ${BASE}/src/main/scala/scalation/random/VariateVec.scala, ${BASE}/src/main/scala/scalation/database/relation/Ex_Teaching.scala, ${BASE}/src/main/scala/scalation/modeling/clustering/KMeansClusterer.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/ELM_3L1.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/Forecaster.scala, ${BASE}/src/main/scala/scalation/EasyWriter.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/TANBayes.scala, ${BASE}/src/main/scala/scalation/CircularQueue.scala, ${BASE}/src/main/scala/scalation/mathstat/Probability.scala, ${BASE}/src/main/scala/scalation/simulation/Completion.scala, ${BASE}/src/main/scala/scalation/modeling/clustering/RandomGraph.scala, 
${BASE}/src/main/scala/scalation/optimization/quasi_newton/LBFGSIterationData.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/ForecasterX.scala, ${BASE}/src/main/scala/scalation/random/Variate.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/RegressionTreeGB4TS2.scala, ${BASE}/src/main/scala/scalation/simulation/process/example_1/Bank.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/KalmanFilter.scala, ${BASE}/src/main/scala/scalation/database/graph/Edge.scala, ${BASE}/src/main/scala/scalation/simulation/agent/Resource.scala, ${BASE}/src/main/scala/scalation/modeling/clustering/KMeansPPClusterer.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/Baselines.scala, ${BASE}/src/main/scala/scalation/modeling/clustering/TightClusterer.scala, ${BASE}/src/main/scala/scalation/optimization/GoldenSectionLS.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/ARMA.scala, ${BASE}/src/main/scala/scalation/simulation/process/Bus.scala, ${BASE}/src/main/scala/scalation/optimization/Minimizer.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/StoppingRule.scala, ${BASE}/src/main/scala/scalation/random/StreamMaker3.scala, ${BASE}/src/main/scala/scalation/simulation/event/Event.scala, ${BASE}/src/main/scala/scalation/mathstat/MatrixD.scala, ${BASE}/src/main/scala/scalation/simulation/monte_carlo/Cards.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingOrthantWise.scala, ${BASE}/src/main/scala/scalation/modeling/Example_AutoMPG.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/LinDiscAnalyis.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/ARX_Symb_D.scala, ${BASE}/src/main/scala/scalation/modeling/PoissonRegression.scala, ${BASE}/src/main/scala/scalation/database/table/TA_AssignmentDB.scala, ${BASE}/src/main/scala/scalation/mathstat/Fac_Cholesky.scala, ${BASE}/src/main/scala/scalation/dynamics/DynamicEq.scala, 
${BASE}/src/main/scala/scalation/optimization/NelderMeadSimplex.scala, ${BASE}/src/main/scala/scalation/random/Random3.scala, ${BASE}/src/main/scala/scalation/modeling/Predictor.scala, ${BASE}/src/main/scala/scalation/animation/PointOn.scala, ${BASE}/src/main/scala/scalation/modeling/RegressionWLS.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/ARX_D.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/KNN_Classifier.scala, ${BASE}/src/main/scala/scalation/simulation/tableau/Model.scala, ${BASE}/src/main/scala/scalation/optimization/NelderMeadSimplex2.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/CoFilter_2D.scala, ${BASE}/src/main/scala/scalation/simulation/process/example_1/Loop.scala, ${BASE}/src/main/scala/scalation/simulation/event/EventNode.scala, ${BASE}/src/main/scala/scalation/simulation/process/Junction.scala, ${BASE}/src/main/scala/scalation/optimization/PathMonitor.scala, ${BASE}/src/main/scala/scalation/database/table/TimeComparison.scala, ${BASE}/src/main/scala/scalation/mathstat/InverseTest.scala, ${BASE}/src/main/scala/scalation/modeling/NonlinearRegression.scala, ${BASE}/src/main/scala/scalation/database/logic/SATsolver.scala, ${BASE}/src/main/scala/scalation/mathstat/VectorC.scala, ${BASE}/src/main/scala/scalation/database/table/TableGen.scala, ${BASE}/src/main/scala/scalation/modeling/Example_BasketBall.scala, ${BASE}/src/main/scala/scalation/optimization/IntegerTabuSearch.scala, ${BASE}/src/main/scala/scalation/calculus/DBasisFunction.scala, ${BASE}/src/main/scala/scalation/MergeSortIndirect.scala, ${BASE}/src/main/scala/scalation/simulation/agent/QueueOps.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/RandomWalkS.scala, ${BASE}/src/main/scala/scalation/simulation/agent/Statistical.scala, ${BASE}/src/main/scala/scalation/simulation/event/WaitQueue.scala, ${BASE}/src/main/scala/scalation/modeling/SymbolicRegression.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/LBFGSPrms.scala, 
${BASE}/src/main/scala/scalation/optimization/LassoAddm.scala, ${BASE}/src/main/scala/scalation/modeling/FeatureSelection.scala, ${BASE}/src/main/scala/scalation/random/RNG.scala, ${BASE}/src/main/scala/scalation/optimization/WolfeLS.scala, ${BASE}/src/main/scala/scalation/database/graph_pm/Graph0.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/TrendModel.scala, ${BASE}/src/main/scala/scalation/animation/SimpleAnimator2.scala, ${BASE}/src/main/scala/scalation/simulation/Locatable2.scala, ${BASE}/src/main/scala/scalation/optimization/functions/BenchmarkFunction.scala, ${BASE}/src/main/scala/scalation/scala2d/VizFrame.scala, ${BASE}/src/main/scala/scalation/dynamics/BallFlight.scala, ${BASE}/src/main/scala/scalation/random/VariateSet.scala, ${BASE}/src/main/scala/scalation/simulation/monte_carlo/SphereVolume.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/ARX_Quad_MV.scala, ${BASE}/src/main/scala/scalation/modeling/PolyRegression.scala, ${BASE}/src/main/scala/scalation/optimization/Newton_NoLS.scala, ${BASE}/src/main/scala/scalation/mathstat/RTensor4D.scala, ${BASE}/src/main/scala/scalation/database/table/PurchaseOrderDB.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/ARIMA.scala, ${BASE}/src/main/scala/scalation/BiMap.scala, ${BASE}/src/main/scala/scalation/database/SpanningTree.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/ARY_Quad.scala, ${BASE}/src/main/scala/scalation/simulation/agent/example_1/Bank.scala, ${BASE}/src/main/scala/scalation/simulation/agent/Model.scala, ${BASE}/src/main/scala/scalation/calculus/Fourier.scala, ${BASE}/src/main/scala/scalation/Util.scala, ${BASE}/src/main/scala/scalation/database/table/LTable.scala, ${BASE}/src/main/scala/scalation/scala3d/Source.scala, ${BASE}/src/main/scala/scalation/simulation/event/example_1/Machine.scala, ${BASE}/src/main/scala/scalation/calculus/Poly.scala, ${BASE}/src/main/scala/scalation/simulation/agent/Source.scala, 
${BASE}/src/main/scala/scalation/animation/Animator.scala, ${BASE}/src/main/scala/scalation/modeling/Example_BPressure.scala, ${BASE}/src/main/scala/scalation/database/mugraph_pm/MuDualSim.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/NaiveBayesR.scala, ${BASE}/src/main/scala/scalation/simulation/event/CausalLink.scala, ${BASE}/src/main/scala/scalation/random/VariateMat.scala, ${BASE}/src/main/scala/scalation/dynamics/ModRosenbrock.scala, ${BASE}/src/main/scala/scalation/mathstat/VMatrixD.scala, ${BASE}/src/main/scala/scalation/simulation/process/example_1/OneWayStreet.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/FitC.scala, ${BASE}/src/main/scala/scalation/simulation/activity/PetriNetRules.scala, ${BASE}/src/main/scala/scalation/modeling/RidgeRegression.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/HiddenMarkov.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/Forecaster_Reg.scala, ${BASE}/src/main/scala/scalation/modeling/FitM.scala, ${BASE}/src/main/scala/scalation/modeling/SumQueue.scala, ${BASE}/src/main/scala/scalation/database/graph_pm/ExampleGraphD.scala, ${BASE}/src/main/scala/scalation/dynamics/RungeKutta2.scala, ${BASE}/src/main/scala/scalation/database/TNode.scala, ${BASE}/src/main/scala/scalation/database/TimeInterval.scala, ${BASE}/src/main/scala/scalation/database/mugraph_pm/MuDualIso.scala, ${BASE}/src/main/scala/scalation/modeling/Initialzer.scala, ${BASE}/src/main/scala/scalation/scala2d/Transform.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/SimpleLogisticRegression.scala, ${BASE}/src/main/scala/scalation/SimpleUniform.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/Example_ILI.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/DM_LBFGS.scala, ${BASE}/src/main/scala/scalation/simulation/state/MarkovChain.scala, ${BASE}/src/main/scala/scalation/optimization/Hungarian.scala, ${BASE}/src/main/scala/scalation/simulation/process/WaitQueue_LCFS.scala, 
${BASE}/src/main/scala/scalation/modeling/RegressionTree.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/ARX_Quad.scala, ${BASE}/src/main/scala/scalation/simulation/event/Model.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/SimpleLDA.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/LineSearchTriInterval.scala, ${BASE}/src/main/scala/scalation/modeling/TranRegression.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/multivar/RandomWalk_Star.scala, ${BASE}/src/main/scala/scalation/GenIndexHtml.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/CNN_1D.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/DecisionTree_C45.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/MakeMatrix4TS.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/CNN_2D.scala, ${BASE}/src/main/scala/scalation/random/StreamMaker.scala, ${BASE}/src/main/scala/scalation/random/CDF.scala, ${BASE}/src/main/scala/scalation/simulation/process/example_1/UGA_Bus.scala, ${BASE}/src/main/scala/scalation/modeling/RoundRegression.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/neuralforecasting/LSTM.scala, ${BASE}/src/main/scala/scalation/dynamics/Radau.scala, ${BASE}/src/main/scala/scalation/simulation/event/example_1/Poisson2.scala, ${BASE}/src/main/scala/scalation/optimization/WolfeLS2.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/Classifier.scala, ${BASE}/src/main/scala/scalation/database/graph_pm/GraphGen.scala, ${BASE}/src/main/scala/scalation/modeling/RegressionTreeRF.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/VAR.scala, ${BASE}/src/main/scala/scalation/simulation/process/example_1/TrafficTurn.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/ARIMA_diff.scala, ${BASE}/src/main/scala/scalation/simulation/agent/SimAgent.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newtonC/FunctionDescriptors.scala, 
${BASE}/src/main/scala/scalation/mathstat/Correlogram.scala, ${BASE}/src/main/scala/scalation/modeling/Regression.scala, ${BASE}/src/main/scala/scalation/random/Random2.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/SimpleMovingAverage.scala, ${BASE}/src/main/scala/scalation/database/table/Table.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/neuralforecasting/LayerNorm.scala, ${BASE}/src/main/scala/scalation/simulation/event/example_1/Bank.scala, ${BASE}/src/main/scala/scalation/mathstat/TimeStatistic.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/EvaluationLogic.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/Example_MTcars.scala, ${BASE}/src/main/scala/scalation/calculus/DRadial.scala, ${BASE}/src/main/scala/scalation/simulation/event/example_1/CallCenter2.scala, ${BASE}/src/main/scala/scalation/modeling/clustering/GapStatistic.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/SimpleExpSmoothing.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/TrendModel.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/ARX_Symb.scala, ${BASE}/src/main/scala/scalation/database/relation/Ex_ProduceSales.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/SimpleMovingAverage.scala, ${BASE}/src/main/scala/scalation/modeling/SymRidgeRegression.scala, ${BASE}/src/main/scala/scalation/random/Quantile.scala, ${BASE}/src/main/scala/scalation/mathstat/Fac_SVD.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/LBFGSReturnCode.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/ForecastUtil.scala, ${BASE}/src/main/scala/scalation/simulation/process/Transport.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/LogisticRegression.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/QNewton.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/ARIMA.scala, ${BASE}/src/main/scala/scalation/simulation/agent/Sink.scala, 
${BASE}/src/main/scala/scalation/optimization/quasi_newton/LBFGSResults.scala, ${BASE}/src/main/scala/scalation/scala2d/Base.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/NeuralNet_3L.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/NeuralNet_3L_Ck.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/SupportVectorMachine.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/Forecaster_D.scala, ${BASE}/src/main/scala/scalation/simulation/Temporal.scala, ${BASE}/src/main/scala/scalation/simulation/process/example_1/Traffic.scala, ${BASE}/src/main/scala/scalation/mathstat/VectorT.scala, ${BASE}/src/main/scala/scalation/database/mugraph_pm/MuGraphGen.scala, ${BASE}/src/main/scala/scalation/optimization/NewtonRaphson.scala, ${BASE}/src/main/scala/scalation/optimization/WolfeConditions.scala, ${BASE}/src/main/scala/scalation/database/BpNode.scala, ${BASE}/src/main/scala/scalation/simulation/Coroutine.scala, ${BASE}/src/main/scala/scalation/simulation/process/example_1/CallCenter.scala, ${BASE}/src/main/scala/scalation/mathstat/Histogram.scala, ${BASE}/src/main/scala/scalation/simulation/agent/Junction.scala, ${BASE}/src/main/scala/scalation/scala2d/Shapes.scala, ${BASE}/src/main/scala/scalation/database/Tree.scala, ${BASE}/src/main/scala/scalation/optimization/MonitorEpochs.scala, ${BASE}/src/main/scala/scalation/simulation/NH_PoissonProcess.scala, ${BASE}/src/main/scala/scalation/database/TimeOfWeek.scala, ${BASE}/src/main/scala/scalation/database/graph_pm/DualIso.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/BaggingTrees.scala, ${BASE}/src/main/scala/scalation/CommonFunctions.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/OptimizationLogic.scala, ${BASE}/src/main/scala/scalation/Timer.scala, ${BASE}/src/main/scala/scalation/simulation/tableau/Ex_CallCenter.scala, ${BASE}/src/main/scala/scalation/simulation/event/example_1/Poisson.scala, 
${BASE}/src/main/scala/scalation/simulation/process/example_MBM/Bank.scala, ${BASE}/src/main/scala/scalation/database/Spatial.scala, ${BASE}/src/main/scala/scalation/optimization/GradientDescent_NoLS.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/neuralforecasting/TrEncoderLayer.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/LBFGSLineSearchStep.scala, ${BASE}/src/main/scala/scalation/database/graph_pm/DualSim.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/LBFGSBacktrackingWolfe.scala, ${BASE}/src/main/scala/scalation/optimization/linear_opt/QuadraticSimplex.scala, ${BASE}/src/main/scala/scalation/mathstat/PlotM.scala, ${BASE}/src/main/scala/scalation/modeling/SymLassoRegression.scala, ${BASE}/src/main/scala/scalation/mathstat/MatrixD2.scala, ${BASE}/src/main/scala/scalation/modeling/RegressionTreeMT.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/LBFGS_NoLS.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/TranARY.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/ForecastMatrix.scala, ${BASE}/src/main/scala/scalation/modeling/Imputation.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/LBFGS_B.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/LBFGSLineSearchFailure.scala, ${BASE}/src/main/scala/scalation/modeling/Perceptron.scala, ${BASE}/src/main/scala/scalation/animation/DgAnimator.scala, ${BASE}/src/main/scala/scalation/simulation/agent/WaitQueue.scala, ${BASE}/src/main/scala/scalation/simulation/tableau/Queue_MM1.scala, ${BASE}/src/main/scala/scalation/optimization/linear_opt/MinimizerLP.scala, ${BASE}/src/main/scala/scalation/calculus/BasisFunction.scala, ${BASE}/src/main/scala/scalation/modeling/Fit.scala, ${BASE}/src/main/scala/scalation/optimization/NLPTest.scala, ${BASE}/src/main/scala/scalation/modeling/clustering/KMeansClustererHW.scala, ${BASE}/src/main/scala/scalation/database/BpTreeMap.scala, 
${BASE}/src/main/scala/scalation/simulation/monte_carlo/MonteCarloIntegration.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/NaiveBayes.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/RegressionMV.scala, ${BASE}/src/main/scala/scalation/database/LinHashMap.scala, ${BASE}/src/main/scala/scalation/calculus/Radial.scala, ${BASE}/src/main/scala/scalation/database/graph/Topological.scala, ${BASE}/src/main/scala/scalation/database/graph_pm/GraphSim.scala, ${BASE}/src/main/scala/scalation/database/graph_pm/ExampleGraphS.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/multivar/AR_Star.scala, ${BASE}/src/main/scala/scalation/modeling/classifying/DecisionTree_C45wp.scala, ${BASE}/src/main/scala/scalation/database/table/BankDB.scala, ${BASE}/src/main/scala/scalation/mathstat/RTensorD.scala, ${BASE}/src/main/scala/scalation/mathstat/Statistic.scala, ${BASE}/src/main/scala/scalation/scala3d/Sink.scala, ${BASE}/src/main/scala/scalation/modeling/clustering/MarkovClustering.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/ARX_Quad_D.scala, ${BASE}/src/main/scala/scalation/simulation/process/example_1/TrafficLaneChange.scala, ${BASE}/src/main/scala/scalation/simulation/event/WaitQueue_LCFS.scala, ${BASE}/src/main/scala/scalation/simulation/queueingnet/MMck_Queue.scala, ${BASE}/src/main/scala/scalation/modeling/ActivationFun.scala, ${BASE}/src/main/scala/scalation/HyperParameter.scala, ${BASE}/src/main/scala/scalation/simulation/process/VSource.scala, ${BASE}/src/main/scala/scalation/simulation/queueingnet/MGc_Queue.scala, ${BASE}/src/main/scala/scalation/database/Normalization.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/WeightedMovingAverage.scala, ${BASE}/src/main/scala/scalation/database/table/MovieDB.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/Forecaster.scala, ${BASE}/src/main/scala/scalation/database/triplegraph/TripleGraphMatcher.scala, 
${BASE}/src/main/scala/scalation/modeling/forecasting/neuralforecasting/RNN.scala, ${BASE}/src/main/scala/scalation/database/graph_pm/GraphMetrics.scala, ${BASE}/src/main/scala/scalation/optimization/quasi_newton/BFGS.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/AR1MA.scala, ${BASE}/src/main/scala/scalation/database/Identifiable.scala, ${BASE}/src/main/scala/scalation/animation/Dgraph.scala, ${BASE}/src/main/scala/scalation/modeling/SimpleRegression.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting_old/RandomWalk.scala, ${BASE}/src/main/scala/scalation/optimization/LineSearch.scala, ${BASE}/src/main/scala/scalation/optimization/GridSearch.scala, ${BASE}/src/main/scala/scalation/ValueType.scala, ${BASE}/src/main/scala/scalation/simulation/process/Sink.scala, ${BASE}/src/main/scala/scalation/modeling/neuralnet/NeuralNet_XL.scala, ${BASE}/src/main/scala/scalation/dynamics/DormandPrince.scala, ${BASE}/src/main/scala/scalation/calculus/DFourier.scala, ${BASE}/src/main/scala/scalation/mathstat/Fac_QR_RR.scala, ${BASE}/src/main/scala/scalation/database/MakeSchema.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/ARX.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/Example_Covid.scala, ${BASE}/src/main/scala/scalation/modeling/forecasting/neuralforecasting/Attention.scala, ${BASE}/src/main/scala/scalation/database/graph_pm/Graph.scala, ${BASE}/src/main/scala/scalation/simulation/process/WaitQueue.scala, ${BASE}/src/main/scala/scalation/modeling/clustering/Clusterer.scala) -[debug] > initialChanges = InitialChanges(Changes(added = Set(), removed = Set(), changed = Set(), unmodified = ...),Set(),Set(),API Changes: Set()) -[debug] No changes diff --git a/target/streams/compile/dependencyClasspath/_global/streams/export b/target/streams/compile/dependencyClasspath/_global/streams/export deleted file mode 100644 index e76194940..000000000 --- a/target/streams/compile/dependencyClasspath/_global/streams/export +++ /dev/null 
@@ -1 +0,0 @@ -C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scala-lang\scala3-library_3\3.6.4\scala3-library_3-3.6.4.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scalafx\scalafx_3\22.0.0-R33\scalafx_3-22.0.0-R33.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scala-lang\scala-library\2.13.15\scala-library-2.13.15.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-base\22\javafx-base-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-controls\22\javafx-controls-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-fxml\22\javafx-fxml-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-graphics\22\javafx-graphics-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-media\22\javafx-media-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-swing\22\javafx-swing-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-web\22\javafx-web-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-base\22\javafx-base-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-controls\22\javafx-controls-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-fxml\22\javafx-fxml-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-graphics\22\javafx-graphics-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-media\22\javafx-media-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\http
s\repo1.maven.org\maven2\org\openjfx\javafx-swing\22\javafx-swing-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-web\22\javafx-web-22-win.jar diff --git a/target/streams/compile/doc/_global/streams/export b/target/streams/compile/doc/_global/streams/export deleted file mode 100644 index e69de29bb..000000000 diff --git a/target/streams/compile/doc/_global/streams/out b/target/streams/compile/doc/_global/streams/out deleted file mode 100644 index b70742ae2..000000000 --- a/target/streams/compile/doc/_global/streams/out +++ /dev/null @@ -1,2242 +0,0 @@ -[info] Main Scala API documentation to C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\api... -[debug] Returning already retrieved and compiled bridge: C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scala-lang\scala3-sbt-bridge\3.6.4\scala3-sbt-bridge-3.6.4.jar. -[debug] Calling Dottydoc with arguments (ScaladocInterface): -[debug]  -d -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\api -[debug]  -deprecation -[debug]  -explain -[debug]  -new-syntax -[debug]  -Wunused:all -[debug]  -Xfatal-warnings -[debug]  -project -[debug]  scalation -[debug]  -classpath -[debug]  
C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scala-lang\scala3-library_3\3.6.4\scala3-library_3-3.6.4.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scalafx\scalafx_3\22.0.0-R33\scalafx_3-22.0.0-R33.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scala-lang\scala-library\2.13.15\scala-library-2.13.15.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-base\22\javafx-base-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-controls\22\javafx-controls-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-fxml\22\javafx-fxml-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-graphics\22\javafx-graphics-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-media\22\javafx-media-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-swing\22\javafx-swing-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-web\22\javafx-web-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-base\22\javafx-base-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-controls\22\javafx-controls-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-fxml\22\javafx-fxml-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-graphics\22\javafx-graphics-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-media\22\javafx-media-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.o
rg\maven2\org\openjfx\javafx-swing\22\javafx-swing-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-web\22\javafx-web-22-win.jar -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\BiMap$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\BiMap.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\Bool$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\Calc$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\Calc.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\CircularQueue$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\CircularQueue.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\CommonFunctions$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\Coordinates$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\Coordinates.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\Counter$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\Counter.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\DoublyLinkedList$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\DoublyLinkedList.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\Earth.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\EasyWriter$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\EasyWriter.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\Fib$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\Fib.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\FileReader$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\GenIndexHtml$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\HyperParameter$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\HyperParameter.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\LatLong$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\LatLong.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\LatLong2CTM.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\LatLong2UTM.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\Make_VectorI$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\MergeSortIndirect$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\MergeSortIndirect.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\MultiArrayDeque$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\MultiArrayDeques.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\PriorityQueue.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\Ring$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\Ring.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\SetExt$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\SimpleUniform.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\SkipList$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\SkipList.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\SkipNodeType.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\TimeNum$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\TimeNum.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\Timer$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\UTM2LatLong.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\Unicode$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\Unicode.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\Util$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\ValueType$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\ValueTypeOrd.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\AnimateCommand.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\Animator.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\CommandType.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\Counter.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\DgAnimator$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\DgAnimator.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\Dgraph$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\Dgraph.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\EidCounter.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\PointOn$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\SimpleAnimator$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\SimpleAnimator.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\SimpleAnimator2$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\SimpleAnimator2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\dgAnimatorTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\dgAnimatorTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\dgAnimatorTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\dgraphTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\simpleAnimator2Test.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\animation\simpleAnimatorTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\biMapTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\boolTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\AutoDiff$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\AutoDiff.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\B_Spline$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\B_Spline.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\BasisFunction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\DB_Spline$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\DB_Spline.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\DBasisFunction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\DFourier$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\DFourier.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\DRadial$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\DRadial.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\Differential$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\Differential.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\FFT$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\FFT.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\Fourier$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\Fourier.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\GaussianFunc.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\Hilbert$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\Hilbert.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\Integral$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\Integral.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\Node.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\Poly$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\Poly.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\Radial$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\Radial.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\RadialType.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\autoDiffTest.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\autoDiffTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\b_SplineTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\b_SplineTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\b_SplineTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\b_SplineTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\dB_SplineTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\dB_SplineTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\dFourierTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\dRadialTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\differentialTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\differentialTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\fFTTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\fourierTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\hilbertTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\integralTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\integralTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\polyTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\calculus\radialTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\cforTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\circularQueueTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\commonFunTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\coordinatesTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\coordinatesTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\coordinatesTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\coordinatesTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\counterTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\BinTree.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\BpNode$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\BpNode.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\BpTreeMap$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\BpTreeMap.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\BpTreeMultiMap.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\FD.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\HashMultiMap.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\Identifiable.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\JHashMap.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\JHashMultiMap.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\JTreeMap.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\JTreeMultiMap.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\JavaMap$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\KeyType.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\LinHashMap$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\LinHashMap.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\LinHashMultiMap.tasty 
-[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\MakeSchema$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\MakeSchema.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\MaxSpanningTree.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\MinSpanningTree$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\MinSpanningTree.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\MultiMap$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\Normalization$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\Normalization.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\SpanningTree$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\SpanningTree.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\Spatial.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\TNode$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\TNode.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\Tabular$package.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\Tabular.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\Temporal.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\TimeInterval$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\TimeInterval.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\TimeOfWeek$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\TimeOfWeek.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\Tree$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\Tree.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\TreeMultiMap.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\TreeNode.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\bpNodeTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\bpNodeTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\bpNodeTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\bpTreeMapTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\bpTreeMapTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\bpTreeMapTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\bpTreeMapTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\bpTreeMapTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\Edge$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\Edge.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\EdgeType$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\EdgeType.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\PGraph$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\PGraph.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\SocialNetwork.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\Topological$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\Topological.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\Vertex$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\Vertex.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\VertexType$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\VertexType.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\edgeTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\edgeTypeTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\pGraphTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\pGraphTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\vertexTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph\vertexTypeTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\DualIso$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\DualIso.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\DualSim$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\DualSim.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\ExampleGraphD.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\ExampleGraphS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\Graph$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\Graph.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\Graph0.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\GraphDFS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\GraphDFS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\GraphGen$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\GraphGen.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\GraphIO$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\GraphIO.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\GraphMatcher.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\GraphMetrics$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\GraphMetrics.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\GraphSim$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\GraphSim.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\MatchAnswers$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\MatchAnswers.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\ShortestPath$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\ShortestPath.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\TopSort$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\TopSort.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\TrafficLight.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\dualIsoTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\dualIsoTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\dualIsoTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\dualSimTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\dualSimTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\dualSimTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphDFSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphGenTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphGenTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphGenTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphGenTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphGenTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphGenTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphGenTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphGenTest8.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphIOTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphMetricsTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphSimTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphSimTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphSimTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphSimTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\graphTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\matchAnswersTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\shortestPathTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\shortestPathTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_pm\topSortTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_relation\Vertex.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_relation\VertexType$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_relation\VertexType.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\graph_relation\vertexTypeTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\javaMapTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\linHashMapTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\logic\SATsolver$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\logic\sATsolverTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\makeSchemaTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\makeSchemaTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\minSpanningTreeTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\minSpanningTreeTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\minSpanningTreeTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\minSpanningTreeTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\ExampleMuGraphD.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\ExampleMuGraphS.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\MatchAnswers$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\MatchAnswers.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\MuDualIso$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\MuDualIso.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\MuDualSim$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\MuDualSim.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\MuGraph$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\MuGraph.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\MuGraphGen$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\MuGraphGen.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\MuGraphMatcher.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\MuGraphSim$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\MuGraphSim.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\matchAnswersTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\muDualIsoTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\muDualIsoTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\muDualIsoTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\muDualSimTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\muDualSimTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\muDualSimTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\muGraphGenTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\muGraphGenTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\muGraphGenTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\muGraphSimTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\muGraphSimTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\muGraphSimTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\muGraphTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\mugraph_pm\muGraphTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\multiMapTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\normalizationTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\normalizationTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\normalizationTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\normalizationTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\normalizationTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\normalizationTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\normalizationTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\normalizationTest8.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\Ex_Days.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\Ex_ProductSales.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\Ex_Teaching$package.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\Ex_Teaching.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\Relation$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\Relation.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\TableGen$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\TableGen.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\Vectr$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\relationTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\relationTest11.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\relationTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\relationTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\relationTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\relationTest8.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\relationTest9.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\showTables.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\relation\tableGenTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\spanningTreeTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\tNodeTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\BankDB$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\Edge.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\GTable$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\GTable.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\KGTable$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\KGTable.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\LTable$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\LTable.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\MovieDB$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\PurchaseOrderDB$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\PurchaseOrderDB.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\TA_AssignmentDB$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\TA_AssignmentDB.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\Table$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\Table.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\TableGen$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\TableGen.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\TimeComparison$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\VTable$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\VTable.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\Vertex.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\Vertex_.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\bankDB.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\bankDB2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\gTableTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\gTableTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\gTableTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\kGTableTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\kGTableTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\lTableTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\lTableTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\lTableTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\lTableTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\movieDB.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\showTabs.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\tableGenTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\tableTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\tableTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\tableTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\timer_function.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\vTableTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\table\vTableTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\timeIntervalTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\timeIntervalTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\timeIntervalTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\timeOfWeekTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\treeTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\treeTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\treeTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\triplegraph\RDFTriple.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\triplegraph\Triple.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\triplegraph\TripleGraph$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\triplegraph\TripleGraph.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\triplegraph\TripleGraphMatcher.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\triplegraph\TripleGraphSim$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\triplegraph\TripleGraphSim.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\triplegraph\tripleGraphSimTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\database\triplegraph\tripleGraphTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\doublyLinkedListTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\BallFlight$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\DormandPrince$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\DormandPrince.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\DynamicEq$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\DynamicEq.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\FirstOrderPDE$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\FirstOrderPDE.tasty 
-[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\Integrator$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\Integrator.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\LinearDiffEq$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\LinearDiffEq.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\ModRosenbrock$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\ModRosenbrock.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\ParabolicPDE$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\ParabolicPDE.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\Radau$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\Radau.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\Reactions$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\RungeKutta$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\RungeKutta.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\RungeKutta2$package.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\RungeKutta2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\RungeKutta3$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\RungeKutta3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\ballFlight.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\dormandPrinceTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\dormandPrinceTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\dormandPrinceTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\dormandPrinceTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\dynamicEqTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\firstOrderPDETest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\firstOrderPDETest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\firstOrderPDETest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\linearDiffEqTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\modRosenbrockTest.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\modRosenbrockTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\parabolicPDETest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\radauTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\reactions.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\rungeKutta2Test.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\rungeKutta2Test2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\rungeKutta3Test.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\rungeKutta3Test2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\rungeKutta3Test3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\rungeKuttaTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\dynamics\rungeKuttaTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\easyWriterTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\fibTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\genIndexHtml.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\hyperParameterTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\hyperParameterTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\latLongTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\latLongTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\latLongTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\latLongTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\makeVectorI.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Bidiagonal$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Bidiagonal.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Canvas.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Combinatorics$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Combinatorics.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Complex$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Complex.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Convert$package.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Correlogram$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Correlogram.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Eigen$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Eigenvalue.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\EigenvalueSym.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Eigenvector.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Fac_Cholesky$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Fac_Cholesky.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Fac_Inverse$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Fac_Inverse.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Fac_LQ$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Fac_LQ.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Fac_LU$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Fac_LU.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Fac_QR$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Fac_QR.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Fac_QR_RR$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Fac_QR_RR.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Fac_SVD$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Fac_SVD.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Factorization.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\FramelessHistogram.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\FramelessPlot.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\HCanvas.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Hessenburg.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Histogram$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Histogram.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Householder$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Householder.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\HouseholderT.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\InverseTest$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\MatrixCalc$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\MatrixCalc.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\MatrixD$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\MatrixD.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\MatrixD2$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\MatrixD2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\MatrixD2Example.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\MatrixDExample.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\MatrixDOps.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\MatrixI.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Pivoting.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\PivotingTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Plot$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Plot.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\PlotC$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\PlotC.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\PlotM$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\PlotM.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Probability$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Probability.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\RTensor4D$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\RTensor4D.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\RTensorD$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\RTensorD.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\StatTable$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\StatTable.tasty 
-[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Statistic$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Statistic.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Stats4TS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Stats4TS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\SymTriMatrixD.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\SymmetricQRstep.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\TensorD$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\TensorD.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\TimeStatistic$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\TimeStatistic.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\TnT_Split$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\TnT_Split.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Transform$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\Transform.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\VMatrixD$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\VMatrixD.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\VectorC$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\VectorC.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\VectorD$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\VectorD.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\VectorDOps.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\VectorI$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\VectorI.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\VectorL$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\VectorL.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\VectorS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\VectorS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\VectorT$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\VectorT.tasty 
-[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\bidiagonalTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\combinatoricsTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\combinatoricsTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\complexTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\correlogramTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\cosForm.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\eigenTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\fac_CholeskyTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\fac_CholeskyTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\fac_CholeskyTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\fac_InverseTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\fac_LQTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\fac_LUTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\fac_LUTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\fac_LUTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\fac_QRTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\fac_QRTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\fac_QR_RRTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\fac_SVDTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\fac_SVDTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\fac_SVDTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\fac_SVDTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\histogramTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\householderTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\inverseTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\log1pForm.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\logForm.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\matrixCalc0.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\matrixCalc2.tasty 
-[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\matrixCalc3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\matrixCalc4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\matrixD2Test.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\matrixD2Test2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\matrixDTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\matrixDTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\matrixDTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\matrixDTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\matrixDTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\matrixDTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\matrixDTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\plotCTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\plotMTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\plotMTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\plotTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\powForm.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\probabilityTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\probabilityTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\probabilityTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\probabilityTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\probabilityTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\probabilityTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\rTensor4DTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\rTensorDTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\rangeForm.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\sinForm.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\statTableTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\statisticTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\stats4TSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\tensorDTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\tensorDTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\tensorDTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\timeStatisticTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\tnT_SplitTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\transformTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\transformTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\transformTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\vMatrixDTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\vectorCTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\vectorCTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\vectorCTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\vectorDTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\vectorDTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\vectorDTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\vectorDTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\vectorDTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\vectorDTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\vectorITest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\vectorLTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\vectorSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\vectorSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\vectorTTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\vectorTTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mathstat\zForm.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mergeSortIndirectTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\mergeSortIndirectTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\AFF.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ActivationFun$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ActivationFun.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\BestStep.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\DistanceOutlier.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Example_AutoMPG$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Example_AutoMPG.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Example_BPressure$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Example_BPressure.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Example_BasketBall$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Example_BasketBall.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ExpRegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ExpRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ExpandableVariable.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\FeatureSelection$package.tasty 
-[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\FeatureSelection.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Fit$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Fit.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\FitM.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Imputation$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Imputation.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ImputeBackward.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ImputeForward.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ImputeMean.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ImputeMovingAvg.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ImputeNormal.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ImputeNormalWin.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ImputeRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Initializer.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\KNN_Regression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\KNN_Regression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\LassoRegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\LassoRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\MatrixTransform$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Model$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Model.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\MonitorLoss.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\NoSubModels.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Node.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\NonlinearRegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\NonlinearRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\NullModel$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\NullModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Outlier$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Outlier.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Perceptron$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Perceptron.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\PoissonRegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\PoissonRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\PolyORegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\PolyORegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\PolyRegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\PolyRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Predictor$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Predictor.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\QoF.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\QuantileOutlier.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\QuartileXOutlier.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Regression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Regression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RegressionCat$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RegressionCat.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RegressionTree$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RegressionTree.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RegressionTreeGB$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RegressionTreeGB.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RegressionTreeMT$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RegressionTreeMT.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RegressionTreeRF$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RegressionTreeRF.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RegressionTreeRF_MT$package.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RegressionTreeRF_MT.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RegressionWLS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RegressionWLS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RidgeRegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RidgeRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RoundRegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\RoundRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Sampling$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Scaling.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\SelectionTech.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\SimpleExpRegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\SimpleExpRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\SimpleRegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\SimpleRegression.tasty -[debug] 
 C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\SimplerRegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\SimplerRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\SumQueue$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\SumQueue.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\SumSqQueue.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\SymLassoRegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\SymLassoRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\SymRidgeRegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\SymRidgeRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\SymbolicRegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\SymbolicRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\TestFit.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\TranRegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\TranRegression.tasty 
-[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\TranRegressionEx.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\TrigRegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\TrigRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Variable$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\Variable.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\VariableKind.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\activationFunTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\activationFunTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\activationFunTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\activationFunTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\activationFunTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\activationFunTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\BaggingTrees$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\BaggingTrees.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\BayesClassifier$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\BayesClassifier.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\Classifier$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\Classifier.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\DecisionTree$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\DecisionTree.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\DecisionTree_C45$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\DecisionTree_C45.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\DecisionTree_C45wp$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\DecisionTree_C45wp.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\DecisionTree_ID3$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\DecisionTree_ID3.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\DecisionTree_ID3wp$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\DecisionTree_ID3wp.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\Example_BreastCancer$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\Example_BreastCancer.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\Example_Diabetes$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\Example_Diabetes.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\Example_Iris$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\Example_Iris.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\Example_MTcars.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\Example_PlayTennis$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\Example_PlayTennis.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\Example_PlayTennis_Cont$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\Example_PlayTennis_Cont.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\FitC$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\FitC.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\HiddenMarkov$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\HiddenMarkov.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\KNN_Classifier$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\KNN_Classifier.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\LinDiscAnalyis$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\LinDiscAnalyis.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\LogisticRegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\LogisticRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\NaiveBayes$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\NaiveBayes.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\NaiveBayesR$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\NaiveBayesR.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\Node.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\NullModel$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\NullModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\QoFC.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\RandomForest$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\RandomForest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\SimpleLDA$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\SimpleLDA.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\SimpleLogisticRegression$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\SimpleLogisticRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\SupportVectorMachine$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\SupportVectorMachine.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\TANBayes$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\TANBayes.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\baggingTreesTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\baggingTreesTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\baggingTreesTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\baggingTreesTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\baggingTreesTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\baggingTreesTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\baggingTreesTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\bayesClassifierTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\bayesClassifierTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\classifierTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\decisionTreeTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\decisionTree_C45Test.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\decisionTree_C45Test2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\decisionTree_C45Test3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\decisionTree_C45Test4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\decisionTree_C45Test5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\decisionTree_C45wpTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\decisionTree_C45wpTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\decisionTree_ID3Test.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\decisionTree_ID3Test2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\decisionTree_ID3Test3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\decisionTree_ID3wpTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\decisionTree_ID3wpTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\decisionTree_ID3wpTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\example_BreastCancerTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\example_DiabetesTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\example_IrisTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\example_PlayTennisTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\example_PlayTennis_ContTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\fitCTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\fitCTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\fitCTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\fitCTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\hiddenMarkovTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\hiddenMarkovTest2.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\hiddenMarkovTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\hiddenMarkovTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\kNN_ClassifierTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\kNN_ClassifierTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\kNN_ClassifierTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\kNN_ClassifierTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\linDiscAnalyisTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\logisticRegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\logisticRegressionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\naiveBayesRTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\naiveBayesRTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\naiveBayesTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\naiveBayesTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\naiveBayesTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\naiveBayesTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\nullModelTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\randomForestTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\randomForestTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\randomForestTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\randomForestTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\randomForestTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\randomForestTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\randomForestTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\simpleLDATest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\simpleLDATest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\simpleLogisticRegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\simpleLogisticRegressionTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\simpleLogisticRegressionTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\simpleLogisticRegressionTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\simpleLogisticRegressionTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\supportVectorMachineTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\supportVectorMachineTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\tANBayesTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\tANBayesTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\tANBayesTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\classifying\tANBayesTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\Algorithm.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\Cluster.tasty -[debug] 
 C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\Clusterer.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\ClusteringPredictor$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\ClusteringPredictor.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\Distance$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\GapStatistic$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\GapStatistic.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\HierClusterer$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\HierClusterer.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\KMeansClusterer$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\KMeansClusterer.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\KMeansClusterer2$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\KMeansClusterer2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\KMeansClustererHW$package.tasty 
-[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\KMeansClustererHW.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\KMeansClustererPP$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\KMeansClustererPP.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\KMeansPPClusterer$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\KMeansPPClusterer.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\KMeansPPClustererTester.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\MarkovClusterer.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\MarkovClustering$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\RandomGraph$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\RandomGraph.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\TightClusterer$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\TightClusterer.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\clusteringPredictorTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\clusteringPredictorTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\clusteringPredictorTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\gapStatisticTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\gapStatisticTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\hierClustererTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\hierClustererTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansClusterer2Test.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansClusterer2Test2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansClusterer2Test3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansClusterer2Test4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansClustererHWTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansClustererHWTest2.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansClustererHWTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansClustererPPTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansClustererPPTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansClustererPPTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansClustererTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansClustererTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansClustererTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansClustererTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansPPClustererTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansPPClustererTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansPPClustererTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\kMeansPPClustererTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\markovClustererTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\markovClustererTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\randomGraphTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\clustering\tightClustererTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\example_AutoMPG_Correlation.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\example_AutoMPG_NullModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\example_AutoMPG_QuadRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\example_AutoMPG_Regression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\example_AutoMPG_SimpleRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\example_AutoMPG_SimplerRegression.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\example_BPressureTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\example_BPressureTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\example_BasketBallTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\expRegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\expRegressionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\expRegressionTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\fitTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\fitTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\AR$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\AR.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARIMA$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARIMA.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARIMA_diff$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARIMA_diff.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARMA$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARMA.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARX$package.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARX.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARX_D$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARX_D.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARX_Quad$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARX_Quad.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARX_Quad_D$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARX_Quad_D.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARX_Symb$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARX_Symb.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARX_Symb_D$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARX_Symb_D.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARY$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARY.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARY_D$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARY_D.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARY_Quad$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ARY_Quad.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Baseline.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Baselines$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\DTW$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\DTW.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Diagnoser.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Example_Covid$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Example_Covid.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Example_GasFurnace$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Example_GasFurnace.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Example_ILI$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Example_ILI.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Example_LakeLevels.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ForecastMatrix$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\ForecastMatrix.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Forecaster.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Forecaster_D.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Forecaster_Reg.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\MakeMatrix4TS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\MakeMatrix4TS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\MakeMatrix4TSY.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\NullModel$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\NullModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Periodogram$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Periodogram.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\RandomWalk$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\RandomWalk.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\RandomWalkS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\RandomWalkS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\SARY$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\SARY.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\SimpleExpSmoothing$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\SimpleExpSmoothing.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\SimpleMovingAverage$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\SimpleMovingAverage.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Stationarity$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Stationarity_KPSS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\Stationarity_KPSS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\TranARY$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\TranARY.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\TrendModel$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\TrendModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\UnitRoot.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\WeightedMovingAverage$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\WeightedMovingAverage.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRIMATest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRIMATest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRIMATest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRIMA_diffTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRMATest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRMATest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRMATest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRMATest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRMATest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRMATest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRMATest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRXTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRXTest4.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRXTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRX_DTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRX_DTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRX_QuadTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRX_QuadTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRX_QuadTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRX_Quad_DTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRX_Quad_DTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRX_SymbTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRX_SymbTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRX_Symb_DTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRX_Symb_DTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRYTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRYTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRYTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRYTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRYTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRYTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRY_DTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRY_DTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRY_DTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRY_DTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRY_QuadTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRY_QuadTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRY_QuadTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRY_QuadTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\aRY_QuadTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\baselineTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\dTWTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_CovidTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_CovidTest10.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_CovidTest11.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_CovidTest12.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_CovidTest13.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_CovidTest14.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_CovidTest15.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_CovidTest16.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_CovidTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_CovidTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_CovidTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_CovidTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_CovidTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_CovidTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_CovidTest8.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_CovidTest9.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_GasFurnaceTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_GasFurnaceTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_ILITest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_ILITest10.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_ILITest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_ILITest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_ILITest5.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_ILITest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_ILITest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_ILITest8.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\example_ILITest9.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\forecastMatrixTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\forecastMatrixTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\forecastMatrixTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\forecastMatrixTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\AR_Star$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\AR_Star.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\ForecastTensor$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\ForecastTensor.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\RandomWalk_Star$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\RandomWalk_Star.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\VAR$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\VAR.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\aR_StarTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\aR_StarTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\aR_StarTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\forecastTensorTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\forecastTensorTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\randomWalk_StarTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\randomWalk_StarTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\randomWalk_StarTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\vARTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\vARTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\vARTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\vARTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\vARTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\multivar\vARTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\Attention$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\Attention.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\DenseLayer.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\DropoutLayer.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\GRU$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\GRU.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\Gate.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\LSTM$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\LSTM.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\LayerNorm.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\NeuralNet_3L4TS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\NeuralNet_3L4TS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\NeuralNet_XL4TS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\NeuralNet_XL4TS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\PositionalEnc$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\PositionalEnc.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\RMSNorm.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\RNN$package.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\RNN.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\TrEncoderLayer.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\attentionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\attentionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\attentionTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\attentionTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\attentionTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\gRUTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\gRUTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\gRUTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\lSTMTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\lSTMTest2.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\lSTMTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\neuralNet_3L4TSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\neuralNet_3L4TSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\neuralNet_3L4TSTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\neuralNet_XL4TSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\neuralNet_XL4TSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\neuralNet_XL4TSTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\neuralNet_XL4TSTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\neuralNet_XL4TSTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\positionalEncTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\rNNTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\rNNTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\rNNTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\neuralforecasting\rNNTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\nullModelTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\nullModelTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\nullModelTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\nullModelTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\nullModelTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\periodogramTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\randomWalkSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\randomWalkSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\randomWalkSTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\randomWalkSTest4.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\randomWalkTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\randomWalkTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\randomWalkTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\randomWalkTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\randomWalkTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\randomWalkTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\sARYTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\sARYTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\sARYTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\sARYTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\sARYTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\simpleExpSmoothingTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\simpleExpSmoothingTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\simpleExpSmoothingTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\simpleExpSmoothingTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\simpleMovingAverageTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\simpleMovingAverageTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\simpleMovingAverageTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\simpleMovingAverageTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\stationarity_KPSSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\stationarity_KPSSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\stationarity_KPSSTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\stationaryTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\stationaryTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\stationaryTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\tranARYTest.tasty 
-[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\tranARYTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\tranARYTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\tranARYTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\trendModelTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\trendModelTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\trendModelTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\trendModelTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\weightedMovingAverageTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\weightedMovingAverageTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\weightedMovingAverageTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting\weightedMovingAverageTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\AR$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\AR.tasty 
-[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\AR1MA$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\AR1MA.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARIMA$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARIMA.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARIMA_diff$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARIMA_diff.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARMA$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARMA.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARX$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARX.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARX_MV$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARX_MV.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARX_Quad$package.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARX_Quad.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARX_QuadTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARX_QuadTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARX_QuadTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARX_QuadTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARX_QuadTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARX_QuadTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARX_Quad_MV$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ARX_Quad_MV.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ForecastUtil$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\Forecaster$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\Forecaster.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\ForecasterX.tasty 
-[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\KalmanFilter$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\KalmanFilter.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\NullModel$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\NullModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\QuadSpline$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\QuadSpline.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\RandomWalk$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\RandomWalk.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\RegressionTreeGB4TS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\RegressionTreeGB4TS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\RegressionTreeGB4TS2$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\RegressionTreeGB4TS2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\RegressionTreeMT4TS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\RegressionTreeMT4TS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\RegressionTreeRF4TS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\RegressionTreeRF4TS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\RegressionTreeRF_MT4TS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\RegressionTreeRF_MT4TS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\RollingValidation$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\RollingValidation.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\SARIMA.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\SimpleExpSmoothing$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\SimpleExpSmoothing.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\SimpleMovingAverage$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\SimpleMovingAverage.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\Stationarity$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\TrendModel$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\TrendModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\UnitRoot.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\VAR$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\VAR.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\WeightedMovingAverage$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\WeightedMovingAverage.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aR1MATest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aR1MATest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aR1MATest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aR1MATest4.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aR1MATest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRIMATest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRIMATest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRIMATest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRIMATest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRIMA_diffTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRMATest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRMATest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRMATest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRMATest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRMATest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRMATest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRMATest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRXTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRXTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRXTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRXTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRXTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRXTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRXTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRX_MVTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRX_MVTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRX_MVTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRX_MVTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRX_MVTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRX_MVTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRX_Quad_MVTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRX_Quad_MVTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRX_Quad_MVTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRX_Quad_MVTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRX_Quad_MVTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\aRX_Quad_MVTest6.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\buildTensor4TSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\kalmanFilterTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\nullModelTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\nullModelTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\nullModelTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\quadSplineTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\quadSplineTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\quadSplineTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\quadSplineTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\randomWalkTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\randomWalkTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\randomWalkTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\randomWalkTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeGB4TS2Test.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeGB4TS2Test2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeGB4TS2Test3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeGB4TS2Test4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeGB4TS2Test5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeGB4TS2Test6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeGB4TSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeGB4TSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeGB4TSTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeGB4TSTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeGB4TSTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeGB4TSTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeGB4TSTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeMT4TSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeMT4TSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeMT4TSTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeMT4TSTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeMT4TSTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeMT4TSTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeMT4TSTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeRF4TSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeRF4TSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeRF4TSTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeRF4TSTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeRF4TSTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeRF4TSTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeRF4TSTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeRF_MT4TSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeRF_MT4TSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeRF_MT4TSTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeRF_MT4TSTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeRF_MT4TSTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeRF_MT4TSTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\regressionTreeRF_MT4TSTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\rollingValidationTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\rollingValidationTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\rollingValidationTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\rollingValidationTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\simpleExpSmoothingTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\simpleExpSmoothingTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\simpleExpSmoothingTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\simpleExpSmoothingTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\simpleExpSmoothingTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\simpleMovingAverageTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\simpleMovingAverageTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\simpleMovingAverageTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\simpleMovingAverageTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\simpleMovingAverageTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\stationaryTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\stationaryTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\stationaryTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\trendModelTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\trendModelTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\trendModelTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\varTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\varTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\varTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\varTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\varTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\varTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\weightedMovingAverageTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\weightedMovingAverageTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\weightedMovingAverageTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\weightedMovingAverageTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\forecasting_old\weightedMovingAverageTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\imputationTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\imputationTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\kNN_RegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\kNN_RegressionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\kNN_RegressionTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\lassoRegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\lassoRegressionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\lassoRegressionTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\matrixTransformTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\matrixTransformTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\CNN_1D$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\CNN_1D.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\CNN_2D$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\CNN_2D.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\CoFilter_1D$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\CoFilter_1D.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\CoFilter_2D$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\CoFilter_2D.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\ELM_3L1$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\ELM_3L1.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\Example_Concrete$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\Example_Concrete.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\NetParam$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\NetParam.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\NeuralNet_2L$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\NeuralNet_2L.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\NeuralNet_2L_Ck.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\NeuralNet_3L$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\NeuralNet_3L.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\NeuralNet_3L_C2$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\NeuralNet_3L_C2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\NeuralNet_3L_Ck$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\NeuralNet_XL$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\NeuralNet_XL.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\NeuralNet_XLT$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\NeuralNet_XLT.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\Optimizer.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\Optimizer_Adam.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\Optimizer_SGD.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\Optimizer_SGDM.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\PredictorMV$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\PredictorMV.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\RegressionMV$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\RegressionMV.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\StoppingRule.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\cNN_1DTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\cNN_1DTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\cNN_1DTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\cNN_2DTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\cNN_2DTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\coFilter_1DTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\coFilter_1DTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\coFilter_1DTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\coFilter_2DTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\coFilter_2DTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\coFilter_2DTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\eLM_3L1Test.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\eLM_3L1Test2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\eLM_3L1Test3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\eLM_3L1Test4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\eLM_3L1Test5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\eLM_3L1Test6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\example_ConcreteTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\example_ConcreteTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\example_ConcreteTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_2LTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_2LTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_2LTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_2LTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_2LTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_2LTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_2LTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_2LTest8.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_2LTest9.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_2L_CkTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_3LTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_3LTest10.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_3LTest11.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_3LTest12.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_3LTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_3LTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_3LTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_3LTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_3LTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_3LTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_3LTest8.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_3LTest9.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_3L_C2Test.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_XLTTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_XLTTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_XLTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_XLTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_XLTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_XLTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_XLTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_XLTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\neuralNet_XLTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\predictorMVTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\regressionMVTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\regressionMVTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\regressionMVTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\regressionMVTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\neuralnet\regressionMVTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\nonlinearRegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\nullModelTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\nullModelTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\outlierTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\outlierTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\outlierTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\perceptronTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\perceptronTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\perceptronTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\perceptronTest4.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\perceptronTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\perceptronTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\poissonRegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\poissonRegressionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\polyORegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\polyORegressionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\polyRegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\polyRegressionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\predictorTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionCatTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionCatTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionCatTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionCatTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionCatTest5.tasty -[debug] 
 C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionCatTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionCatTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTest10.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTest8.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTest9.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeGBTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeGBTest2.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeGBTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeGBTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeGBTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeGBTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeMTTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeMTTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeMTTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeRFTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeRFTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeRFTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeRFTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeRFTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeRF_MTTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeRF_MTTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeRF_MTTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeRF_MTTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionTreeTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionWLSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\regressionWLSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ridgeRegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ridgeRegressionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ridgeRegressionTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ridgeRegressionTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ridgeRegressionTest5.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ridgeRegressionTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\ridgeRegressionTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\roundRegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\simpleExpRegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\simpleExpRegressionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\simpleExpRegressionTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\simpleRegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\simpleRegressionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\simpleRegressionTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\simpleRegressionTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\simpleRegressionTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\simpleRegressionTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\simpleRegressionTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\simpleRegressionTest8.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\simplerRegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\simplerRegressionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\simplerRegressionTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\simplerRegressionTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\sumQueueTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\sumQueueTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symLassoRegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symLassoRegressionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symLassoRegressionTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symLassoRegressionTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symLassoRegressionTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symLassoRegressionTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symLassoRegressionTest7.tasty 
-[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symLassoRegressionTest8.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symLassoRegressionTest9.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symRidgeRegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symRidgeRegressionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symRidgeRegressionTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symRidgeRegressionTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symRidgeRegressionTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symRidgeRegressionTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symRidgeRegressionTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symRidgeRegressionTest8.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symRidgeRegressionTest9.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symbolicRegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symbolicRegressionTest10.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symbolicRegressionTest11.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symbolicRegressionTest12.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symbolicRegressionTest13.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symbolicRegressionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symbolicRegressionTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symbolicRegressionTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symbolicRegressionTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symbolicRegressionTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symbolicRegressionTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symbolicRegressionTest8.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\symbolicRegressionTest9.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\tranRegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\tranRegressionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\tranRegressionTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\tranRegressionTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\tranRegressionTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\tranRegressionTest6.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\tranRegressionTest7.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\tranRegressionTest8.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\trigRegressionTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\trigRegressionTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\modeling\variableTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\multiArrayDequesTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\BoundsConstraint.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\ConjugateGradient$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\ConjugateGradient.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\ConjugateGradient_NoLS$package.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\ConjugateGradient_NoLS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\CoordinateDescent$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\CoordinateDescent.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\GoldenSectionLS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\GoldenSectionLS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\GradientDescent$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\GradientDescent.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\GradientDescent_Adam$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\GradientDescent_Adam.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\GradientDescent_Mo$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\GradientDescent_Mo.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\GradientDescent_Mo2$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\GradientDescent_Mo2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\GradientDescent_NoLS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\GradientDescent_NoLS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\GridSearch$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\GridSearch.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\GridSearchLS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\GridSearchLS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\Hungarian$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\IntegerTabuSearch$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\IntegerTabuSearch.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\LassoAddm$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\LassoAdmm.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\LineSearch.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\Minimize$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\Minimize.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\Minimizer.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\MonitorEpochs.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\NLPTest$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\NelderMeadSimplex$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\NelderMeadSimplex.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\NelderMeadSimplex2$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\NelderMeadSimplex2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\NewtonRaphson$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\NewtonRaphson.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\Newton_NoLS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\Newton_NoLS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\PathMonitor.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\SPSA$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\SPSA.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\StoppingRule.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\TabuSearch$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\TabuSearch.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\WolfeConditions$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\WolfeConditions.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\WolfeLS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\WolfeLS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\WolfeLS2$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\WolfeLS2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\WolfeLS3$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\WolfeLS3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\conjugateGradientTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\conjugateGradientTest2.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\conjugateGradientTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\conjugateGradient_NoLSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\conjugateGradient_NoLSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\conjugateGradient_NoLSTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\coordinateDescentTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\functions\BealeFunction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\functions\BenchmarkFunction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\functions\Bohachevsky1Function.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\functions\Bohachevsky2Function.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\functions\Bohachevsky3Function.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\functions\BoothFunction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\functions\Camel3Function.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\functions\CubeFunction.tasty 
-[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\functions\FreudensteinRothFunction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\functions\McCormickFunction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\functions\ParaboloidFunction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\functions\QuarticFunction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\functions\ReciprocalFunction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\functions\RosenbrockFunction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\goldenSectionLSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\goldenSectionLSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\gradientDescentTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\gradientDescentTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\gradientDescentTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\gradientDescentTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\gradientDescent_AdamTest.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\gradientDescent_Mo2Test.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\gradientDescent_MoTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\gradientDescent_NoLSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\gridSearchLSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\gridSearchLSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\gridSearchTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\gridSearchTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\hungarianTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\hungarianTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\integerTabuSearchTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\integerTabuSearchTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\lassoAdmmTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\lassoAdmmTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\lassoAdmmTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\linear_opt\CheckLP.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\linear_opt\IntegerLP$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\linear_opt\IntegerLP.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\linear_opt\MinimizerLP.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\linear_opt\Simplex2P$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\linear_opt\Simplex2P.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\linear_opt\integerLPTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\linear_opt\simplex2PTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\linearopt\QuadraticSimplex$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\linearopt\QuadraticSimplex.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\linearopt\quadraticSimplexTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\nLPTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\nLPTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\nelderMeadSimplex2Test.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\nelderMeadSimplexTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\newtonRaphsonTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\newtonRaphsonTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\newtonRaphsonTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\newton_NoLSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\newton_NoLSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\newton_NoLSTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newtonC\FunctionDescriptors.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newtonC\FunctionOptimizationFFM.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newtonC\LBFGS_FFM$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newtonC\LBFGS_FFM.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newtonC\MethodTypes.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newtonC\OptimizationLogicFFM.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newtonC\OptimizationMethodHandlesFFM.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newtonC\bohachevsky2FunctionLBFGS_FFMTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newtonC\boothFunctionLBFGS_FFMTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\BFGS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\BFGS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\BFGS_NoLS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\BFGS_NoLS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\DM_LBFGS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\DM_LBFGS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\EvaluationLogic.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\FunctionEvaluation.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\FunctionOptimization.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSBacktrackingArmijo.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSBacktrackingOrthantWise.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSBacktrackingStrongWolfe.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSBacktrackingWolfe.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSCallbackData.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSIterationData.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSLineSearch$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSLineSearch.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSLineSearchAlg.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSLineSearchFailure.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSLineSearchIncomplete.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSLineSearchPrms.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSLineSearchStep.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSMoreThuente.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSPrms.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSResults.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSReturnCode.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGSVarEvaluationResults.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGS_B$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGS_B.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGS_NoLS$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LBFGS_NoLS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\LineSearchTriInterval.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\OptimizationLogic.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\OrthantWisePrms.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\QNewton.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bFGSBealeFunction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bFGSBohachevsky1Function.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bFGSBohachevsky2Function.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bFGSBohachevsky3Function.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bFGSBoothFunction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bFGSCamel3Function.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bFGSCubeFunction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bFGSFreudensteinRothFunction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bFGSMcCormickFunction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bFGSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bFGSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bFGSTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bFGSTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bFGS_NoLSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bFGS_NoLSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bFGS_NoLSTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bFGS_NoLSTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bealeFunctionLBFGSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bohachevsky1FunctionLBFGSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bohachevsky2FunctionLBFGSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\bohachevsky3FunctionLBFGSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\boothFunctionLBFGSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\camel3FunctionLBFGSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\cubeFunctionLBFGSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\freudensteinRothFunctionLBFGSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\lBFGS_BTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\lBFGS_BTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\lBFGS_BTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\lBFGS_NoLSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\lBFGS_NoLSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\lBFGS_NoLSTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\mccormickFunctionDMLBFGSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\quasi_newton\mccormickFunctionLBFGSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\sPSATest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\tabuSearchTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\tabuSearchTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\wolfeConditionsTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\wolfeLS2Test.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\wolfeLS2Test2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\wolfeLS2Test3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\wolfeLS2Test4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\wolfeLS3Test.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\wolfeLS3Test2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\wolfeLS3Test3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\wolfeLS3Test4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\wolfeLSTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\wolfeLSTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\optimization\wolfeLSTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Bernoulli.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Beta.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Binomial.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\CDF$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\CDF.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Cauchy.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\ChiSquare.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Dice.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Dir.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Discrete.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Erlang.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Exponential.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Fisher.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Gamma.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Geometric.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\HyperExponential.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\HyperExponential_.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\HyperGeometric.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Known.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\LogNormal.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Logistic.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Multinomial.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\NHPoissonProcess.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\NegativeBinomial.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Normal.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\NormalMat.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\NormalTen.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\NormalVec.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\NormalVec_.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\NormalVec_c.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\PermutedVecD.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\PermutedVecI.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Poisson.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\PoissonProcess$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\PoissonProcess.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\PowerLaw.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\ProbabilityVec.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Quantile$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Quantile.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RNG$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RNG.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RNGStream.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RNGTester.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Randi.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Randi0.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RandiU0.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Random.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Random0.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Random2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Random3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RandomMatD.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RandomSeeds.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RandomSeeds3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RandomSet.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RandomSetS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RandomSetW.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RandomStr.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RandomTenD.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RandomVecD.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RandomVecD_.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RandomVecI.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RandomVecS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RandomVecSample.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RandomVecTrend.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\RandomWord.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Sharp.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\StdNormal.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\StreamMaker$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\StreamMaker.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\StreamMaker3$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\StudentT.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\TimeVariate.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Trapezoidal.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Triangular.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Trinomial.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Uniform.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Variate$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Variate.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\VariateMat$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\VariateMat.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\VariateSet$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\VariateSet.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\VariateStr$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\VariateTen$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\VariateTen.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\VariateVec$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\VariateVec.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\Weibull.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\cDFTest_ChiSquare.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\cDFTest_Empirical.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\cDFTest_Exponential.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\cDFTest_Fisher.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\cDFTest_Fisher2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\cDFTest_Normal.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\cDFTest_Normal_Diff.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\cDFTest_StudentT.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\cDFTest_Uniform.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\cDFTest_Weibull.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\cLTTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\diceTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\poissonProcessTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\quantileTest_ChiSquare.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\quantileTest_Empirical.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\quantileTest_Exponential.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\quantileTest_Fisher.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\quantileTest_Normal.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\quantileTest_StudentT.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\quantileTest_Uniform.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\rNGTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\randomStrTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\randomWordTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\streamMaker3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\streamMakerGen.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\variateMatTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\variateSetTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\variateSetTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\variateSetTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\variateTenTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\variateTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\variateVecTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\random\variateVecTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\readFileTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\readFileTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\redirectOutTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\ringTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\runCalc.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\runCalcHelp.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Arc.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Arrow$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Arrow.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Base$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\BorderLayout.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Colors$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Colors.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\CurvilinearShape.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Ellipse.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Frame.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Hexagon.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\ImageWriter$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Line.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Octagon.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Path.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Pentagon.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Polygon$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Polygon.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\QArrow$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\QArrow.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\QCurve$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\QCurve.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Quad.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Rectangle.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\RoundRectangle.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Shapes$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Transform.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\Triangle.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\TrigConstants.tasty 
-[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\VizFrame.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\ZoomablePanel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\arrowTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\colorsTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\lineTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\polygonTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\polygonTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\qArrowTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\qCurveTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\qCurveTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala2d\writeImageTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala3d\Clock.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala3d\Road3d.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala3d\Sink.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala3d\Source.tasty 
-[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\scala3d\Vehicle.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\setExtTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\Completion.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\Coroutine.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\CoroutineTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\Identifiable.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\Locatable.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\Locatable2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\Modelable.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\Monitor$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\Monitor.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\NH_PoissonProcess$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\NH_PoissonProcess.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\PoissonProcess$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\PoissonProcess.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\Temporal.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\activity\ArcD.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\activity\ArcI.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\activity\Counter.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\activity\PetriNet$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\activity\PetriNet.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\activity\PetriNetRules.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\activity\PetriNetRulesTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\activity\PlaceD.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\activity\PlaceI.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\activity\Transition.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\activity\petriNetTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\EdgeAgents.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\Gate.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\Junction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\Link.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\Model.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\Monitor$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\Monitor.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\QueueOps.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\Resource.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\Route.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\SimAgent$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\SimAgent.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\Sink.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\Source.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\Statistical.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\Transport.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\WaitQueue.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\WaitQueue_LCFS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\example_1\Bank$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\example_1\BankModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\example_1\CallCenter$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\example_1\CallCenterModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\example_1\Traffic2L$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\example_1\Traffic2LModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\example_1\Traffic4L$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\example_1\Traffic4LModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\example_1\UGABusRoutes$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\example_1\UGABusRoutesModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\example_1\runBank.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\example_1\runCallCenter.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\example_1\runTraffic2L.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\example_1\runTraffic2L1.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\example_1\runTraffic4L.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\example_1\runUGABusRoutes.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\example_1\runUGABusRoutes1.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\monitorTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\agent\simAgentTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\CausalLink.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\Entity.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\Event.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\EventNode.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\Ex_Template$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\Model.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\SOMEModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\WaitQueue.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\WaitQueue_LCFS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\Bank$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\Bank2$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\Bank3$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\BankModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\BankModel2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\BankModel3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\CallCenter$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\CallCenter2$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\CallCenterModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\CallCenterModel2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\FastFood$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\FastFoodModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\Machine$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\MachineModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\Poisson$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\Poisson2$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\PoissonModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\PoissonModel2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\runBank.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\runBank2.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\runBank3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\runCallCenter.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\runCallCenter2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\runFastFood.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\runMachine.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\runPoisson.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\example_1\runPoisson2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\event\runSOME.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monitorTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\Cards$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\Cards.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\GrainDropping$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\GrainDropping.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\MonteCarloIntegration$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\MonteCarloIntegration.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\MontyHall$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\RollDice$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\RollDice.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\SphereVolume$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\cardsTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\cardsTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\cardsTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\grainDroppingTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\monteCarloIntegrationTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\montyHall.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\rollDiceTest.tasty 
-[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\rollDiceTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\rollDiceTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\monte_carlo\sphereVolumeTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\nH_PoissonProcessTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\nH_PoissonProcessTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\poissonProcessTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\Bus.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\Component.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\Dynamics.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\Gate.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\GippsDynamics.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\IDMDynamics.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\Junction.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\Model.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\Model_MBM.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\Path$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\Path.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\Recorder.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\Resource.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\SimActor.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\Sink.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\Source$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\Source.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\Transport.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\VSource$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\VSource.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\VTransport.tasty -[debug]  
C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\Vehicle.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\WaitQueue.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\WaitQueue_LCFS.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\Bank$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\BankModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\CallCenter$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\CallCenterModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\EmerDept$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\EmerDeptModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\Loop$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\LoopModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\Machine$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\MachineModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\OneWayStreet$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\OneWayStreetModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\OneWayVehicle$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\OneWayVehicleModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\Road$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\RoadModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\Traffic$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\TrafficDyn$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\TrafficDynModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\TrafficLaneChange$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\TrafficLaneChangeModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\TrafficModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\TrafficModelTurn.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\TrafficTurn$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\UGA_Bus$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\UGA_BusModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\runBank.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\runCallCenter.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\runEmerDept.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\runLoop.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\runMachine.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\runOneWayStreet.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\runOneWayVehicle.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\runRoad.tasty 
-[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\runTraffic.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\runTrafficDyn.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\runTrafficLaneChange.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\runTrafficTurn.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_1\runUGA_Bus.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_MBM\Bank$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_MBM\BankModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_MBM\runBank.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_MBM\testCorrBank.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_MIR\Bank$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_MIR\BankModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\example_MIR\runBank.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\pathTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\sourceTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\process\vSourceTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\queueingnet\JacksonNet$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\queueingnet\JacksonNet.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\queueingnet\MGc_Queue$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\queueingnet\MGc_Queue.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\queueingnet\MM1_Queue.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\queueingnet\MM2_Queue.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\queueingnet\MM_Queue$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\queueingnet\MM_Queue.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\queueingnet\MMc_Queue.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\queueingnet\MMck_Queue$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\queueingnet\MMck_Queue.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\queueingnet\jacksonNetTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\queueingnet\mGc_QueueTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\queueingnet\mM_QueueTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\queueingnet\mMck_QueueTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\runCoroutineTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\state\MarkovCT.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\state\MarkovChain$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\state\MarkovChain.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\state\MarkovChainCT$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\state\markovCTTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\state\markovChainTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\state\markovChainTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\state\markovChainTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\state\markovChainTest4.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\state\markovChainTest5.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\tableau\CallCenterModel.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\tableau\Ex_Bank$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\tableau\Ex_CallCenter$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\tableau\Model$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\tableau\Model.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\tableau\Queue_MM1$package.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\tableau\runEx_Bank.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\tableau\runEx_CallCenter.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\tableau\runModelTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\simulation\tableau\runQueue_MM1.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New 
Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\skipListTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\timeNumTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\timeNumTest2.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\timeNumTest3.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\timerTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\unicodeTest.tasty -[debug]  C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes\scalation\valueTypeTest.tasty -[info] Skipping unused scalacOptions: -Werror, -new-syntax -[warn] Flag -classpath set repeatedly -[warn] one warning found -[info] Main Scala API documentation successful. 
diff --git a/target/streams/compile/doc/_global/streams/scala/inputs b/target/streams/compile/doc/_global/streams/scala/inputs deleted file mode 100644 index 8c1760dd3..000000000 --- a/target/streams/compile/doc/_global/streams/scala/inputs +++ /dev/null @@ -1 +0,0 @@ --693383437 \ No newline at end of file diff --git a/target/streams/compile/doc/_global/streams/scala/output b/target/streams/compile/doc/_global/streams/scala/output deleted file mode 100644 index b96a3f430..000000000 --- a/target/streams/compile/doc/_global/streams/scala/output +++ /dev/null @@ -1 +0,0 @@ -473519988 \ No newline at end of file diff --git a/target/streams/compile/externalDependencyClasspath/_global/streams/export b/target/streams/compile/externalDependencyClasspath/_global/streams/export deleted file mode 100644 index e76194940..000000000 --- a/target/streams/compile/externalDependencyClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ -C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scala-lang\scala3-library_3\3.6.4\scala3-library_3-3.6.4.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scalafx\scalafx_3\22.0.0-R33\scalafx_3-22.0.0-R33.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scala-lang\scala-library\2.13.15\scala-library-2.13.15.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-base\22\javafx-base-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-controls\22\javafx-controls-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-fxml\22\javafx-fxml-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-graphics\22\javafx-graphics-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-media\22\javafx-media-22.jar;C:\Users\youse\Ap
pData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-swing\22\javafx-swing-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-web\22\javafx-web-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-base\22\javafx-base-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-controls\22\javafx-controls-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-fxml\22\javafx-fxml-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-graphics\22\javafx-graphics-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-media\22\javafx-media-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-swing\22\javafx-swing-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-web\22\javafx-web-22-win.jar diff --git a/target/streams/compile/incOptions/_global/streams/out b/target/streams/compile/incOptions/_global/streams/out deleted file mode 100644 index 77e6a327c..000000000 --- a/target/streams/compile/incOptions/_global/streams/out +++ /dev/null @@ -1,2 +0,0 @@ -[debug] Created transactional ClassFileManager with tempDir = C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes.bak -[debug] Removing the temporary directory used for backing up class files: C:\Users\youse\OneDrive\Documents\New Scalation\scalation_2.0\target\scala-3.6.4\classes.bak diff --git a/target/streams/compile/internalDependencyClasspath/_global/streams/export b/target/streams/compile/internalDependencyClasspath/_global/streams/export deleted file mode 100644 index 8b1378917..000000000 --- 
a/target/streams/compile/internalDependencyClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ - diff --git a/target/streams/compile/internalDependencyClasspath/_global/streams/out b/target/streams/compile/internalDependencyClasspath/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/target/streams/compile/managedClasspath/_global/streams/export b/target/streams/compile/managedClasspath/_global/streams/export deleted file mode 100644 index e76194940..000000000 --- a/target/streams/compile/managedClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ -C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scala-lang\scala3-library_3\3.6.4\scala3-library_3-3.6.4.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scalafx\scalafx_3\22.0.0-R33\scalafx_3-22.0.0-R33.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scala-lang\scala-library\2.13.15\scala-library-2.13.15.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-base\22\javafx-base-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-controls\22\javafx-controls-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-fxml\22\javafx-fxml-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-graphics\22\javafx-graphics-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-media\22\javafx-media-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-swing\22\javafx-swing-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-web\22\javafx-web-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-base\22\javaf
x-base-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-controls\22\javafx-controls-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-fxml\22\javafx-fxml-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-graphics\22\javafx-graphics-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-media\22\javafx-media-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-swing\22\javafx-swing-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-web\22\javafx-web-22-win.jar diff --git a/target/streams/compile/scalacOptions/_global/streams/out b/target/streams/compile/scalacOptions/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/target/streams/compile/unmanagedClasspath/_global/streams/export b/target/streams/compile/unmanagedClasspath/_global/streams/export deleted file mode 100644 index 8b1378917..000000000 --- a/target/streams/compile/unmanagedClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ - diff --git a/target/streams/compile/unmanagedClasspath/_global/streams/out b/target/streams/compile/unmanagedClasspath/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/target/streams/compile/unmanagedJars/_global/streams/export b/target/streams/compile/unmanagedJars/_global/streams/export deleted file mode 100644 index 8b1378917..000000000 --- a/target/streams/compile/unmanagedJars/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ - diff --git a/target/streams/runtime/externalDependencyClasspath/_global/streams/export b/target/streams/runtime/externalDependencyClasspath/_global/streams/export deleted file mode 100644 index e76194940..000000000 --- 
a/target/streams/runtime/externalDependencyClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ -C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scala-lang\scala3-library_3\3.6.4\scala3-library_3-3.6.4.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scalafx\scalafx_3\22.0.0-R33\scalafx_3-22.0.0-R33.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scala-lang\scala-library\2.13.15\scala-library-2.13.15.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-base\22\javafx-base-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-controls\22\javafx-controls-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-fxml\22\javafx-fxml-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-graphics\22\javafx-graphics-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-media\22\javafx-media-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-swing\22\javafx-swing-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-web\22\javafx-web-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-base\22\javafx-base-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-controls\22\javafx-controls-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-fxml\22\javafx-fxml-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-graphics\22\javafx-graphics-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\j
avafx-media\22\javafx-media-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-swing\22\javafx-swing-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-web\22\javafx-web-22-win.jar diff --git a/target/streams/runtime/managedClasspath/_global/streams/export b/target/streams/runtime/managedClasspath/_global/streams/export deleted file mode 100644 index e76194940..000000000 --- a/target/streams/runtime/managedClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ -C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scala-lang\scala3-library_3\3.6.4\scala3-library_3-3.6.4.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scalafx\scalafx_3\22.0.0-R33\scalafx_3-22.0.0-R33.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scala-lang\scala-library\2.13.15\scala-library-2.13.15.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-base\22\javafx-base-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-controls\22\javafx-controls-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-fxml\22\javafx-fxml-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-graphics\22\javafx-graphics-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-media\22\javafx-media-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-swing\22\javafx-swing-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-web\22\javafx-web-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-base\22\javafx-base-22-win.jar;C:\Use
rs\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-controls\22\javafx-controls-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-fxml\22\javafx-fxml-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-graphics\22\javafx-graphics-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-media\22\javafx-media-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-swing\22\javafx-swing-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-web\22\javafx-web-22-win.jar diff --git a/target/streams/runtime/unmanagedClasspath/_global/streams/export b/target/streams/runtime/unmanagedClasspath/_global/streams/export deleted file mode 100644 index 8b1378917..000000000 --- a/target/streams/runtime/unmanagedClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ - diff --git a/target/streams/runtime/unmanagedClasspath/_global/streams/out b/target/streams/runtime/unmanagedClasspath/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/target/streams/runtime/unmanagedJars/_global/streams/export b/target/streams/runtime/unmanagedJars/_global/streams/export deleted file mode 100644 index 8b1378917..000000000 --- a/target/streams/runtime/unmanagedJars/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ - diff --git a/target/streams/test/externalDependencyClasspath/_global/streams/export b/target/streams/test/externalDependencyClasspath/_global/streams/export deleted file mode 100644 index e76194940..000000000 --- a/target/streams/test/externalDependencyClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ 
-C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scala-lang\scala3-library_3\3.6.4\scala3-library_3-3.6.4.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scalafx\scalafx_3\22.0.0-R33\scalafx_3-22.0.0-R33.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scala-lang\scala-library\2.13.15\scala-library-2.13.15.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-base\22\javafx-base-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-controls\22\javafx-controls-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-fxml\22\javafx-fxml-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-graphics\22\javafx-graphics-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-media\22\javafx-media-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-swing\22\javafx-swing-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-web\22\javafx-web-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-base\22\javafx-base-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-controls\22\javafx-controls-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-fxml\22\javafx-fxml-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-graphics\22\javafx-graphics-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-media\22\javafx-media-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.
org\maven2\org\openjfx\javafx-swing\22\javafx-swing-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-web\22\javafx-web-22-win.jar diff --git a/target/streams/test/managedClasspath/_global/streams/export b/target/streams/test/managedClasspath/_global/streams/export deleted file mode 100644 index e76194940..000000000 --- a/target/streams/test/managedClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ -C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scala-lang\scala3-library_3\3.6.4\scala3-library_3-3.6.4.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scalafx\scalafx_3\22.0.0-R33\scalafx_3-22.0.0-R33.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\scala-lang\scala-library\2.13.15\scala-library-2.13.15.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-base\22\javafx-base-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-controls\22\javafx-controls-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-fxml\22\javafx-fxml-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-graphics\22\javafx-graphics-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-media\22\javafx-media-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-swing\22\javafx-swing-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-web\22\javafx-web-22.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-base\22\javafx-base-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-controls\22\javafx-contr
ols-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-fxml\22\javafx-fxml-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-graphics\22\javafx-graphics-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-media\22\javafx-media-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-swing\22\javafx-swing-22-win.jar;C:\Users\youse\AppData\Local\Coursier\Cache\v1\https\repo1.maven.org\maven2\org\openjfx\javafx-web\22\javafx-web-22-win.jar diff --git a/target/streams/test/scalacOptions/_global/streams/out b/target/streams/test/scalacOptions/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/target/streams/test/unmanagedClasspath/_global/streams/export b/target/streams/test/unmanagedClasspath/_global/streams/export deleted file mode 100644 index 8b1378917..000000000 --- a/target/streams/test/unmanagedClasspath/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ - diff --git a/target/streams/test/unmanagedClasspath/_global/streams/out b/target/streams/test/unmanagedClasspath/_global/streams/out deleted file mode 100644 index e69de29bb..000000000 diff --git a/target/streams/test/unmanagedJars/_global/streams/export b/target/streams/test/unmanagedJars/_global/streams/export deleted file mode 100644 index 8b1378917..000000000 --- a/target/streams/test/unmanagedJars/_global/streams/export +++ /dev/null @@ -1 +0,0 @@ -